text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization utilities for tensorflow graphs."""
import tensorflow as tf
from graphviz import Digraph
import tempfile
import hashlib
import numpy as np
import os
import webbrowser
from IPython.display import clear_output, Image, display, HTML
import time
def tf_to_dot(graph=None, fname=None, format=None):
"""
Create an image from a tensorflow graph.
graph: The tensorflow graph to visualize. Defaults to tf.get_default_graph()
fname: Filename to save the graph image in
format: Optional image extension. If you do not use this, the extension is
derived from the fname.
Returns an org-mode link to the path where the image is.
Adapted from https://blog.jakuba.net/2017/05/30/tensorflow-visualization.html
Note: This can make very large images for complex graphs.
"""
dot = Digraph()
if graph is None:
graph = tf.get_default_graph()
shapes = {'Const': 'circle',
'Placeholder': 'oval'}
for n in graph.as_graph_def().node:
shape = tuple([dim.size for dim
in n.attr['value'].tensor.tensor_shape.dim])
dot.node(n.name, label=f'{n.name} {shape}',
shape=shapes.get(n.op, None))
for i in n.input:
dot.edge(i, n.name)
m = hashlib.md5()
m.update(str(dot).encode('utf-8'))
if fname is None:
fname = 'tf-graph-' + m.hexdigest()
if format is None:
base, ext = os.path.splitext(fname)
fname = base
format = ext[1:] or 'png'
dot.format = format
dot.render(fname)
os.unlink(fname)
print(f'{fname}, {format}')
return f'[[./{fname}.{format}]]'
# Tensorboard visualizations
# Adapted from https://gist.githubusercontent.com/yaroslavvb/97504b8221a8529e7a51a50915206d68/raw/f1473d2873676c0e885b9fbd363c882a7a83b28a/show_graph
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = f"<stripped {size} bytes>".encode('utf-8')
return strip_def
def show_graph(graph_def=None, browser=True,
width=1200, height=800,
max_const_size=32, ungroup_gradients=False):
"""Open a graph in Tensorboard. By default this is done in a browser. If you set
browser to False, then html will be emitted that shows up in a Jupyter
notebook.
"""
if not graph_def:
graph_def = tf.get_default_graph().as_graph_def()
"""Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
data = str(strip_def)
if ungroup_gradients:
data = data.replace('"gradients/', '"b_')
#print(data)
code = """<style>.container {{ width:100% !important; }}</style>
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(data), id='graph'+str(np.random.rand()))
iframe = """
<iframe seamless style="width:100%;height:100%;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '"'))
if browser:
fh, tmpf = tempfile.mkstemp(prefix='tf-graph-', suffix='.html')
os.close(fh)
with open(tmpf, 'w') as f:
f.write(iframe)
webbrowser.open('file://' + tmpf)
else:
display(HTML(iframe))
| google/differentiable-atomistic-potentials | dap/tf/visualize.py | Python | apache-2.0 | 4,530 | 0.001987 |
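# Hedged usage sketch for the helpers above (not part of the original file).
# It assumes TensorFlow 1.x and the graphviz package/binaries are installed;
# the tiny example graph and the output file name are illustrative only.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, shape=(None, 3), name='x')
    w = tf.constant([[1.0], [2.0], [3.0]], name='w')
    y = tf.matmul(x, w, name='y')

link = tf_to_dot(graph=g, fname='tiny-graph.png')  # renders tiny-graph.png
show_graph(g.as_graph_def(), browser=True)         # opens a browser tab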
from ast import Module, ImportFrom, Expr, Call, Name, FunctionDef, Assign, Str
from ast import dump, If, Compare, Eq, For, Attribute, Gt, Num, IsNot, BinOp
from ast import NameConstant, Mult, Add, Import, List, Dict, Is, BoolOp, And
from ast import Subscript, Index, Tuple, Lt, Sub, Global, Return, AugAssign
from ast import While, UnaryOp, Not, ClassDef, Mod, Yield, NotEq, Try, Pass
from ast import ExceptHandler, Break, Slice, USub, ListComp, In, Lambda, BitAnd
from ast import BitOr, Or, Delete, Bytes, Raise, NotIn, RShift, GeneratorExp
from ast import Assert, Set, SetComp, LtE, IfExp, FloorDiv, GtE, With, Continue
from ast import YieldFrom, UAdd, LShift, DictComp, Div, Starred, BitXor, Pow
from _ast import arguments, arg as arg_type, keyword as keyword_type
from _ast import alias as alias_type, comprehension, withitem
try:
# python3.5 support
from _ast import AsyncFor, AsyncFunctionDef, AsyncWith, Await
except ImportError:
AsyncFor= AsyncFunctionDef= AsyncWith= Await= object()
def pprint_body (body, level):
for statement in body:
yield ' '*level
for i in pprint_inner (statement, level): yield i
yield '\n'
def pprint_seq (seq, sep=', '):
for index, elem in enumerate (seq):
if type (elem)==str:
yield elem
else:
for i in pprint_inner (elem): yield i
if index<len (seq)-1:
if type (sep)==str:
yield sep
else:
for i in pprint_inner (sep): yield i
def pprint_orelse (orelse, level):
if len (orelse)>0:
yield ' '*level+'else:\n'
for i in pprint_body (orelse, level+1): yield i
def pprint_args (args, defaults):
# TODO: annotations
# args=[arg(arg='a', annotation=None), arg(arg='b', annotation=None)]
# defaults=[Num(n=1)]
d_index= len (args)-len (defaults)
for index, arg in enumerate (args):
yield arg.arg
if index>=d_index:
yield '='
for i in pprint_inner (defaults[index-d_index]): yield i
if index<len (args)-1:
yield ', '
def pprint (node, level=0):
return ''.join (pprint_inner (node, level))
def pprint_inner (node, level=0):
t= type (node)
if t==Add:
yield '+'
elif t==And:
yield ' and '
elif t==Assert:
# Assert(test=..., msg=None)
yield 'assert '
for i in pprint_inner (node.test): yield i
# TODO: msg
elif t==Assign:
# Assign(targets=[Name(id='c', ctx=Store())],
# value=...)
for i in pprint_seq (node.targets): yield i
yield '= '
for i in pprint_inner (node.value): yield i
elif t==AsyncFor:
yield 'async '
# For(target=..., iter=..., body=[...], orelse=[...])
node= For (target=node.target, iter=node.iter, body=node.body, orelse=node.orelse)
for i in pprint_inner (node): yield i
elif t==AsyncFunctionDef:
yield 'async '
# FunctionDef(name='foo', args=arguments(...), body=[ ... ], decorator_list=[], returns=None)
node= FunctionDef (name=node.name, args=node.args, body=node.body, decorator_list=node.decorator_list,
returns=node.returns)
for i in pprint_inner (node): yield i
elif t==AsyncWith:
yield 'async '
# With(items=[...], body=[...])
node= With (items=node.items, body=node.body)
for i in pprint_inner (node): yield i
elif t==Attribute:
# Attribute(value=Name(id='node', ctx=Load()), attr='body', ctx=Load())
for i in pprint_inner (node.value): yield i
yield '.'
yield node.attr
elif t==AugAssign:
# AugAssign(target=Name(id='ans', ctx=Store()), op=Add(), value=Name(id='a', ctx=Load()))
for i in pprint_inner (node.target): yield i
for i in pprint_inner (node.op): yield i
yield '= '
for i in pprint_inner (node.value): yield i
elif t==Await:
# value=Await(value=...)
yield 'await '
for i in pprint_inner (node.value): yield i
elif t==BinOp:
# BUG:
# m= ast.parse ('5*(3+4)')
# ayrton.ast_pprinter.pprint (m)
# 5*3+4
for i in pprint_inner (node.left): yield i
for i in pprint_inner (node.op): yield i
for i in pprint_inner (node.right): yield i
elif t==BitAnd:
yield ' & '
elif t==BitOr:
yield '|'
elif t==BitXor:
yield '^'
elif t==BoolOp:
for i in pprint_seq (node.values, node.op): yield i
elif t==Break:
yield 'break'
elif t==Bytes:
yield repr (node.s)
elif t==Call:
# Call(func=Name(id='foo', ctx=Load()), args=[], keywords=[], starargs=None, kwargs=None)
# TODO: annotations
for i in pprint_inner (node.func): yield i
yield ' ('
for i in pprint_seq (node.args): yield i
if len (node.args)>0 and (len (node.keywords)>0 or
node.starargs is not None or
node.kwargs is not None):
yield ', '
for i in pprint_seq (node.keywords): yield i
if ((len (node.args)>0 or len (node.keywords)>0) and
(node.starargs is not None or node.kwargs is not None)):
yield ', '
if node.starargs is not None:
yield '*'
for i in pprint_inner (node.starargs): yield i
if ((len (node.args)>0 or
len (node.keywords)>0 or
(node.starargs is not None) and node.kwargs is not None)):
yield ', '
if node.kwargs is not None:
yield '**'
for i in pprint_inner (node.kwargs): yield i
yield ')'
elif t==ClassDef:
# ClassDef(name='ToExpand', bases=[Name(id='object', ctx=Load())],
# keywords=[], starargs=None, kwargs=None, body=[...]
yield 'class '
yield node.name
# TODO: more
if len (node.bases)>0:
yield ' ('
for i in pprint_seq (node.bases): yield i
yield ')'
yield ':'
for i in pprint_body (node.body, level+1): yield i
elif t==Compare:
# Compare(left=Name(id='t', ctx=Load()), ops=[Eq()], comparators=[Name(id='Module', ctx=Load())])
# TODO: do properly
for i in pprint_inner (node.left): yield i
for op in node.ops:
for i in pprint_inner (op): yield i
for comparator in node.comparators:
for i in pprint_inner (comparator): yield i
elif t==Continue:
yield 'continue'
elif t==Delete:
yield 'delete '
for i in pprint_seq (node.targets): yield i
elif t==Dict:
yield '{ '
for k, v in zip (node.keys, node.values):
for i in pprint_inner (k): yield i
yield ': '
for i in pprint_inner (v): yield i
yield ', '
yield ' }'
elif t==DictComp:
# DictComp(key=Name(id='v', ctx=Load()), value=Name(id='k', ctx=Load()), generators=[comprehension(target=Tuple(elts=[Name(id='k', ctx=Store()), Name(id='v', ctx=Store())], ctx=Store()), iter=Call(func=Name(id='enumerate', ctx=Load()), args=[Name(id='_b32alphabet', ctx=Load())], keywords=[], starargs=None, kwargs=None), ifs=[])])
yield '{ '
for i in pprint_inner (node.key): yield i
yield ': '
for i in pprint_inner (node.value): yield i
yield ' for '
# TODO: more
for i in pprint_inner (node.generators[0]): yield i
yield ' }'
elif t==Div:
yield '/'
elif t==Eq:
yield '=='
elif t==ExceptHandler:
# ExceptHandler(type=Name(id='KeyError', ctx=Load()), name=None, body=[Pass()])
yield ' '*level+'except '
if node.type is not None:
for i in pprint_inner (node.type): yield i
if node.name is not None:
yield ' as '
yield node.name
yield ':'
for i in pprint_body (node.body, level+1): yield i
elif t==Expr:
# Expr(value=...)
for i in pprint_inner (node.value, level): yield i
elif t==FloorDiv:
yield '//'
elif t==For:
# For(target=..., iter=..., body=[...], orelse=[...])
yield 'for '
for i in pprint_inner (node.target): yield i
yield ' in '
for i in pprint_inner (node.iter): yield i
yield ':\n'
for i in pprint_body (node.body, level+1): yield i
for i in pprint_orelse (node.orelse, level): yield i
elif t==FunctionDef:
# FunctionDef(name='foo', args=arguments(...), body=[ ... ], decorator_list=[], returns=None)
# TODO: decorator_list
# TODO: returns
yield 'def '
yield node.name
yield ' ('
for i in pprint_inner (node.args): yield i
yield '):\n'
for i in pprint_body (node.body, level+1): yield i
elif t==GeneratorExp:
# GeneratorExp(elt=Name(id='line', ctx=Load()), generators=[...])
yield '( '
for i in pprint_inner (node.elt): yield i
yield ' for '
# TODO: more
for i in pprint_inner (node.generators[0]): yield i
yield ' )'
elif t==Global:
yield 'global '
for i in pprint_seq (node.names): yield i
elif t==Gt:
yield '>'
elif t==GtE:
yield '>='
elif t==If:
# If(test=..., body=[...], orelse=[...]
yield 'if '
for i in pprint_inner (node.test): yield i
yield ':\n'
for i in pprint_body (node.body, level+1): yield i
if len (node.orelse)>0:
# special case for elif
if len (node.orelse)==1 and type (node.orelse[0])==If:
yield ' '*level+'el'
for i in pprint_inner (node.orelse[0], level): yield i
else:
for i in pprint_orelse (node.orelse, level): yield i
elif t==IfExp:
# IfExp(test=..., body=Str(s=''), orelse=Str(s='s'))
for i in pprint_inner (node.body): yield i
yield ' if '
for i in pprint_inner (node.test): yield i
yield ' else '
for i in pprint_inner (node.orelse): yield i
elif t==Import:
# Import(names=[alias(name='ayrton', asname=None)])
yield "import "
for i in pprint_seq (node.names): yield i
elif t==ImportFrom:
# ImportFrom(module='ayrton.execute', names=[alias(name='Command', asname=None)], level=0)
yield "from "
yield node.module
yield " import "
for i in pprint_seq (node.names): yield i
elif t==In:
yield ' in '
elif t==Index:
for i in pprint_inner (node.value): yield i
elif t==Is:
yield ' is '
elif t==IsNot:
yield ' is not '
elif t==LShift:
yield '<<'
elif t==Lambda:
# Lambda(args=arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]), body=Num(n=0))
yield 'lambda '
for i in pprint_inner (node.args): yield i
yield ': '
for i in pprint_inner (node.body): yield i
elif t==List:
yield '[ '
for i in pprint_seq (node.elts): yield i
yield ' ]'
elif t==ListComp:
# ListComp(elt=Name(id='i', ctx=Load()), generators=[...])
# [ i for i in self.indexes if i.right is not None ]
yield '[ '
for i in pprint_inner (node.elt): yield i
yield ' for '
# TODO: more
for i in pprint_inner (node.generators[0]): yield i
yield ' ]'
elif t==Lt:
yield '<'
elif t==LtE:
yield '<='
elif t==Mod:
yield ' % '
elif t==Module:
# Module(body=[ ... ])
for i in pprint_body (node.body, 0): yield i
elif t==Mult:
yield '*'
elif t==Name:
yield node.id
elif t==NameConstant:
yield str (node.value)
elif t==Not:
yield 'not '
elif t==NotEq:
yield '!='
elif t==NotIn:
yield ' not in '
elif t==Num:
yield str (node.n)
elif t==Or:
yield ' or '
elif t==Pass:
yield 'pass\n'
elif t==Pow:
yield '**'
elif t==RShift:
yield '>>'
elif t==Raise:
# Raise(exc=Call(func=Name(id='ValueError', ctx=Load()),
# args=[Str(s='too many lines')], keywords=[],
# starargs=None, kwargs=None),
# cause=None)
yield 'raise '
if node.exc is not None:
for i in pprint_inner (node.exc): yield i
# TODO: cause?
elif t==Return:
yield 'return '
if node.value is not None:
for i in pprint_inner (node.value): yield i
elif t==Set:
yield '{ '
for i in pprint_seq (node.elts): yield i
yield ' }'
elif t==SetComp:
# SetComp(elt=Name(id='name', ctx=Load()), generators=[...])
yield '{ '
for i in pprint_inner (node.elt): yield i
yield ' for '
# TODO: more
for i in pprint_inner (node.generators[0]): yield i
yield ' }'
elif t==Slice:
# Slice(lower=None, upper=Name(id='left_cb', ctx=Load()), step=None)
if node.lower is not None:
for i in pprint_inner (node.lower): yield i
yield ':'
if node.upper is not None:
for i in pprint_inner (node.upper): yield i
if node.step is not None:
yield ':'
for i in pprint_inner (node.step): yield i
elif t==Str:
# Str(s='true')
yield repr (node.s)
elif t==Sub:
yield '-'
elif t==Subscript:
# Subscript(value=Attribute(value=Name(id='node', ctx=Load()), attr='orelse', ctx=Load()),
# slice=Index(value=Num(n=0)), ctx=Load())
for i in pprint_inner (node.value): yield i
yield '['
for i in pprint_inner (node.slice): yield i
yield ']'
elif t==Try:
# Try(body=[...], handlers=[...], orelse=[], finalbody=[])
yield 'try:\n'
for i in pprint_body (node.body, level+1): yield i
if len (node.handlers)>0:
for handler in node.handlers:
for i in pprint_inner (handler, level): yield i
for i in pprint_orelse (node.orelse, level): yield i
if len (node.finalbody)>0:
yield ' '*level+'finally:\n'
for i in pprint_body (node.finalbody, level+1): yield i
elif t==Tuple:
yield '( '
for i in pprint_seq (node.elts): yield i
yield ' )'
elif t==UAdd:
yield '+'
elif t==USub:
yield '-'
elif t==UnaryOp:
for i in pprint_inner (node.op): yield i
for i in pprint_inner (node.operand): yield i
elif t==While:
yield 'while '
for i in pprint_inner (node.test): yield i
yield ':\n'
for i in pprint_body (node.body, level+1): yield i
for i in pprint_orelse (node.orelse, level): yield i
elif t==With:
yield 'with '
for i in pprint_seq (node.items): yield i
yield ':\n'
for i in pprint_body (node.body, level+1): yield i
elif t==Yield:
# Yield(value=Attribute(value=Name(id='self', ctx=Load()), attr='left', ctx=Load()))
yield 'yield '
for i in pprint_inner (node.value): yield i
elif t==YieldFrom:
yield 'yield from '
for i in pprint_inner (node.value): yield i
elif t==alias_type:
yield node.name
if node.asname is not None:
yield " as "
yield node.asname
elif t==arg_type:
# arg(arg='node', annotation=None)
# TODO: annotation
yield node.arg
elif t==arguments:
# arguments(args=[arg(arg='a', annotation=None), arg(arg='b', annotation=None)],
# vararg=arg(arg='more', annotation=None), kwonlyargs=[], kw_defaults=[],
# kwarg=arg(arg='kmore', annotation=None), defaults=[Num(n=1)])
# this is tricky
# first there are five, not four, types of arguments
# positional, positional with default value, extra positional, keywords
# and extra keywords
# positional arguments are in args, and the default values in defaults
# but you have to calculate to which args they belong
# extra positional is in vararg
# keyword arguments are in kwonlyargs and the defaults in kw_defaults
# extra keywords are in kwarg
for i in pprint_args (node.args, node.defaults): yield i
if len (node.args)>0 and (node.vararg is not None or
len (node.kwonlyargs)>0 or
node.kwarg is not None):
yield ', '
if node.vararg is not None:
yield '*'
for i in pprint_inner (node.vararg): yield i
if ((len (node.args)>0 or node.vararg is not None) and
(len (node.kwonlyargs)>0 or node.kwarg is not None)):
yield ', '
for i in pprint_args (node.kwonlyargs, node.kw_defaults): yield i
if ((len (node.args)>0 or
node.vararg is not None or
len (node.kwonlyargs)>0) and node.kwarg is not None):
yield ', '
if node.kwarg is not None:
yield '**'
for i in pprint_inner (node.kwarg): yield i
elif t==comprehension:
# comprehension(target=Name(id='i', ctx=Store()),
# iter=Attribute(value=Name(id='self', ctx=Load()),
# attr='indexes', ctx=Load()),
# ifs=[Compare(left=..., ops=[IsNot()],
# comparators=[NameConstant(value=None)])])
# i in self.indexes if i.right is not None
for i in pprint_inner (node.target): yield i
yield ' in '
for i in pprint_inner (node.iter): yield i
if len (node.ifs)>0:
# TODO: more
yield ' if '
for i in pprint_inner (node.ifs[0]): yield i
elif t==keyword_type:
# keyword(arg='end', value=Str(s=''))
yield node.arg
yield '='
for i in pprint_inner (node.value): yield i
elif t==withitem:
# withitem(context_expr=..., optional_vars=Name(id='f', ctx=Store()))
for i in pprint_inner (node.context_expr): yield i
if node.optional_vars is not None:
yield ' as '
for i in pprint_inner (node.optional_vars): yield i
elif t==str:
yield node
else:
yield '\n'
yield '# unknown construction\n'
yield dump (node)
| jjconti/ayrton | ayrton/ast_pprinter.py | Python | gpl-3.0 | 18,702 | 0.020426 |
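# Hedged usage sketch for the pretty-printer above (not part of the original
# module); it relies only on the standard-library ast module, and the sample
# source string is illustrative.
import ast

tree = ast.parse("def add(a, b=1):\n    return a + b\n")
print(pprint(tree))   # re-emits approximate Python source for the parsed AST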
import web
from inginious.frontend.plugins.utils.admin_api import AdminApi
from inginious.frontend.plugins.utils import get_mandatory_parameter
class FilterTasksApi(AdminApi):
def API_POST(self):
parameters = web.input()
task_query = get_mandatory_parameter(parameters, "task_query")
limit = int(get_mandatory_parameter(parameters, "limit"))
page = int(get_mandatory_parameter(parameters, "page"))
course_ids = set(bank["courseid"]
for bank in self.database.problem_banks.find())
for course_id, course in self.course_factory.get_all_courses().items():
if self.user_manager.has_admin_rights_on_course(course):
course_ids.add(course_id)
tasks = list(self.database.tasks_cache.aggregate([
{
"$match":
{
"$text": {
"$search": task_query,
"$diacriticSensitive": False,
"$caseSensitive": False
}
}
},
{
"$match":
{
"course_id": {"$in": list(course_ids)}
}
},
{
"$project": {
"course_id": 1,
"task_id": 1,
"task_name": 1,
"task_author": 1,
"task_context": 1,
"tags": 1,
"course_name": 1,
"_id": 0,
"score": {"$meta": "textScore"}
}
},
{
"$sort": {"score": -1}
}
]))
left = limit * (page - 1)
right = left + limit
total_pages = len(tasks) // limit
if len(tasks) % limit != 0 or total_pages == 0:
total_pages += 1
if right >= len(tasks):
tasks = tasks[left:]
else:
tasks = tasks[left:right]
response = {'total_pages': total_pages, "tasks": tasks}
return 200, response
| JuezUN/INGInious | inginious/frontend/plugins/problem_bank/pages/api/filter_tasks_api.py | Python | agpl-3.0 | 2,183 | 0 |
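# Hedged note (not part of the plugin): the "$text" stage in the pipeline above
# only works if the tasks_cache collection has a text index. A minimal pymongo
# sketch follows; the connection URI, database name and indexed fields
# (task_name, task_context) are assumptions for illustration only.
from pymongo import MongoClient, TEXT

client = MongoClient("mongodb://localhost:27017")
db = client["INGInious"]
db.tasks_cache.create_index([("task_name", TEXT), ("task_context", TEXT)],
                            default_language="english")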
class Solution:
def findMin(self, nums):
mlength = len(nums)
if mlength == 0:
return -1
left = 0
right = mlength - 1
while left <= right:
mid = (left + right) >> 1
if mid == mlength - 1:
return nums[0]
if nums[mid] > nums[mid+1]:
return nums[mid+1]
else:
if nums[left] > nums[mid]:
right = mid - 1
elif nums[left] == nums[mid]:
left += 1
elif nums[left] < nums[mid]:
left = mid + 1
return nums[0]
# There are some problems with this file | WilliamYi96/Machine-Learning | LeetCode/0154.py | Python | apache-2.0 | 701 | 0.002853 |
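# Hedged usage sketch (not part of the original solution file): the method
# searches a rotated sorted array that may contain duplicates (LeetCode 154).
# The sample inputs and expected outputs below are illustrative.
if __name__ == "__main__":
    s = Solution()
    print(s.findMin([4, 5, 6, 7, 0, 1, 2]))  # expected 0
    print(s.findMin([2, 2, 2, 0, 1]))        # expected 0
    print(s.findMin([1, 1, 1]))              # expected 1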
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
""" Empty class temporary left for compatibility with previous interfaces """
from __future__ import (absolute_import, division, print_function)
import Direct.DirectEnergyConversion as DRC
from mantid.simpleapi import *
from mantid.kernel import funcinspect
from mantid import api
# the class which is responsible for data reduction
global Reducer
Reducer = None
# Statement used at debug time to pull changes in DirectEnergyConversion into Mantid
#DRC=reload(DRC)
def getReducer():
# needed on Linux to adhere to correct reference return
global Reducer
return Reducer
def setup(instname=None,reload=False):
"""
setup('mar')
setup instrument reduction parameters from instname_parameter.xml file
if the instrument has already been defined, does nothing unless
reload = True is specified
"""
global Reducer
if instname is None :
instname = config['default.instrument']
if not (Reducer is None or Reducer.prop_man is None):
old_name=Reducer.prop_man.instr_name
if old_name.upper()[0:3] == instname.upper()[0:3] :
if not reload :
return # has been already defined
Reducer = DRC.setup_reducer(instname,reload)
def arb_units(wb_run,sample_run,ei_guess,rebin,map_file='default',monovan_run=None,second_wb=None,**kwargs):
""" One step conversion of run into workspace containing information about energy transfer
Usage:
>>arb_units(wb_run,sample_run,ei_guess,rebin)
>>arb_units(wb_run,sample_run,ei_guess,rebin,**arguments)
>>arb_units(wb_run,sample_run,ei_guess,rebin,mapfile,**arguments)
>>arb_units(wb_run Whitebeam run number or file name or workspace
sample_run sample run number or file name or workspace
ei_guess Ei guess
rebin Rebin parameters
mapfile Mapfile -- if absent/'default' the defaults from IDF are used
monovan_run If present will do the absolute units normalization. Number of additional parameters
specified in **kwargs is usually requested for this. If they are absent, program uses defaults,
but the defaults (e.g. sample_mass or sample_rmm ) are usually incorrect for a particular run.
arguments The dictionary containing additional keyword arguments.
The list of allowed additional arguments is defined in InstrName_Parameters.xml file, located in
MantidPlot->View->Preferences->Mantid->Directories->Parameter Definitions
with run numbers as input:
>>dgreduce.arb_units(1000,10001,80,[-10,.1,70]) # will run on default instrument
>>dgreduce.arb_units(1000,10001,80,[-10,.1,70],'mari_res', additional keywords as required)
>>dgreduce.arb_units(1000,10001,80,'-10,.1,70','mari_res',fixei=True)
A detector calibration file must be specified if running the reduction with workspaces as input
namely:
>>w2=iliad("wb_wksp","run_wksp",ei,rebin_params,mapfile,det_cal_file=cal_file
,diag_remove_zero=False,norm_method='current')
type help() for the list of all available keywords. All available keywords are provided in InstName_Parameters.xml file
Some samples are:
norm_method =[monitor-1],[monitor-2][Current]
background =False , True
fixei =False , True
save_format =['.spe'],['.nxspe'],'none'
detector_van_range =[20,40] in mev
bkgd_range =[15000,19000] :integration range for background tests
second_white - If provided an additional set of tests is performed on this. (default = None)
hardmaskPlus - A file specifying those spectra that should be masked without testing (default=None)
tiny - Minimum threshold for acceptance (default = 1e-10)
large - Maximum threshold for acceptance (default = 1e10)
bkgd_range - A list of two numbers indicating the background range (default=instrument defaults)
diag_van_median_rate_limit_lo - Lower bound defining outliers as fraction of median value (default = 0.01)
diag_van_median_rate_limit_hi - Upper bound defining outliers as fraction of median value (default = 100.)
diag_van_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0.1)
diag_van_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 1.5)
diag_van_sig - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
difference with respect to the median value must also exceed this number of error bars (default=0.0)
diag_remove_zero - If true then zeroes in the vanadium data will count as failed (default = True)
diag_samp_samp_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0)
diag_samp_samp_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 2.0)
diag_samp_sig - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
difference with respect to the median value must also exceed this number of error bars (default=3.3)
variation -The number of medians the ratio of the first/second white beam can deviate from
the average by (default=1.1)
bleed_test - If true then the CreatePSDBleedMask algorithm is run
bleed_maxrate - If the bleed test is on then this is the maximum framerate allowed in a tube
bleed_pixels - If the bleed test is on then this is the number of pixels ignored within the
bleed test diagnostic
print_results - If True then the results are printed to the screen
diag_remove_zero =True, False (default):Diag zero counts in background range
bleed=True , turn bleed correction on and off on by default for Merlin and LET
sum =True,False(default) , sum multiple files
det_cal_file= a valid detector block file and path or a raw file. Setting this
will use the detector calibration from the specified file NOT the
input raw file
mask_run = RunNumber to use for diag instead of the input run number
one2one =True, False :Reduction will not use a mapping file
hardmaskPlus=Filename :load a hardmask file and apply together with diag mask
hardmaskOnly=Filename :load a hardmask and use as only mask
"""
global Reducer
if Reducer is None or Reducer.instrument is None:
raise ValueError("instrument has not been defined, call setup(instrument_name) first.")
# --------------------------------------------------------------------------------------------------------
# Deal with mandatory parameters for this and may be some top level procedures
# --------------------------------------------------------------------------------------------------------
if sample_run:
Reducer.sample_run = sample_run
sample_run = None
try:
n,r=funcinspect.lhs_info('both')
wksp_out=r[0]
except:
wksp_out = "reduced_ws"
#
res = Reducer.convert_to_energy(wb_run,sample_run,ei_guess,rebin,map_file,monovan_run,second_wb,**kwargs)
#
results_name = res.name()
if results_name != wksp_out:
RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)
return res
def runs_are_equal(ws1,ws2):
"""Compare two run numbers, provided either as run numbers,
or as workspaces or as ws names"""
if ws1 == ws2:
return True
#-----------------------------------------------
def get_run_num(name_or_ws):
err = None
try:
if isinstance(name_or_ws,api.MatrixWorkspace):
run_num = name_or_ws.getRunNumber()
elif name_or_ws in mtd: # this is also throw Boost.Python.ArgumentError error if mtd not accepts it
ws = mtd[name_or_ws]
run_num = ws.getRunNumber()
else:
raise AttributeError
except Exception as err:
pass
if err is not None:
raise AttributeError("Input parameter is neither workspace nor ws name")
return run_num
#-----------------------------------------------
try:
run_num1 = get_run_num(ws1)
except AttributeError:
return False
try:
run_num2 = get_run_num(ws2)
except AttributeError:
return False
return run_num1==run_num2
def abs_units(wb_for_run,sample_run,monovan_run,wb_for_monovanadium,samp_rmm,samp_mass,
ei_guess,rebin,map_file='default',monovan_mapfile='default',**kwargs):
"""
dgreduce.abs_units(wb_run Whitebeam run number or file name or workspace
sample_run Sample run run number or file name or workspace
monovan_run Monochromatic run run number or file name or workspace
wb_mono White beam for Monochromatic run run number or file name or workspace
samp_rmm Mass of formula unit of sample
samp_mass Actual sample mass
ei_guess Ei guess of run
rebin Rebin parameters for output data
map_file Mapfile for sample run
monovan_mapfile Mapfile for mono van run
keyword arguments Any specified additional keyword arguments
Example with run numbers
abs_units(11001,11002,11003,10098,250.1,5.2,80,'-10,.1,75','mari_res','mari_res')
A detector calibration file must be specified if running the reduction with workspace inputs
Example with workspace inputs
abs_units('wb_run','sam_run','mono_run','wb_for_mono',250.1,5.2,80,'-10,.1,75','mari_res','mari_res',
det_cal_file=10001,diag_remove_zero=False,norm_method='current')
A detector calibration file must be specified if running the reduction with workspace inputs
Available keywords
norm_method =[monitor-1],[monitor-2][Current]
background =False , True
fixei =False , True
save_format =['.spe'],['.nxspe'],'none'
detector_van_range =[20,40] in mev
bkgd_range =[15000,19000] :integration range for background tests
second_white - If provided an additional set of tests is performed on this. (default = None)
hard_mask_file - A file specifying those spectra that should be masked without testing (default=None)
tiny - Minimum threshold for acceptance (default = 1e-10)
large - Maximum threshold for acceptance (default = 1e10)
bkgd_range - A list of two numbers indicating the background range (default=instrument defaults)
diag_van_median_rate_limit_lo - Lower bound defining outliers as fraction of median value (default = 0.01)
diag_van_median_rate_limit_hi - Upper bound defining outliers as fraction of median value (default = 100.)
diag_van_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0.1)
diag_van_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 1.5)
diag_van_sig - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
difference with respect to the median value must also exceed this number of error bars (default=0.0)
diag_remove_zero - If true then zeros in the vanadium data will count as failed (default = True)
diag_samp_samp_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0)
diag_samp_samp_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 2.0)
diag_samp_sig - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
difference with respect to the median value must also exceed this number of error bars (default=3.3)
variation -The number of medians the ratio of the first/second white beam can deviate from
the average by (default=1.1)
bleed_test - If true then the CreatePSDBleedMask algorithm is run
bleed_maxrate - If the bleed test is on then this is the maximum frame rate allowed in a tube
bleed_pixels - If the bleed test is on then this is the number of pixels ignored within the
bleed test diagnostic
print_results - If True then the results are printed to the screen
diag_remove_zero =True, False (default):Diag zero counts in background range
bleed=True , turn bleed correction on and off on by default for Merlin and LET
sum =True,False(default) , sum multiple files
det_cal_file= a valid detector block file and path or a raw file. Setting this
will use the detector calibration from the specified file NOT the
input raw file
mask_run = RunNumber to use for diag instead of the input run number
one2one =True, False :Reduction will not use a mapping file
hardmaskPlus=Filename :load a hardmask file and apply together with diag mask
hardmaskOnly=Filename :load a hardmask and use as only mask
use_sam_msk_on_monovan=False This will set the total mask to be that of the sample run
abs_units_van_range=[-40,40] integral range for absolute vanadium data
mono_correction_factor=float User specified correction factor for absolute units normalization
"""
kwargs['monovan_mapfile'] = monovan_mapfile
kwargs['sample_mass'] = samp_mass
kwargs['sample_rmm'] = samp_rmm
if sample_run:
Reducer.sample_run = sample_run
sample_run = None
try:
n,r=funcinspect.lhs_info('both')
results_name=r[0]
except:
results_name = Reducer.prop_man.get_sample_ws_name()
if runs_are_equal(wb_for_run,wb_for_monovanadium):# wb_for_monovanadium property does not accept duplicated workspace
wb_for_monovanadium = None # if this value is none, it is constructed to be equal to wb_for_run
wksp_out = arb_units(wb_for_run,sample_run,ei_guess,rebin,map_file,monovan_run,wb_for_monovanadium,**kwargs)
if results_name != wksp_out.name():
RenameWorkspace(InputWorkspace=wksp_out,OutputWorkspace=results_name)
return wksp_out
if __name__=="__main__":
pass
# unittest.main()
#help()
#help("rubbish")
#for attr in dir(Reducer):
# print "Reduce.%s = %s" % (attr, getattr(Reducer, attr))
| mganeva/mantid | scripts/Inelastic/Direct/dgreduce.py | Python | gpl-3.0 | 15,191 | 0.009413 |
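# Hedged usage sketch (not part of the original script): a typical reduction
# session with the helpers above, assuming a Mantid installation with the MARI
# instrument definition files available. Run numbers, rebin parameters and the
# map file name are illustrative (taken from the docstring examples).
import dgreduce

dgreduce.setup('mari')
ws = dgreduce.arb_units(1000, 10001, 80, [-10, .1, 70], 'mari_res', fixei=True)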
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/community_crafting/component/shared_reinforced_wall_module.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/draft_schematic/community_crafting/component/shared_reinforced_wall_module.py | Python | mit | 480 | 0.045833 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: neorun.py <cmd=arg>
--start=path/to/neo4j/home <cmd> [arg]
: start the neo4j server in the folder specified by the path
-v version : download the version provided if no neo4j detected
-n neo4j-version: download this specific neo4j enterprise nightly version from teamcity with basic access auth
-l download-url : download the neo4j provided by this url if no neo4j found
-p new-password : change the default password to this new password
--stop=path/to/neo4j/home : stop a neo4j server
-h : show this help message
Example: neorun.py -h
neorun.py --start=path/to/neo4j/home -v 3.0.1 -p TOUFU
neorun.py --start=path/to/neo4j/home -n 3.0 -p TOUFU
neorun.py --start=path/to/neo4j/home -n 3.1
neorun.py --stop=path/to/neo4j/home
"""
import getopt
from sys import argv, stdout, exit
from neoget import neo4j_default_archive, neo4j_archive, download
from neoctl import neo4j_start, neo4j_stop, neo4j_update_default_password
from os import path, rename, getenv
import socket
from time import time, sleep, strftime
KNOWN_HOST = path.join(path.expanduser("~"), ".neo4j", "known_hosts")
NEORUN_START_ARGS_NAME = "NEORUN_START_ARGS"
class Enum(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
ServerStatus = Enum(["STARTED", "STOPPED" ])
def main():
if len(argv) <= 1:
print_help()
exit(2)
try:
opts, args = getopt.getopt(argv[1:], "hv:n:l:p:", ["start=", "stop="])
except getopt.GetoptError as err:
print(str(err))
print_help()
exit(2)
else:
exit_code = 0
for opt, arg in opts:
if opt == '-h':
print_help()
exit(2)
if opt == "--start":
neo4j_home = path.abspath(arg)
if neo4j_status() == ServerStatus.STARTED:
stdout.write("Failed to start neo4j as a neo4j server is already running on this machine.\n")
exit(2)
# get the opts from env
env = getenv(NEORUN_START_ARGS_NAME)
if env:
stdout.write("WARNING: using env var `NEORUN_START_ARGS=%s`\n" % env)
try:
start_opts, start_args = getopt.getopt(env.split(), "v:n:l:p:")
except getopt.GetoptError as err:
print(str(err))
print_help()
exit(2)
else:
start_opts = opts
# parse the opts under --start
archive_url, archive_name, require_basic_auth = neo4j_default_archive()
password = ''
for start_opt, start_arg in start_opts:
if start_opt == "-p":
password = start_arg
elif start_opt in ['-v', '-n', '-l']:
archive_url, archive_name, require_basic_auth = neo4j_archive(start_opt, start_arg)
exit_code = handle_start(archive_url, archive_name, neo4j_home, require_basic_auth)
if exit_code == 0 and password:
exit_code = neo4j_update_default_password("localhost", 7474, new_password=password) or 0
elif opt == "--stop":
if neo4j_status() == ServerStatus.STOPPED:
stdout.write("Failed to stop server as no neo4j server is running on this machine.\n")
exit(2)
exit_code = neo4j_stop(neo4j_home=arg) or test_neo4j_status(ServerStatus.STOPPED) or 0
if exit_code != 0:
break
exit(exit_code)
def handle_start(archive_url, archive_name, neo4j_home, require_basic_auth):
if not path.exists(neo4j_home):
folder_name=download(archive_url, archive_name, path.dirname(neo4j_home), require_basic_auth)
if not path.exists(neo4j_home):
# the untared name is different from what the user gives
rename(path.join(path.dirname(neo4j_home), folder_name), neo4j_home)
if path.exists(KNOWN_HOST):
known_host_backup_name = KNOWN_HOST + strftime("%Y%m%d-%H%M%S") + ".backup"
stdout.write("Found an existing known_host file, renaming it to %s.\n" % (known_host_backup_name))
rename(KNOWN_HOST, known_host_backup_name)
exit_code = neo4j_start(neo4j_home) or 0
if exit_code == 0:
exit_code = test_neo4j_status()
return exit_code
# Test if the neo4j server is started (status = STARTED)
# or if the neo4j server is stopped (status = STOPPED) within 4 mins.
# Return 0 if the test succeeds, otherwise 1
def test_neo4j_status(status = ServerStatus.STARTED):
success = False
start_time = time()
timeout = 60 * 4 # in seconds
count = 0
while not success:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
actual_status = s.connect_ex(("localhost", 7474))
if status == ServerStatus.STARTED:
success = True if actual_status == 0 else False
else:
success = True if actual_status != 0 else False
s.close()
current_time = time()
if current_time - start_time > timeout:
# failed to connect to server within timeout
stdout.write("Failed to start server in 4 mins\n")
return 1
count += 1
if count % 10 == 0:
stdout.write(".") # print .... to indicate working on it
sleep(0.1) # sleep for 100ms
# server is started
stdout.write("\n")
return 0
def neo4j_status():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = ServerStatus.STARTED if s.connect_ex(("localhost", 7474)) == 0 else ServerStatus.STOPPED
s.close()
return server_status
def print_help():
print(__doc__)
if __name__ == "__main__":
main()
| mjbradburn/masters_project | node_modules/neo4j-driver/neokit/neorun.py | Python | apache-2.0 | 6,800 | 0.004118 |
# OeQ autogenerated correlation for 'Window/Wall Ratio East in Correlation to the Building Age'
import math
import numpy as np
from . import oeqCorrelation as oeq
def get(*xin):
# OeQ autogenerated correlation for 'Window to Wall Ratio in Eastern Direction'
A_WIN_E_BY_AW= oeq.correlation(
const= -6820.10365041,
a= 14.4414621854,
b= -0.0114555878046,
c= 4.03451500345e-06,
d= -5.32281636263e-10,
mode= "lin")
return dict(A_WIN_E_BY_AW=A_WIN_E_BY_AW.lookup(*xin))
| UdK-VPT/Open_eQuarter | mole3/stat_corr/window_wall_ratio_east_SDH_by_building_age_correlation.py | Python | gpl-2.0 | 520 | 0.040385 |
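# Hedged usage sketch (not part of the generated module), assuming the Open
# eQuarter plugin package is importable; the construction year 1975 is
# illustrative.
from mole3.stat_corr import window_wall_ratio_east_SDH_by_building_age_correlation as wwr_east

estimate = wwr_east.get(1975)
print(estimate['A_WIN_E_BY_AW'])  # estimated east-facing window-to-wall ratio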
#!/usr/bin/env python
# -*- python -*-
#
# Git Version: @git@
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2014 University of Texas at Austin
# Copyright (C) 2013-2014 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
from __future__ import print_function
import os, sys, re, MySQLdb
dirNm, execName = os.path.split(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.realpath(os.path.join(dirNm, "../libexec")))
from XALTdb import XALTdb
from xalt_util import dbConfigFn
import argparse
class CmdLineOptions(object):
""" Command line Options class """
def __init__(self):
""" Empty Ctor """
pass
def execute(self):
""" Specify command line arguments and parse the command line"""
parser = argparse.ArgumentParser()
parser.add_argument("--dbname", dest='dbname', action="store", default = "xalt", help="xalt")
args = parser.parse_args()
return args
def main():
"""
This program upgrades the database used by XALT to the current version
from the version specified in the script name.
"""
args = CmdLineOptions().execute()
configFn = dbConfigFn(args.dbname)
if (not os.path.isfile(configFn)):
dirNm, exe = os.path.split(sys.argv[0])
fn = os.path.join(dirNm, configFn)
if (os.path.isfile(fn)):
configFn = fn
else:
configFn = os.path.abspath(os.path.join(dirNm, "../site", configFn))
xalt = XALTdb(configFn)
db = xalt.db()
try:
conn = xalt.connect()
cursor = conn.cursor()
# If MySQL version < 4.1, comment out the line below
cursor.execute("SET SQL_MODE=\"NO_AUTO_VALUE_ON_ZERO\"")
cursor.execute("USE "+xalt.db())
idx = 1
print("start")
###########################################################################
# NOTE: New DB schema createDB.py uses unsigned int for some columns, but #
# modifying a signed to unsigned is problematic for columns already #
# referenced as FOREIGN KEYS. Therefor this script does not update #
# those columns. #
###########################################################################
# 1
cursor.execute("""
ALTER TABLE `xalt_link`
MODIFY COLUMN `link_program` varchar(64) NOT NULL,
ADD COLUMN `link_path` varchar(1024) NOT NULL AFTER `link_program`,
ADD COLUMN `link_module_name` varchar(64) AFTER `link_path`,
ADD COLUMN `link_line` blob AFTER `link_module_name`,
ADD INDEX `index_date` (`date`)
""")
print("(%d) upgraded xalt_link table" % idx); idx += 1
# 4
cursor.execute("""
ALTER TABLE `xalt_run`
MODIFY COLUMN `job_id` char(64) NOT NULL,
MODIFY COLUMN `num_cores` int(11) unsigned NOT NULL,
MODIFY COLUMN `job_num_cores` int(11) unsigned NOT NULL,
MODIFY COLUMN `num_nodes` int(11) unsigned NOT NULL,
MODIFY COLUMN `num_threads` smallint(6) unsigned NOT NULL,
MODIFY COLUMN `exit_code` smallint(6) NOT NULL,
ADD COLUMN `cmdline` blob NOT NULL AFTER `cwd`,
ADD INDEX `index_date` (`date` )
""")
print("(%d) upgraded xalt_run table" % idx); idx += 1
# 6
cursor.execute("""
ALTER TABLE `xalt_env_name`
ADD INDEX `a_env_name` (`env_name`)
""")
print("(%d) upgraded xalt_env_name table" % idx)
idx += 1
# 7
cursor.execute("""
ALTER TABLE `join_run_env`
MODIFY COLUMN `join_id` bigint(20) unsigned NOT NULL auto_increment
""")
print("(%d) upgraded join_run_env table" % idx)
idx += 1
#11
cursor.execute("""
ALTER TABLE `join_link_function`
MODIFY COLUMN `join_id` int(11) unsigned NOT NULL auto_increment
""")
print("(%d) upgraded xalt_function table" % idx)
idx += 1
# 12
cursor.execute("""
CREATE TABLE IF NOT EXISTS `xalt_total_env` (
`envT_id` bigint(20) unsigned NOT NULL auto_increment,
`run_id` int(11) NOT NULL,
`env_blob` blob NOT NULL,
PRIMARY KEY (`envT_id`),
FOREIGN KEY (`run_id`) REFERENCES `xalt_run`(`run_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci AUTO_INCREMENT=1
""")
print("(%d) created xalt_env table" % idx); idx += 1
cursor.close()
except MySQLdb.Error as e:
print ("Error %d: %s" % (e.args[0], e.args[1]))
sys.exit (1)
if ( __name__ == '__main__'): main()
| xalt/xalt | contrib/upgradeDB_From0.7.1.py | Python | lgpl-2.1 | 5,640 | 0.009929 |
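# Hedged usage note (not part of the original script). The upgrade is normally
# run as:  python upgradeDB_From0.7.1.py --dbname xalt
# The snippet below is an illustrative post-upgrade check that one of the new
# columns exists; the credentials file and database name are assumptions.
import os
import MySQLdb

conn = MySQLdb.connect(read_default_file=os.path.expanduser("~/.my.cnf"),
                       db="xalt")
cur = conn.cursor()
cur.execute("SHOW COLUMNS FROM xalt_run LIKE 'cmdline'")
print(cur.fetchone())
cur.close()
conn.close()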
from flask import Flask, redirect, request
import flask_admin as admin
from flask.views import MethodView
class ViewWithMethodViews(admin.BaseView):
@admin.expose('/')
def index(self):
return self.render('methodtest.html')
@admin.expose_plugview('/_api/1')
class API_v1(MethodView):
def get(self, cls):
return cls.render('test.html', request=request, name="API_v1")
def post(self, cls):
return cls.render('test.html', request=request, name="API_v1")
@admin.expose_plugview('/_api/2')
class API_v2(MethodView):
def get(self, cls):
return cls.render('test.html', request=request, name="API_v2")
def post(self, cls):
return cls.render('test.html', request=request, name="API_v2")
# Create flask app
app = Flask(__name__, template_folder='templates')
# Flask views
@app.route('/')
def index():
return redirect('/admin')
if __name__ == '__main__':
# Create admin interface
admin = admin.Admin(name="Example: MethodView")
admin.add_view(ViewWithMethodViews())
admin.init_app(app)
# Start app
app.run(debug=True)
| radioprotector/flask-admin | examples/methodview/app.py | Python | bsd-3-clause | 1,160 | 0 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod, abstractproperty
import six
# The following keys are used in the segment dictionaries passed via
# the driver API. These are defined separately from similar keys in
# neutron.extensions.providernet so that drivers don't need to change
# if/when providernet moves to the core API.
#
ID = 'id'
NETWORK_TYPE = 'network_type'
PHYSICAL_NETWORK = 'physical_network'
SEGMENTATION_ID = 'segmentation_id'
@six.add_metaclass(ABCMeta)
class TypeDriver(object):
"""Define stable abstract interface for ML2 type drivers.
ML2 type drivers each support a specific network_type for provider
and/or tenant network segments. Type drivers must implement this
abstract interface, which defines the API by which the plugin uses
the driver to manage the persistent type-specific resource
allocation state associated with network segments of that type.
Network segments are represented by segment dictionaries using the
NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined
above, corresponding to the provider attributes. Future revisions
of the TypeDriver API may add additional segment dictionary
keys. Attributes not applicable for a particular network_type may
either be excluded or stored as None.
"""
@abstractmethod
def get_type(self):
"""Get driver's network type.
:returns network_type value handled by this driver
"""
pass
@abstractmethod
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
pass
@abstractmethod
def validate_provider_segment(self, segment):
"""Validate attributes of a provider network segment.
:param segment: segment dictionary using keys defined above
:raises: neutron.common.exceptions.InvalidInput if invalid
Called outside transaction context to validate the provider
attributes for a provider network segment. Raise InvalidInput
if:
- any required attribute is missing
- any prohibited or unrecognized attribute is present
- any attribute value is not valid
The network_type attribute is present in segment, but
need not be validated.
"""
pass
@abstractmethod
def reserve_provider_segment(self, session, segment):
"""Reserve resource associated with a provider network segment.
:param session: database session
:param segment: segment dictionary using keys defined above
Called inside transaction context on session to reserve the
type-specific resource for a provider network segment. The
segment dictionary passed in was returned by a previous
validate_provider_segment() call.
"""
pass
@abstractmethod
def allocate_tenant_segment(self, session):
"""Allocate resource for a new tenant network segment.
:param session: database session
:returns: segment dictionary using keys defined above
Called inside transaction context on session to allocate a new
tenant network, typically from a type-specific resource
pool. If successful, return a segment dictionary describing
the segment. If tenant network segment cannot be allocated
(i.e. tenant networks not supported or resource pool is
exhausted), return None.
"""
pass
@abstractmethod
def release_segment(self, session, segment):
"""Release network segment.
:param session: database session
:param segment: segment dictionary using keys defined above
Called inside transaction context on session to release a
tenant or provider network's type-specific resource. Runtime
errors are not expected, but raising an exception will result
in rollback of the transaction.
"""
pass
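# Hedged illustration (not part of the original module): a minimal in-memory
# TypeDriver subclass showing how the abstract interface above fits together.
# The 'toy' network type and the trivial bookkeeping are assumptions made for
# demonstration only; real ML2 type drivers persist allocations in the database
# and raise neutron's InvalidInput rather than ValueError.
class _ToyTypeDriver(TypeDriver):
    def get_type(self):
        return 'toy'

    def initialize(self):
        self._allocated = set()

    def validate_provider_segment(self, segment):
        # 'toy' segments carry no physical network, so reject one if present
        if segment.get(PHYSICAL_NETWORK):
            raise ValueError("physical_network is not valid for 'toy' segments")

    def reserve_provider_segment(self, session, segment):
        self._allocated.add(segment[SEGMENTATION_ID])

    def allocate_tenant_segment(self, session):
        seg_id = (max(self._allocated) + 1) if self._allocated else 1
        self._allocated.add(seg_id)
        return {NETWORK_TYPE: 'toy',
                PHYSICAL_NETWORK: None,
                SEGMENTATION_ID: seg_id}

    def release_segment(self, session, segment):
        self._allocated.discard(segment.get(SEGMENTATION_ID))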
@six.add_metaclass(ABCMeta)
class NetworkContext(object):
"""Context passed to MechanismDrivers for changes to network resources.
A NetworkContext instance wraps a network resource. It provides
helper methods for accessing other relevant information. Results
from expensive operations are cached so that other
MechanismDrivers can freely access the same information.
"""
@abstractproperty
def current(self):
"""Return the current state of the network.
Return the current state of the network, as defined by
NeutronPluginBaseV2.create_network and all extensions in the
ml2 plugin.
"""
pass
@abstractproperty
def original(self):
"""Return the original state of the network.
Return the original state of the network, prior to a call to
update_network. Method is only valid within calls to
update_network_precommit and update_network_postcommit.
"""
pass
@abstractproperty
def network_segments(self):
"""Return the segments associated with this network resource."""
pass
@six.add_metaclass(ABCMeta)
class SubnetContext(object):
"""Context passed to MechanismDrivers for changes to subnet resources.
A SubnetContext instance wraps a subnet resource. It provides
helper methods for accessing other relevant information. Results
from expensive operations are cached so that other
MechanismDrivers can freely access the same information.
"""
@abstractproperty
def current(self):
"""Return the current state of the subnet.
Return the current state of the subnet, as defined by
NeutronPluginBaseV2.create_subnet and all extensions in the
ml2 plugin.
"""
pass
@abstractproperty
def original(self):
"""Return the original state of the subnet.
Return the original state of the subnet, prior to a call to
update_subnet. Method is only valid within calls to
update_subnet_precommit and update_subnet_postcommit.
"""
pass
@six.add_metaclass(ABCMeta)
class PortContext(object):
"""Context passed to MechanismDrivers for changes to port resources.
A PortContext instance wraps a port resource. It provides helper
methods for accessing other relevant information. Results from
expensive operations are cached so that other MechanismDrivers can
freely access the same information.
"""
@abstractproperty
def current(self):
"""Return the current state of the port.
Return the current state of the port, as defined by
NeutronPluginBaseV2.create_port and all extensions in the ml2
plugin.
"""
pass
@abstractproperty
def original(self):
"""Return the original state of the port.
Return the original state of the port, prior to a call to
update_port. Method is only valid within calls to
update_port_precommit and update_port_postcommit.
"""
pass
@abstractproperty
def network(self):
"""Return the NetworkContext associated with this port."""
pass
@abstractproperty
def bound_segment(self):
"""Return the currently bound segment dictionary."""
pass
@abstractproperty
def original_bound_segment(self):
"""Return the original bound segment dictionary.
Return the original bound segment dictionary, prior to a call
to update_port. Method is only valid within calls to
update_port_precommit and update_port_postcommit.
"""
pass
@abstractproperty
def bound_driver(self):
"""Return the currently bound mechanism driver name."""
pass
@abstractproperty
def original_bound_driver(self):
"""Return the original bound mechanism driver name.
Return the original bound mechanism driver name, prior to a
call to update_port. Method is only valid within calls to
update_port_precommit and update_port_postcommit.
"""
pass
@abstractmethod
def host_agents(self, agent_type):
"""Get agents of the specified type on port's host.
:param agent_type: Agent type identifier
:returns: List of agents_db.Agent records
"""
pass
@abstractmethod
def set_binding(self, segment_id, vif_type, vif_details,
status=None):
"""Set the binding for the port.
:param segment_id: Network segment bound for the port.
:param vif_type: The VIF type for the bound port.
:param vif_details: Dictionary with details for VIF driver.
:param status: Port status to set if not None.
Called by MechanismDriver.bind_port to indicate success and
specify binding details to use for port. The segment_id must
identify an item in network.network_segments.
"""
pass
@six.add_metaclass(ABCMeta)
class MechanismDriver(object):
"""Define stable abstract interface for ML2 mechanism drivers.
A mechanism driver is called on the creation, update, and deletion
of networks and ports. For every event, there are two methods that
get called - one within the database transaction (method suffix of
_precommit), one right afterwards (method suffix of _postcommit).
Exceptions raised by methods called inside the transaction can
rollback, but should not make any blocking calls (for example,
REST requests to an outside controller). Methods called after
transaction commits can make blocking external calls, though these
will block the entire process. Exceptions raised in calls after
the transaction commits may cause the associated resource to be
deleted.
Because rollback outside of the transaction is not done in the
update network/port case, all data validation must be done within
methods that are part of the database transaction.
"""
@abstractmethod
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
pass
def create_network_precommit(self, context):
"""Allocate resources for a new network.
:param context: NetworkContext instance describing the new
network.
Create a new network, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_network_postcommit(self, context):
"""Create a network.
:param context: NetworkContext instance describing the new
network.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_network_precommit(self, context):
"""Update resources of a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Update values of a network, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_network_precommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_network_postcommit(self, context):
"""Update a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
update_network_postcommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def delete_network_precommit(self, context):
"""Delete resources for a network.
:param context: NetworkContext instance describing the current
state of the network, prior to the call to delete it.
Delete network resources previously allocated by this
mechanism driver for a network. Called inside transaction
context on session. Runtime errors are not expected, but
raising an exception will result in rollback of the
transaction.
"""
pass
def delete_network_postcommit(self, context):
"""Delete a network.
:param context: NetworkContext instance describing the current
state of the network, prior to the call to delete it.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
pass
def create_subnet_precommit(self, context):
"""Allocate resources for a new subnet.
:param context: SubnetContext instance describing the new
subnet.
Create a new subnet, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_subnet_postcommit(self, context):
"""Create a subnet.
:param context: SubnetContext instance describing the new
subnet.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_subnet_precommit(self, context):
"""Update resources of a subnet.
:param context: SubnetContext instance describing the new
state of the subnet, as well as the original state prior
to the update_subnet call.
Update values of a subnet, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_subnet_precommit is called for all changes to the
subnet state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_subnet_postcommit(self, context):
"""Update a subnet.
:param context: SubnetContext instance describing the new
state of the subnet, as well as the original state prior
to the update_subnet call.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
update_subnet_postcommit is called for all changes to the
subnet state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def delete_subnet_precommit(self, context):
"""Delete resources for a subnet.
:param context: SubnetContext instance describing the current
state of the subnet, prior to the call to delete it.
Delete subnet resources previously allocated by this
mechanism driver for a subnet. Called inside transaction
context on session. Runtime errors are not expected, but
raising an exception will result in rollback of the
transaction.
"""
pass
def delete_subnet_postcommit(self, context):
"""Delete a subnet.
:param context: SubnetContext instance describing the current
state of the subnet, prior to the call to delete it.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
pass
def create_port_precommit(self, context):
"""Allocate resources for a new port.
:param context: PortContext instance describing the port.
Create a new port, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_port_postcommit(self, context):
"""Create a port.
:param context: PortContext instance describing the port.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
result in the deletion of the resource.
"""
pass
def update_port_precommit(self, context):
"""Update resources of a port.
:param context: PortContext instance describing the new
state of the port, as well as the original state prior
to the update_port call.
Called inside transaction context on session to complete a
port update as defined by this mechanism driver. Raising an
exception will result in rollback of the transaction.
update_port_precommit is called for all changes to the port
state. It is up to the mechanism driver to ignore state or
state changes that it does not know or care about.
"""
pass
def update_port_postcommit(self, context):
"""Update a port.
:param context: PortContext instance describing the new
state of the port, as well as the original state prior
to the update_port call.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
result in the deletion of the resource.
update_port_postcommit is called for all changes to the port
state. It is up to the mechanism driver to ignore state or
state changes that it does not know or care about.
"""
pass
def delete_port_precommit(self, context):
"""Delete resources of a port.
:param context: PortContext instance describing the current
state of the port, prior to the call to delete it.
Called inside transaction context on session. Runtime errors
are not expected, but raising an exception will result in
rollback of the transaction.
"""
pass
def delete_port_postcommit(self, context):
"""Delete a port.
:param context: PortContext instance describing the current
state of the port, prior to the call to delete it.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
pass
def bind_port(self, context):
"""Attempt to bind a port.
:param context: PortContext instance describing the port
Called inside transaction context on session, prior to
create_port_precommit or update_port_precommit, to
attempt to establish a port binding. If the driver is able to
bind the port, it calls context.set_binding with the binding
details.
"""
pass
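# The following sketch is illustrative only (class name and log messages are
# made up, not part of this module): it shows the shape of a concrete driver
# built on the interface above. Only initialize() must be implemented; every
# other hook defaults to a no-op, and the precommit/postcommit split described
# in the MechanismDriver docstring dictates where blocking calls are allowed.
#
#     import logging
#
#     LOG = logging.getLogger(__name__)
#
#     class ExampleLoggingMechanismDriver(MechanismDriver):
#         """Log port events; never block inside the DB transaction."""
#
#         def initialize(self):
#             LOG.info("example mechanism driver loaded")
#
#         def create_port_precommit(self, context):
#             # Transaction is open here: validate only, raise to roll back.
#             pass
#
#         def create_port_postcommit(self, context):
#             # Transaction has committed: external (blocking) calls are OK.
#             LOG.info("port %s created", context.current['id'])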
| sajuptpm/neutron-ipam | neutron/plugins/ml2/driver_api.py | Python | apache-2.0 | 21,394 | 0 |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
from pyasn1 import debug
from pyasn1 import error
from pyasn1.codec.ber import eoo
from pyasn1.compat.integer import from_bytes
from pyasn1.compat.octets import oct2int, octs2ints, ints2octs, null
from pyasn1.type import base
from pyasn1.type import char
from pyasn1.type import tag
from pyasn1.type import tagmap
from pyasn1.type import univ
from pyasn1.type import useful
__all__ = ['decode']
LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
noValue = base.noValue
class AbstractDecoder(object):
protoComponent = None
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
raise error.PyAsn1Error('Decoder not implemented for %s' % (tagSet,))
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,))
class AbstractSimpleDecoder(AbstractDecoder):
@staticmethod
def substrateCollector(asn1Object, substrate, length):
return substrate[:length], substrate[length:]
def _createComponent(self, asn1Spec, tagSet, value, **options):
if options.get('native'):
return value
elif asn1Spec is None:
return self.protoComponent.clone(value, tagSet=tagSet)
elif value is noValue:
return asn1Spec
else:
return asn1Spec.clone(value)
class ExplicitTagDecoder(AbstractSimpleDecoder):
protoComponent = univ.Any('')
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if substrateFun:
return substrateFun(
self._createComponent(asn1Spec, tagSet, '', **options),
substrate, length
)
head, tail = substrate[:length], substrate[length:]
value, _ = decodeFun(head, asn1Spec, tagSet, length, **options)
if LOG:
LOG('explicit tag container carries %d octets of trailing payload '
'(will be lost!): %s' % (len(_), debug.hexdump(_)))
return value, tail
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if substrateFun:
return substrateFun(
self._createComponent(asn1Spec, tagSet, '', **options),
substrate, length
)
value, substrate = decodeFun(substrate, asn1Spec, tagSet, length, **options)
eooMarker, substrate = decodeFun(substrate, allowEoo=True, **options)
if eooMarker is eoo.endOfOctets:
return value, substrate
else:
raise error.PyAsn1Error('Missing end-of-octets terminator')
explicitTagDecoder = ExplicitTagDecoder()
class IntegerDecoder(AbstractSimpleDecoder):
protoComponent = univ.Integer(0)
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if tagSet[0].tagFormat != tag.tagFormatSimple:
raise error.PyAsn1Error('Simple tag format expected')
head, tail = substrate[:length], substrate[length:]
if not head:
return self._createComponent(asn1Spec, tagSet, 0, **options), tail
value = from_bytes(head, signed=True)
return self._createComponent(asn1Spec, tagSet, value, **options), tail
class BooleanDecoder(IntegerDecoder):
protoComponent = univ.Boolean(0)
def _createComponent(self, asn1Spec, tagSet, value, **options):
return IntegerDecoder._createComponent(
self, asn1Spec, tagSet, value and 1 or 0, **options)
class BitStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.BitString(())
supportConstructedForm = True
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
head, tail = substrate[:length], substrate[length:]
if substrateFun:
return substrateFun(self._createComponent(
asn1Spec, tagSet, noValue, **options), substrate, length)
if not head:
raise error.PyAsn1Error('Empty BIT STRING substrate')
if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
trailingBits = oct2int(head[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
value = self.protoComponent.fromOctetString(
head[1:], internalFormat=True, padding=trailingBits)
return self._createComponent(asn1Spec, tagSet, value, **options), tail
if not self.supportConstructedForm:
raise error.PyAsn1Error('Constructed encoding form prohibited '
'at %s' % self.__class__.__name__)
if LOG:
LOG('assembling constructed serialization')
# All inner fragments are of the same type, treat them as octet string
substrateFun = self.substrateCollector
bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
while head:
component, head = decodeFun(head, self.protoComponent,
substrateFun=substrateFun, **options)
trailingBits = oct2int(component[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
bitString = self.protoComponent.fromOctetString(
component[1:], internalFormat=True,
prepend=bitString, padding=trailingBits
)
return self._createComponent(asn1Spec, tagSet, bitString, **options), tail
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if substrateFun:
return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options), substrate, length)
# All inner fragments are of the same type, treat them as octet string
substrateFun = self.substrateCollector
bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
while substrate:
component, substrate = decodeFun(substrate, self.protoComponent,
substrateFun=substrateFun,
allowEoo=True, **options)
if component is eoo.endOfOctets:
break
trailingBits = oct2int(component[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
bitString = self.protoComponent.fromOctetString(
component[1:], internalFormat=True,
prepend=bitString, padding=trailingBits
)
else:
raise error.SubstrateUnderrunError('No EOO seen before substrate ends')
return self._createComponent(asn1Spec, tagSet, bitString, **options), substrate
class OctetStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.OctetString('')
supportConstructedForm = True
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
head, tail = substrate[:length], substrate[length:]
if substrateFun:
return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options),
substrate, length)
if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
return self._createComponent(asn1Spec, tagSet, head, **options), tail
if not self.supportConstructedForm:
raise error.PyAsn1Error('Constructed encoding form prohibited at %s' % self.__class__.__name__)
if LOG:
LOG('assembling constructed serialization')
# All inner fragments are of the same type, treat them as octet string
substrateFun = self.substrateCollector
header = null
while head:
component, head = decodeFun(head, self.protoComponent,
substrateFun=substrateFun,
**options)
header += component
return self._createComponent(asn1Spec, tagSet, header, **options), tail
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if substrateFun and substrateFun is not self.substrateCollector:
asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
return substrateFun(asn1Object, substrate, length)
# All inner fragments are of the same type, treat them as octet string
substrateFun = self.substrateCollector
header = null
while substrate:
component, substrate = decodeFun(substrate,
self.protoComponent,
substrateFun=substrateFun,
allowEoo=True, **options)
if component is eoo.endOfOctets:
break
header += component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return self._createComponent(asn1Spec, tagSet, header, **options), substrate
class NullDecoder(AbstractSimpleDecoder):
protoComponent = univ.Null('')
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if tagSet[0].tagFormat != tag.tagFormatSimple:
raise error.PyAsn1Error('Simple tag format expected')
head, tail = substrate[:length], substrate[length:]
component = self._createComponent(asn1Spec, tagSet, '', **options)
if head:
raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length)
return component, tail
class ObjectIdentifierDecoder(AbstractSimpleDecoder):
protoComponent = univ.ObjectIdentifier(())
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if tagSet[0].tagFormat != tag.tagFormatSimple:
raise error.PyAsn1Error('Simple tag format expected')
head, tail = substrate[:length], substrate[length:]
if not head:
raise error.PyAsn1Error('Empty substrate')
head = octs2ints(head)
oid = ()
index = 0
substrateLen = len(head)
while index < substrateLen:
subId = head[index]
index += 1
if subId < 128:
oid += (subId,)
elif subId > 128:
# Construct subid from a number of octets
nextSubId = subId
subId = 0
while nextSubId >= 128:
subId = (subId << 7) + (nextSubId & 0x7F)
if index >= substrateLen:
raise error.SubstrateUnderrunError(
'Short substrate for sub-OID past %s' % (oid,)
)
nextSubId = head[index]
index += 1
oid += ((subId << 7) + nextSubId,)
elif subId == 128:
# ASN.1 spec forbids leading zeros (0x80) in OID
# encoding, tolerating it opens a vulnerability. See
# https://www.esat.kuleuven.be/cosic/publications/article-1432.pdf
# page 7
raise error.PyAsn1Error('Invalid octet 0x80 in OID encoding')
# Decode two leading arcs
if 0 <= oid[0] <= 39:
oid = (0,) + oid
elif 40 <= oid[0] <= 79:
oid = (1, oid[0] - 40) + oid[1:]
elif oid[0] >= 80:
oid = (2, oid[0] - 80) + oid[1:]
else:
raise error.PyAsn1Error('Malformed first OID octet: %s' % head[0])
return self._createComponent(asn1Spec, tagSet, oid, **options), tail
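# Illustrative example of the sub-identifier handling above (not part of the
# original module): the BER encoding b'\x06\x03\x2b\x06\x01' carries OBJECT
# IDENTIFIER 1.3.6.1. The first content octet 0x2b (43) packs the two leading
# arcs as 40 * 1 + 3, and each remaining octet below 0x80 is one sub-identifier:
#
#     >>> from pyasn1.codec.ber import decoder
#     >>> oid, rest = decoder.decode(b'\x06\x03\x2b\x06\x01')
#     >>> print(oid)
#     1.3.6.1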
class RealDecoder(AbstractSimpleDecoder):
protoComponent = univ.Real()
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if tagSet[0].tagFormat != tag.tagFormatSimple:
raise error.PyAsn1Error('Simple tag format expected')
head, tail = substrate[:length], substrate[length:]
if not head:
return self._createComponent(asn1Spec, tagSet, 0.0, **options), tail
fo = oct2int(head[0])
head = head[1:]
if fo & 0x80: # binary encoding
if not head:
raise error.PyAsn1Error("Incomplete floating-point value")
if LOG:
LOG('decoding binary encoded REAL')
n = (fo & 0x03) + 1
if n == 4:
n = oct2int(head[0])
head = head[1:]
eo, head = head[:n], head[n:]
if not eo or not head:
raise error.PyAsn1Error('Real exponent screwed')
e = oct2int(eo[0]) & 0x80 and -1 or 0
while eo: # exponent
e <<= 8
e |= oct2int(eo[0])
eo = eo[1:]
b = fo >> 4 & 0x03 # base bits
if b > 2:
raise error.PyAsn1Error('Illegal Real base')
if b == 1: # encbase = 8
e *= 3
elif b == 2: # encbase = 16
e *= 4
p = 0
while head: # value
p <<= 8
p |= oct2int(head[0])
head = head[1:]
if fo & 0x40: # sign bit
p = -p
sf = fo >> 2 & 0x03 # scale bits
p *= 2 ** sf
value = (p, 2, e)
elif fo & 0x40: # infinite value
if LOG:
LOG('decoding infinite REAL')
value = fo & 0x01 and '-inf' or 'inf'
elif fo & 0xc0 == 0: # character encoding
if not head:
raise error.PyAsn1Error("Incomplete floating-point value")
if LOG:
LOG('decoding character encoded REAL')
try:
if fo & 0x3 == 0x1: # NR1
value = (int(head), 10, 0)
elif fo & 0x3 == 0x2: # NR2
value = float(head)
elif fo & 0x3 == 0x3: # NR3
value = float(head)
else:
raise error.SubstrateUnderrunError(
'Unknown NR (tag %s)' % fo
)
except ValueError:
raise error.SubstrateUnderrunError(
'Bad character Real syntax'
)
else:
raise error.SubstrateUnderrunError(
'Unknown encoding (tag %s)' % fo
)
return self._createComponent(asn1Spec, tagSet, value, **options), tail
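# Illustrative example of the binary REAL form handled above (not part of the
# original module): in b'\x09\x03\x80\xfb\x05' the first content octet 0x80
# selects base 2 with a one-octet exponent, 0xfb is the exponent -5 and 0x05
# the mantissa, so the value is 5 * 2**-5 = 0.15625:
#
#     >>> from pyasn1.codec.ber import decoder
#     >>> value, _ = decoder.decode(b'\x09\x03\x80\xfb\x05')
#     >>> float(value)
#     0.15625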
class AbstractConstructedDecoder(AbstractDecoder):
protoComponent = None
class UniversalConstructedTypeDecoder(AbstractConstructedDecoder):
protoRecordComponent = None
protoSequenceComponent = None
def _getComponentTagMap(self, asn1Object, idx):
raise NotImplementedError()
def _getComponentPositionByType(self, asn1Object, tagSet, idx):
raise NotImplementedError()
def _decodeComponents(self, substrate, tagSet=None, decodeFun=None, **options):
components = []
componentTypes = set()
while substrate:
component, substrate = decodeFun(substrate, **options)
if component is eoo.endOfOctets:
break
components.append(component)
componentTypes.add(component.tagSet)
# Now we have to guess is it SEQUENCE/SET or SEQUENCE OF/SET OF
# The heuristics is:
# * 1+ components of different types -> likely SEQUENCE/SET
# * otherwise -> likely SEQUENCE OF/SET OF
if len(componentTypes) > 1:
protoComponent = self.protoRecordComponent
else:
protoComponent = self.protoSequenceComponent
asn1Object = protoComponent.clone(
# construct tagSet from base tag from prototype ASN.1 object
# and additional tags recovered from the substrate
tagSet=tag.TagSet(protoComponent.tagSet.baseTag, *tagSet.superTags)
)
if LOG:
LOG('guessed %r container type (pass `asn1Spec` to guide the '
'decoder)' % asn1Object)
for idx, component in enumerate(components):
asn1Object.setComponentByPosition(
idx, component,
verifyConstraints=False,
matchTags=False, matchConstraints=False
)
return asn1Object, substrate
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if tagSet[0].tagFormat != tag.tagFormatConstructed:
raise error.PyAsn1Error('Constructed tag format expected')
head, tail = substrate[:length], substrate[length:]
if substrateFun is not None:
if asn1Spec is not None:
asn1Object = asn1Spec.clone()
elif self.protoComponent is not None:
asn1Object = self.protoComponent.clone(tagSet=tagSet)
else:
asn1Object = self.protoRecordComponent, self.protoSequenceComponent
return substrateFun(asn1Object, substrate, length)
if asn1Spec is None:
asn1Object, trailing = self._decodeComponents(
head, tagSet=tagSet, decodeFun=decodeFun, **options
)
if trailing:
if LOG:
LOG('Unused trailing %d octets encountered: %s' % (
len(trailing), debug.hexdump(trailing)))
return asn1Object, tail
asn1Object = asn1Spec.clone()
asn1Object.clear()
if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId):
namedTypes = asn1Spec.componentType
isSetType = asn1Spec.typeId == univ.Set.typeId
isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault
if LOG:
LOG('decoding %sdeterministic %s type %r chosen by type ID' % (
not isDeterministic and 'non-' or '', isSetType and 'SET' or '',
asn1Spec))
seenIndices = set()
idx = 0
while head:
if not namedTypes:
componentType = None
elif isSetType:
componentType = namedTypes.tagMapUnique
else:
try:
if isDeterministic:
componentType = namedTypes[idx].asn1Object
elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
componentType = namedTypes.getTagMapNearPosition(idx)
else:
componentType = namedTypes[idx].asn1Object
except IndexError:
raise error.PyAsn1Error(
'Excessive components decoded at %r' % (asn1Spec,)
)
component, head = decodeFun(head, componentType, **options)
if not isDeterministic and namedTypes:
if isSetType:
idx = namedTypes.getPositionByType(component.effectiveTagSet)
elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx)
asn1Object.setComponentByPosition(
idx, component,
verifyConstraints=False,
matchTags=False, matchConstraints=False
)
seenIndices.add(idx)
idx += 1
if LOG:
LOG('seen component indices %s' % seenIndices)
if namedTypes:
if not namedTypes.requiredComponents.issubset(seenIndices):
raise error.PyAsn1Error(
'ASN.1 object %s has uninitialized '
'components' % asn1Object.__class__.__name__)
if namedTypes.hasOpenTypes:
openTypes = options.get('openTypes', {})
if LOG:
LOG('using open types map: %r' % openTypes)
if openTypes or options.get('decodeOpenTypes', False):
for idx, namedType in enumerate(namedTypes.namedTypes):
if not namedType.openType:
continue
if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue:
continue
governingValue = asn1Object.getComponentByName(
namedType.openType.name
)
try:
openType = openTypes[governingValue]
except KeyError:
try:
openType = namedType.openType[governingValue]
except KeyError:
if LOG:
LOG('failed to resolve open type by governing '
'value %r' % (governingValue,))
continue
if LOG:
LOG('resolved open type %r by governing '
'value %r' % (openType, governingValue))
containerValue = asn1Object.getComponentByPosition(idx)
if containerValue.typeId in (
univ.SetOf.typeId, univ.SequenceOf.typeId):
for pos, containerElement in enumerate(
containerValue):
component, rest = decodeFun(
containerValue[pos].asOctets(),
asn1Spec=openType, **options
)
containerValue[pos] = component
else:
component, rest = decodeFun(
asn1Object.getComponentByPosition(idx).asOctets(),
asn1Spec=openType, **options
)
asn1Object.setComponentByPosition(idx, component)
else:
asn1Object.verifySizeSpec()
else:
asn1Object = asn1Spec.clone()
asn1Object.clear()
componentType = asn1Spec.componentType
if LOG:
LOG('decoding type %r chosen by given `asn1Spec`' % componentType)
idx = 0
while head:
component, head = decodeFun(head, componentType, **options)
asn1Object.setComponentByPosition(
idx, component,
verifyConstraints=False,
matchTags=False, matchConstraints=False
)
idx += 1
return asn1Object, tail
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if tagSet[0].tagFormat != tag.tagFormatConstructed:
raise error.PyAsn1Error('Constructed tag format expected')
if substrateFun is not None:
if asn1Spec is not None:
asn1Object = asn1Spec.clone()
elif self.protoComponent is not None:
asn1Object = self.protoComponent.clone(tagSet=tagSet)
else:
asn1Object = self.protoRecordComponent, self.protoSequenceComponent
return substrateFun(asn1Object, substrate, length)
if asn1Spec is None:
return self._decodeComponents(
substrate, tagSet=tagSet, decodeFun=decodeFun,
**dict(options, allowEoo=True)
)
asn1Object = asn1Spec.clone()
asn1Object.clear()
if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId):
namedTypes = asn1Object.componentType
isSetType = asn1Object.typeId == univ.Set.typeId
isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault
if LOG:
LOG('decoding %sdeterministic %s type %r chosen by type ID' % (
not isDeterministic and 'non-' or '', isSetType and 'SET' or '',
asn1Spec))
seenIndices = set()
idx = 0
while substrate:
if len(namedTypes) <= idx:
asn1Spec = None
elif isSetType:
asn1Spec = namedTypes.tagMapUnique
else:
try:
if isDeterministic:
asn1Spec = namedTypes[idx].asn1Object
elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
asn1Spec = namedTypes.getTagMapNearPosition(idx)
else:
asn1Spec = namedTypes[idx].asn1Object
except IndexError:
raise error.PyAsn1Error(
'Excessive components decoded at %r' % (asn1Object,)
)
component, substrate = decodeFun(substrate, asn1Spec, allowEoo=True, **options)
if component is eoo.endOfOctets:
break
if not isDeterministic and namedTypes:
if isSetType:
idx = namedTypes.getPositionByType(component.effectiveTagSet)
elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx)
asn1Object.setComponentByPosition(
idx, component,
verifyConstraints=False,
matchTags=False, matchConstraints=False
)
seenIndices.add(idx)
idx += 1
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
if LOG:
LOG('seen component indices %s' % seenIndices)
if namedTypes:
if not namedTypes.requiredComponents.issubset(seenIndices):
raise error.PyAsn1Error('ASN.1 object %s has uninitialized components' % asn1Object.__class__.__name__)
if namedTypes.hasOpenTypes:
openTypes = options.get('openTypes', {})
if LOG:
LOG('using open types map: %r' % openTypes)
if openTypes or options.get('decodeOpenTypes', False):
for idx, namedType in enumerate(namedTypes.namedTypes):
if not namedType.openType:
continue
if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue:
continue
governingValue = asn1Object.getComponentByName(
namedType.openType.name
)
try:
openType = openTypes[governingValue]
except KeyError:
try:
openType = namedType.openType[governingValue]
except KeyError:
if LOG:
LOG('failed to resolve open type by governing '
'value %r' % (governingValue,))
continue
if LOG:
LOG('resolved open type %r by governing '
'value %r' % (openType, governingValue))
containerValue = asn1Object.getComponentByPosition(idx)
if containerValue.typeId in (
univ.SetOf.typeId, univ.SequenceOf.typeId):
for pos, containerElement in enumerate(
containerValue):
component, rest = decodeFun(
containerValue[pos].asOctets(),
asn1Spec=openType, **dict(options, allowEoo=True)
)
containerValue[pos] = component
else:
component, rest = decodeFun(
asn1Object.getComponentByPosition(idx).asOctets(),
asn1Spec=openType, **dict(options, allowEoo=True)
)
if component is not eoo.endOfOctets:
asn1Object.setComponentByPosition(idx, component)
else:
asn1Object.verifySizeSpec()
else:
asn1Object = asn1Spec.clone()
asn1Object.clear()
componentType = asn1Spec.componentType
if LOG:
LOG('decoding type %r chosen by given `asn1Spec`' % componentType)
idx = 0
while substrate:
component, substrate = decodeFun(substrate, componentType, allowEoo=True, **options)
if component is eoo.endOfOctets:
break
asn1Object.setComponentByPosition(
idx, component,
verifyConstraints=False,
matchTags=False, matchConstraints=False
)
idx += 1
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return asn1Object, substrate
class SequenceOrSequenceOfDecoder(UniversalConstructedTypeDecoder):
protoRecordComponent = univ.Sequence()
protoSequenceComponent = univ.SequenceOf()
class SequenceDecoder(SequenceOrSequenceOfDecoder):
protoComponent = univ.Sequence()
class SequenceOfDecoder(SequenceOrSequenceOfDecoder):
protoComponent = univ.SequenceOf()
class SetOrSetOfDecoder(UniversalConstructedTypeDecoder):
protoRecordComponent = univ.Set()
protoSequenceComponent = univ.SetOf()
class SetDecoder(SetOrSetOfDecoder):
protoComponent = univ.Set()
class SetOfDecoder(SetOrSetOfDecoder):
protoComponent = univ.SetOf()
class ChoiceDecoder(AbstractConstructedDecoder):
protoComponent = univ.Choice()
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
head, tail = substrate[:length], substrate[length:]
if asn1Spec is None:
asn1Object = self.protoComponent.clone(tagSet=tagSet)
else:
asn1Object = asn1Spec.clone()
if substrateFun:
return substrateFun(asn1Object, substrate, length)
if asn1Object.tagSet == tagSet:
if LOG:
LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,))
component, head = decodeFun(
head, asn1Object.componentTagMap, **options
)
else:
if LOG:
LOG('decoding %s as untagged CHOICE' % (tagSet,))
component, head = decodeFun(
head, asn1Object.componentTagMap,
tagSet, length, state, **options
)
effectiveTagSet = component.effectiveTagSet
if LOG:
LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet))
asn1Object.setComponentByType(
effectiveTagSet, component,
verifyConstraints=False,
matchTags=False, matchConstraints=False,
innerFlag=False
)
return asn1Object, tail
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if asn1Spec is None:
asn1Object = self.protoComponent.clone(tagSet=tagSet)
else:
asn1Object = asn1Spec.clone()
if substrateFun:
return substrateFun(asn1Object, substrate, length)
if asn1Object.tagSet == tagSet:
if LOG:
LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,))
component, substrate = decodeFun(
substrate, asn1Object.componentType.tagMapUnique, **options
)
# eat up EOO marker
eooMarker, substrate = decodeFun(
substrate, allowEoo=True, **options
)
if eooMarker is not eoo.endOfOctets:
raise error.PyAsn1Error('No EOO seen before substrate ends')
else:
if LOG:
LOG('decoding %s as untagged CHOICE' % (tagSet,))
component, substrate = decodeFun(
substrate, asn1Object.componentType.tagMapUnique,
tagSet, length, state, **options
)
effectiveTagSet = component.effectiveTagSet
if LOG:
LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet))
asn1Object.setComponentByType(
effectiveTagSet, component,
verifyConstraints=False,
matchTags=False, matchConstraints=False,
innerFlag=False
)
return asn1Object, substrate
class AnyDecoder(AbstractSimpleDecoder):
protoComponent = univ.Any()
def valueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if asn1Spec is None:
isUntagged = True
elif asn1Spec.__class__ is tagmap.TagMap:
isUntagged = tagSet not in asn1Spec.tagMap
else:
isUntagged = tagSet != asn1Spec.tagSet
if isUntagged:
fullSubstrate = options['fullSubstrate']
# untagged Any container, recover inner header substrate
length += len(fullSubstrate) - len(substrate)
substrate = fullSubstrate
if LOG:
LOG('decoding as untagged ANY, substrate %s' % debug.hexdump(substrate))
if substrateFun:
return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options),
substrate, length)
head, tail = substrate[:length], substrate[length:]
return self._createComponent(asn1Spec, tagSet, head, **options), tail
def indefLenValueDecoder(self, substrate, asn1Spec,
tagSet=None, length=None, state=None,
decodeFun=None, substrateFun=None,
**options):
if asn1Spec is None:
isTagged = False
elif asn1Spec.__class__ is tagmap.TagMap:
isTagged = tagSet in asn1Spec.tagMap
else:
isTagged = tagSet == asn1Spec.tagSet
if isTagged:
# tagged Any type -- consume header substrate
header = null
if LOG:
LOG('decoding as tagged ANY')
else:
fullSubstrate = options['fullSubstrate']
# untagged Any, recover header substrate
header = fullSubstrate[:-len(substrate)]
if LOG:
LOG('decoding as untagged ANY, header substrate %s' % debug.hexdump(header))
# Any components do not inherit initial tag
asn1Spec = self.protoComponent
if substrateFun and substrateFun is not self.substrateCollector:
asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
return substrateFun(asn1Object, header + substrate, length + len(header))
if LOG:
LOG('assembling constructed serialization')
# All inner fragments are of the same type, treat them as octet string
substrateFun = self.substrateCollector
while substrate:
component, substrate = decodeFun(substrate, asn1Spec,
substrateFun=substrateFun,
allowEoo=True, **options)
if component is eoo.endOfOctets:
break
header += component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
if substrateFun:
return header, substrate
else:
return self._createComponent(asn1Spec, tagSet, header, **options), substrate
# character string types
class UTF8StringDecoder(OctetStringDecoder):
protoComponent = char.UTF8String()
class NumericStringDecoder(OctetStringDecoder):
protoComponent = char.NumericString()
class PrintableStringDecoder(OctetStringDecoder):
protoComponent = char.PrintableString()
class TeletexStringDecoder(OctetStringDecoder):
protoComponent = char.TeletexString()
class VideotexStringDecoder(OctetStringDecoder):
protoComponent = char.VideotexString()
class IA5StringDecoder(OctetStringDecoder):
protoComponent = char.IA5String()
class GraphicStringDecoder(OctetStringDecoder):
protoComponent = char.GraphicString()
class VisibleStringDecoder(OctetStringDecoder):
protoComponent = char.VisibleString()
class GeneralStringDecoder(OctetStringDecoder):
protoComponent = char.GeneralString()
class UniversalStringDecoder(OctetStringDecoder):
protoComponent = char.UniversalString()
class BMPStringDecoder(OctetStringDecoder):
protoComponent = char.BMPString()
# "useful" types
class ObjectDescriptorDecoder(OctetStringDecoder):
protoComponent = useful.ObjectDescriptor()
class GeneralizedTimeDecoder(OctetStringDecoder):
protoComponent = useful.GeneralizedTime()
class UTCTimeDecoder(OctetStringDecoder):
protoComponent = useful.UTCTime()
tagMap = {
univ.Integer.tagSet: IntegerDecoder(),
univ.Boolean.tagSet: BooleanDecoder(),
univ.BitString.tagSet: BitStringDecoder(),
univ.OctetString.tagSet: OctetStringDecoder(),
univ.Null.tagSet: NullDecoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierDecoder(),
univ.Enumerated.tagSet: IntegerDecoder(),
univ.Real.tagSet: RealDecoder(),
univ.Sequence.tagSet: SequenceOrSequenceOfDecoder(), # conflicts with SequenceOf
univ.Set.tagSet: SetOrSetOfDecoder(), # conflicts with SetOf
univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any
# character string types
char.UTF8String.tagSet: UTF8StringDecoder(),
char.NumericString.tagSet: NumericStringDecoder(),
char.PrintableString.tagSet: PrintableStringDecoder(),
char.TeletexString.tagSet: TeletexStringDecoder(),
char.VideotexString.tagSet: VideotexStringDecoder(),
char.IA5String.tagSet: IA5StringDecoder(),
char.GraphicString.tagSet: GraphicStringDecoder(),
char.VisibleString.tagSet: VisibleStringDecoder(),
char.GeneralString.tagSet: GeneralStringDecoder(),
char.UniversalString.tagSet: UniversalStringDecoder(),
char.BMPString.tagSet: BMPStringDecoder(),
# useful types
useful.ObjectDescriptor.tagSet: ObjectDescriptorDecoder(),
useful.GeneralizedTime.tagSet: GeneralizedTimeDecoder(),
useful.UTCTime.tagSet: UTCTimeDecoder()
}
# Type-to-codec map for ambiguous ASN.1 types
typeMap = {
univ.Set.typeId: SetDecoder(),
univ.SetOf.typeId: SetOfDecoder(),
univ.Sequence.typeId: SequenceDecoder(),
univ.SequenceOf.typeId: SequenceOfDecoder(),
univ.Choice.typeId: ChoiceDecoder(),
univ.Any.typeId: AnyDecoder()
}
# Put in non-ambiguous types for faster codec lookup
for typeDecoder in tagMap.values():
if typeDecoder.protoComponent is not None:
typeId = typeDecoder.protoComponent.__class__.typeId
if typeId is not None and typeId not in typeMap:
typeMap[typeId] = typeDecoder
(stDecodeTag,
stDecodeLength,
stGetValueDecoder,
stGetValueDecoderByAsn1Spec,
stGetValueDecoderByTag,
stTryAsExplicitTag,
stDecodeValue,
stDumpRawValue,
stErrorCondition,
stStop) = [x for x in range(10)]
class Decoder(object):
defaultErrorState = stErrorCondition
#defaultErrorState = stDumpRawValue
defaultRawDecoder = AnyDecoder()
supportIndefLength = True
# noinspection PyDefaultArgument
def __init__(self, tagMap, typeMap={}):
self.__tagMap = tagMap
self.__typeMap = typeMap
# Tag & TagSet objects caches
self.__tagCache = {}
self.__tagSetCache = {}
self.__eooSentinel = ints2octs((0, 0))
def __call__(self, substrate, asn1Spec=None,
tagSet=None, length=None, state=stDecodeTag,
decodeFun=None, substrateFun=None,
**options):
if LOG:
LOG('decoder called at scope %s with state %d, working with up to %d octets of substrate: %s' % (debug.scope, state, len(substrate), debug.hexdump(substrate)))
allowEoo = options.pop('allowEoo', False)
# Look for end-of-octets sentinel
if allowEoo and self.supportIndefLength:
if substrate[:2] == self.__eooSentinel:
if LOG:
LOG('end-of-octets sentinel found')
return eoo.endOfOctets, substrate[2:]
value = noValue
tagMap = self.__tagMap
typeMap = self.__typeMap
tagCache = self.__tagCache
tagSetCache = self.__tagSetCache
fullSubstrate = substrate
while state is not stStop:
if state is stDecodeTag:
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on tag decoding'
)
# Decode tag
isShortTag = True
firstOctet = substrate[0]
substrate = substrate[1:]
try:
lastTag = tagCache[firstOctet]
except KeyError:
integerTag = oct2int(firstOctet)
tagClass = integerTag & 0xC0
tagFormat = integerTag & 0x20
tagId = integerTag & 0x1F
if tagId == 0x1F:
isShortTag = False
lengthOctetIdx = 0
tagId = 0
try:
while True:
integerTag = oct2int(substrate[lengthOctetIdx])
lengthOctetIdx += 1
tagId <<= 7
tagId |= (integerTag & 0x7F)
if not integerTag & 0x80:
break
substrate = substrate[lengthOctetIdx:]
except IndexError:
raise error.SubstrateUnderrunError(
'Short octet stream on long tag decoding'
)
lastTag = tag.Tag(
tagClass=tagClass, tagFormat=tagFormat, tagId=tagId
)
if isShortTag:
# cache short tags
tagCache[firstOctet] = lastTag
if tagSet is None:
if isShortTag:
try:
tagSet = tagSetCache[firstOctet]
except KeyError:
# base tag not recovered
tagSet = tag.TagSet((), lastTag)
tagSetCache[firstOctet] = tagSet
else:
tagSet = tag.TagSet((), lastTag)
else:
tagSet = lastTag + tagSet
state = stDecodeLength
if LOG:
LOG('tag decoded into %s, decoding length' % tagSet)
if state is stDecodeLength:
# Decode length
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on length decoding'
)
firstOctet = oct2int(substrate[0])
if firstOctet < 128:
size = 1
length = firstOctet
elif firstOctet > 128:
size = firstOctet & 0x7F
# encoded in size bytes
encodedLength = octs2ints(substrate[1:size + 1])
                    # no explicit check on maximum size here; that shouldn't be
                    # a problem, since lengths larger than any real substrate
                    # can still be represented
if len(encodedLength) != size:
raise error.SubstrateUnderrunError(
'%s<%s at %s' % (size, len(encodedLength), tagSet)
)
length = 0
for lengthOctet in encodedLength:
length <<= 8
length |= lengthOctet
size += 1
else:
size = 1
length = -1
substrate = substrate[size:]
if length == -1:
if not self.supportIndefLength:
raise error.PyAsn1Error('Indefinite length encoding not supported by this codec')
else:
if len(substrate) < length:
raise error.SubstrateUnderrunError('%d-octet short' % (length - len(substrate)))
state = stGetValueDecoder
if LOG:
LOG('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(length == -1 and substrate or substrate[:length])))
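            # Length octet examples (illustrative): 0x26 is the definite short
            # form (38 content octets follow); 0x82 0x01 0x00 is the definite
            # long form (two length octets encoding 256); 0x80 selects the
            # indefinite form (length == -1 here), which is terminated later
            # by an end-of-octets marker.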
if state is stGetValueDecoder:
if asn1Spec is None:
state = stGetValueDecoderByTag
else:
state = stGetValueDecoderByAsn1Spec
#
            # There are two ways of creating subtypes in ASN.1, and they affect
            # decoder operation differently:
            # 1) Only base types are used, or no IMPLICIT tagging has been
            #    applied on subtyping.
            # 2) Subtype syntax drops base type information (by means of
            #    IMPLICIT tagging).
            # The first case allows complete tag recovery from the substrate,
            # while the second one requires the original ASN.1 type spec for
            # decoding.
#
# In either case a set of tags (tagSet) is coming from substrate
# in an incremental, tag-by-tag fashion (this is the case of
# EXPLICIT tag which is most basic). Outermost tag comes first
# from the wire.
#
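            # For example (illustrative): with a value defined as
            # [0] OCTET STRING holding 'ab', EXPLICIT tagging keeps the inner
            # type on the wire -- a0 04 04 02 61 62 -- so the tag set can be
            # fully recovered from the substrate, while IMPLICIT tagging
            # replaces it -- 80 02 61 62 -- and only an asn1Spec can tell the
            # decoder that the payload is an OCTET STRING.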
if state is stGetValueDecoderByTag:
try:
concreteDecoder = tagMap[tagSet]
except KeyError:
concreteDecoder = None
if concreteDecoder:
state = stDecodeValue
else:
try:
concreteDecoder = tagMap[tagSet[:1]]
except KeyError:
concreteDecoder = None
if concreteDecoder:
state = stDecodeValue
else:
state = stTryAsExplicitTag
if LOG:
LOG('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag'))
debug.scope.push(concreteDecoder is None and '?' or concreteDecoder.protoComponent.__class__.__name__)
if state is stGetValueDecoderByAsn1Spec:
if asn1Spec.__class__ is tagmap.TagMap:
try:
chosenSpec = asn1Spec[tagSet]
except KeyError:
chosenSpec = None
if LOG:
LOG('candidate ASN.1 spec is a map of:')
for firstOctet, v in asn1Spec.presentTypes.items():
LOG(' %s -> %s' % (firstOctet, v.__class__.__name__))
if asn1Spec.skipTypes:
LOG('but neither of: ')
for firstOctet, v in asn1Spec.skipTypes.items():
LOG(' %s -> %s' % (firstOctet, v.__class__.__name__))
LOG('new candidate ASN.1 spec is %s, chosen by %s' % (chosenSpec is None and '<none>' or chosenSpec.prettyPrintType(), tagSet))
elif tagSet == asn1Spec.tagSet or tagSet in asn1Spec.tagMap:
chosenSpec = asn1Spec
if LOG:
LOG('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__)
else:
chosenSpec = None
if chosenSpec is not None:
try:
# ambiguous type or just faster codec lookup
concreteDecoder = typeMap[chosenSpec.typeId]
if LOG:
LOG('value decoder chosen for an ambiguous type by type ID %s' % (chosenSpec.typeId,))
except KeyError:
# use base type for codec lookup to recover untagged types
baseTagSet = tag.TagSet(chosenSpec.tagSet.baseTag, chosenSpec.tagSet.baseTag)
try:
# base type or tagged subtype
concreteDecoder = tagMap[baseTagSet]
if LOG:
LOG('value decoder chosen by base %s' % (baseTagSet,))
except KeyError:
concreteDecoder = None
if concreteDecoder:
asn1Spec = chosenSpec
state = stDecodeValue
else:
state = stTryAsExplicitTag
else:
concreteDecoder = None
state = stTryAsExplicitTag
if LOG:
LOG('codec %s chosen by ASN.1 spec, decoding %s' % (state is stDecodeValue and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag'))
debug.scope.push(chosenSpec is None and '?' or chosenSpec.__class__.__name__)
if state is stDecodeValue:
if not options.get('recursiveFlag', True) and not substrateFun: # deprecate this
substrateFun = lambda a, b, c: (a, b[:c])
options.update(fullSubstrate=fullSubstrate)
if length == -1: # indef length
value, substrate = concreteDecoder.indefLenValueDecoder(
substrate, asn1Spec,
tagSet, length, stGetValueDecoder,
self, substrateFun,
**options
)
else:
value, substrate = concreteDecoder.valueDecoder(
substrate, asn1Spec,
tagSet, length, stGetValueDecoder,
self, substrateFun,
**options
)
if LOG:
LOG('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, isinstance(value, base.Asn1Item) and value.prettyPrint() or value, substrate and debug.hexdump(substrate) or '<none>'))
state = stStop
break
if state is stTryAsExplicitTag:
if (tagSet and
tagSet[0].tagFormat == tag.tagFormatConstructed and
tagSet[0].tagClass != tag.tagClassUniversal):
# Assume explicit tagging
concreteDecoder = explicitTagDecoder
state = stDecodeValue
else:
concreteDecoder = None
state = self.defaultErrorState
if LOG:
LOG('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as failure'))
if state is stDumpRawValue:
concreteDecoder = self.defaultRawDecoder
if LOG:
LOG('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__)
state = stDecodeValue
if state is stErrorCondition:
raise error.PyAsn1Error(
'%s not in asn1Spec: %r' % (tagSet, asn1Spec)
)
if LOG:
debug.scope.pop()
LOG('decoder left scope %s, call completed' % debug.scope)
return value, substrate
#: Turns BER octet stream into an ASN.1 object.
#:
#: Takes a BER octet-stream and decodes it into an ASN.1 object
#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
#: may be a scalar or an arbitrary nested structure.
#:
#: Parameters
#: ----------
#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#: BER octet-stream
#:
#: Keyword Args
#: ------------
#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
#:     being decoded, *asn1Spec* may or may not be required. The most common
#:     reason it is required is that the ASN.1 structure is encoded with *IMPLICIT* tagging.
#:
#: Returns
#: -------
#: : :py:class:`tuple`
#: A tuple of pyasn1 object recovered from BER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: and the unprocessed trailing portion of the *substrate* (may be empty)
#:
#: Raises
#: ------
#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
#: On decoding errors
#:
#: Examples
#: --------
#: Decode BER serialisation without ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
#: >>> str(s)
#: SequenceOf:
#: 1 2 3
#:
#: Decode BER serialisation with ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
#: >>> str(s)
#: SequenceOf:
#: 1 2 3
#:
decode = Decoder(tagMap, typeMap)
# XXX
# non-recursive decoding; return position rather than substrate
| kawamon/hue | desktop/core/ext-py/pyasn1-0.4.6/pyasn1/codec/ber/decoder.py | Python | apache-2.0 | 58,050 | 0.001602 |
import zmq
ctx = zmq.Context.instance()
server = ctx.socket(zmq.PUSH)
server.bind('inproc://foo')
clients = [ctx.socket(zmq.PULL) for i in range(10)]
poller = zmq.Poller()
for client in clients:
client.connect('inproc://foo')
poller.register(client, zmq.POLLIN)
for client in clients:
server.send(b'DATA')
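# PUSH distributes outgoing messages round-robin over the connected PULL
# sockets, so each of the 10 clients should typically receive exactly one of
# the 10 messages; the non-blocking poll(0) below then reports the readable ones.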
for sock, flags in poller.poll(0):
print(sock, repr(sock.recv()))
| waveform80/presentations | concurrency/demo3.py | Python | cc0-1.0 | 390 | 0 |
"""myproject
"""
__author__ = 'myproject:author_name'
__email__ = 'myproject:author_email'
#----------------------------------------------------------------------
def hello_world(extend_hello=False):
"""prints hello world
:returns: None
:rtype: None
"""
print 'Hello World!{}'.format(' Beautiful World!' if extend_hello else '')
| diszgaurav/projecture | projecture/projects/python/myproject/myproject/myproject.py | Python | mit | 353 | 0.005666 |
from common import * # NOQA
SERVICE = 'com.docker.compose.service'
PROJECT = 'com.docker.compose.project'
NUMBER = 'com.docker.compose.container-number'
def test_container_create_count(client, context):
project, service, c = _create_service(client, context)
assert c.labels['io.rancher.service.deployment.unit'] is not None
assert c.labels['io.rancher.service.launch.config'] == \
'io.rancher.service.primary.launch.config'
assert c.labels['io.rancher.stack_service.name'] == project + '/' + service
assert c.labels['io.rancher.stack.name'] == project
s = find_one(c.services)
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert s.name == service
assert s.type == 'composeService'
assert s.kind == 'composeService'
assert s.state == 'active'
assert s.state == 'active'
selector = 'com.docker.compose.project={}, ' \
'com.docker.compose.service={}'.format(project, service)
assert s.selectorContainer == selector
assert env.name == project
assert env.state == 'active'
assert env.type == 'composeProject'
assert env.kind == 'composeProject'
assert set(env.actions.keys()) == set(['remove'])
assert set(s.actions.keys()) == set(['remove'])
def _create_service(client, context, project=None, service=None):
if project is None:
project = 'p-' + random_str()
if service is None:
service = 's-' + random_str()
c = context.create_container(name='{}_{}_1'.format(service, project),
labels={
SERVICE: service,
PROJECT: project,
NUMBER: '1',
}, networkMode='none')
assert c.state == 'running'
return project, service, c
def test_container_remove(client, context):
project, service, c = _create_service(client, context)
s = find_one(c.services)
map = find_one(s.serviceExposeMaps)
s = client.wait_success(s)
env = client.wait_success(s.environment())
c = client.delete(c)
c = client.wait_success(c)
assert c.state == 'removed'
wait_for(lambda: client.reload(map).state != 'active')
map = client.wait_success(map)
assert map.state == 'removed'
s = client.wait_success(s)
env = client.wait_success(env)
assert s.state == 'removed'
assert env.state == 'removed'
def test_container_two_remove(client, context):
project, service, c = _create_service(client, context)
project, service, c = _create_service(client, context, project, service)
s = find_one(c.services)
maps = s.serviceExposeMaps()
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert len(maps) == 2
c = client.delete(c)
c = client.wait_success(c)
assert c.state == 'removed'
wait_for(lambda: len([x for x in s.serviceExposeMaps()
if x.removed is None]) == 1)
s = client.wait_success(s)
env = client.wait_success(env)
assert s.state == 'active'
assert env.state == 'active'
def test_service_two_remove(client, context):
project, service, c = _create_service(client, context)
project, _, _ = _create_service(client, context, project)
s = find_one(c.services)
map = find_one(s.serviceExposeMaps)
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert len(env.services()) == 2
assert s.state == 'active'
s = client.delete(s)
s = client.wait_success(s)
assert s.state == 'removed'
map = client.wait_success(map)
assert map.state == 'removed'
c = client.wait_success(c)
assert c.state == 'removed'
env = client.wait_success(env)
assert env.state == 'active'
def test_service_remove(client, context):
project, service, c = _create_service(client, context)
s = find_one(c.services)
map = find_one(s.serviceExposeMaps)
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert s.state == 'active'
s = client.delete(s)
s = client.wait_success(s)
assert s.state == 'removed'
map = client.wait_success(map)
assert map.state == 'removed'
c = client.wait_success(c)
assert c.state == 'removed'
env = client.wait_success(env)
assert env.state == 'removed'
def test_env_remove(client, context):
project, service, c = _create_service(client, context)
s = find_one(c.services)
map = find_one(s.serviceExposeMaps)
s = client.wait_success(s)
env = client.wait_success(s.environment())
assert s.state == 'active'
env = client.delete(env)
env = client.wait_success(env)
assert env.state == 'removed'
s = client.wait_success(s)
assert s.state == 'removed'
map = client.wait_success(map)
assert map.state == 'removed'
c = client.wait_success(c)
assert c.state == 'removed'
def test_compose_project_create_required(client, context):
template = 'nginx:\n image: nginx'
assert_required_fields(client.create_compose_project, name=random_str(),
templates={'x': template})
def test_compose_project_create(client, context):
name = random_str()
    template = 'nginx:\n' \
' image: nginx'
project = client.create_compose_project(name=name,
templates={'x': template})
project = client.wait_success(project)
assert project.name == name
assert project.state == 'active'
assert project.type == 'composeProject'
assert project.kind == 'composeProject'
assert project.templates == {'x': template}
| wlan0/cattle | tests/integration-v1/cattletest/core/test_compose.py | Python | apache-2.0 | 5,735 | 0 |
# vim:ts=4:sts=4:sw=4:expandtab
"""Package. Manages event queues.
Writing event-driven code
-------------------------
Event-driven procedures should be written as python coroutines (extended generators).
To call the event API, yield an instance of the appropriate command. You can use
sub-procedures - just yield the appropriate generator (a minor nuisance is that you
cannot have such sub-procedure return a value).
Example
-------
.. code:: python
from satori.events import *
def countdown():
queue = QueueId('any string will do')
mapping = yield Map({}, queue)
yield Attach(queue)
yield Send(Event(left=10))
while True:
q, event = yield Receive()
if event.left == 0:
break
event.left -= 1
yield Send(event)
yield Unmap(mapping)
yield Detach(queue)
"""
from .api import Event, MappingId, QueueId
from .protocol import Attach, Detach
from .protocol import Map, Unmap
from .protocol import Send, Receive
from .protocol import KeepAlive, Disconnect, ProtocolError
from .api import Manager
from .master import Master
from .slave import Slave
from .client2 import Client2
from .slave2 import Slave2
__all__ = (
'Event', 'MappingId', 'QueueId',
'Attach', 'Detach',
'Map', 'Unmap',
'Send', 'Receive',
'KeepAlive', 'ProtocolError',
'Master', 'Slave',
)
| zielmicha/satori | satori.events/satori/events/__init__.py | Python | mit | 1,410 | 0.002128 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-06-01 12:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0276_professional_integration'),
]
operations = [
migrations.AlterField(
model_name='learningunit',
name='end_year',
field=models.IntegerField(blank=True, null=True, verbose_name='end_year_title'),
),
migrations.AlterField(
model_name='learningunit',
name='start_year',
field=models.IntegerField(verbose_name='start_year'),
),
]
| uclouvain/OSIS-Louvain | base/migrations/0277_auto_20180601_1458.py | Python | agpl-3.0 | 682 | 0.001466 |
from django.http import StreamingHttpResponse, HttpResponseServerError
from download_service.zipbuilder import DDSZipBuilder, NotFoundException, NotSupportedException
from django.contrib.auth.decorators import login_required
from download_service.utils import make_client
from django.http import Http404
@login_required
def dds_project_zip(request, project_id, filename):
client = make_client(request.user)
builder = DDSZipBuilder(project_id, client)
try:
builder.raise_on_filename_mismatch(filename)
response = StreamingHttpResponse(builder.build_streaming_zipfile(), content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename={}'.format(filename)
return response
except NotFoundException as e:
raise Http404(str(e))
except NotSupportedException as e:
return HttpResponseServerError(content=str(e))
| Duke-GCB/DukeDSHandoverService | download_service/views.py | Python | mit | 899 | 0.003337 |
#!/usr/bin/python
'''
This SimpleCV example uses a technique called frame differencing to determine
if motion has occurred. You take an initial image, then another, and subtract
the two; what is left over is what has changed between those two images.
These changes typically show up as blobs, so we do a blob search to count
the number of blobs, and if any exist then motion has occurred.
'''
from __future__ import print_function
import sys, time, socket
from SimpleCV import *
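# A minimal sketch of the frame-differencing idea described in the module
# docstring, written in plain NumPy and kept separate from the SimpleCV
# pipeline below.  Illustrative only: `prev` and `curr` are assumed to be
# grayscale frames as numpy arrays, and the two thresholds are arbitrary,
# untuned values.
import numpy as np

def simple_motion_detected(prev, curr, pixel_thresh=25, min_changed=500):
    """Return True if enough pixels changed between two grayscale frames."""
    diff = np.abs(curr.astype(int) - prev.astype(int))  # per-pixel change
    return int((diff > pixel_thresh).sum()) > min_changed  # enough change?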
cam = Camera() #setup the camera
#settings for the project
min_size = 0.1*cam.getProperty("width")*cam.getProperty("height") #make the threshold adaptable for various camera sizes
thresh = 10 # frame diff threshold
show_message_for = 2 # the amount of seconds to show the motion detected message
motion_timestamp = int(time.time())
message_text = "Motion detected"
draw_message = False
lastImg = cam.getImage()
lastImg.show()
while True:
newImg = cam.getImage()
trackImg = newImg - lastImg # diff the images
    blobs = trackImg.findBlobs() #use adaptive blob detection
now = int(time.time())
    #If blobs are found then motion has occurred
if blobs:
motion_timestamp = now
draw_message = True
#See if the time has exceeded to display the message
if (now - motion_timestamp) > show_message_for:
draw_message = False
#Draw the message on the screen
if(draw_message):
newImg.drawText(message_text, 5,5)
print(message_text)
lastImg = newImg # update the image
newImg.show()
| tpltnt/SimpleCV | SimpleCV/examples/detection/MotionTracker.py | Python | bsd-3-clause | 1,540 | 0.012987 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012-2014 Paul Franklin
# Copyright (C) 2012 Nicolas Adenis-Lamarre
# Copyright (C) 2012 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" fanchart report """
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from math import pi, cos, sin, log10, acos
def log2(val):
"""
Calculate the log base 2 of a value.
"""
return int(log10(val)/log10(2))
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.errors import ReportError
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER,
IndexMark, INDEX_TYPE_TOC)
from gramps.gen.plug.menu import (EnumeratedListOption, NumberOption,
PersonOption, BooleanOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.config import config
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.lib import EventType
from gramps.gen.proxy import CacheProxyDb
from gramps.gen.display.name import displayer as _nd
#------------------------------------------------------------------------
#
# private constants
#
#------------------------------------------------------------------------
FULL_CIRCLE = 0
HALF_CIRCLE = 1
QUAR_CIRCLE = 2
BACKGROUND_WHITE = 0
BACKGROUND_GEN = 1
RADIAL_UPRIGHT = 0
RADIAL_ROUNDABOUT = 1
# minor offset, only useful for generation 11,
# to avoid a slight offset between the text and the polygon;
# this can be considered a bad hack
WEDGE_TEXT_BARRE_OFFSET = 0.0016
#------------------------------------------------------------------------
#
# private functions
#
#------------------------------------------------------------------------
def draw_wedge(doc, style, centerx, centery, radius, start_angle,
end_angle, do_rendering, short_radius=0):
"""
Draw a wedge shape.
"""
while end_angle < start_angle:
end_angle += 360
path = []
degreestoradians = pi / 180.0
radiansdelta = degreestoradians / 2
sangle = start_angle * degreestoradians
eangle = end_angle * degreestoradians
while eangle < sangle:
eangle = eangle + 2 * pi
angle = sangle
if short_radius == 0:
if (end_angle - start_angle) != 360:
path.append((centerx, centery))
else:
origx = (centerx + cos(angle) * short_radius)
origy = (centery + sin(angle) * short_radius)
path.append((origx, origy))
while angle < eangle:
_x_ = centerx + cos(angle) * radius
_y_ = centery + sin(angle) * radius
path.append((_x_, _y_))
angle = angle + radiansdelta
_x_ = centerx + cos(eangle) * radius
_y_ = centery + sin(eangle) * radius
path.append((_x_, _y_))
if short_radius:
_x_ = centerx + cos(eangle) * short_radius
_y_ = centery + sin(eangle) * short_radius
path.append((_x_, _y_))
angle = eangle
while angle >= sangle:
_x_ = centerx + cos(angle) * short_radius
_y_ = centery + sin(angle) * short_radius
path.append((_x_, _y_))
angle -= radiansdelta
if do_rendering:
doc.draw_path(style, path)
delta = (eangle - sangle) / 2.0
rad = short_radius + (radius - short_radius) / 2.0
return ((centerx + cos(sangle + delta + WEDGE_TEXT_BARRE_OFFSET) * rad),
(centery + sin(sangle + delta + WEDGE_TEXT_BARRE_OFFSET) * rad))
#------------------------------------------------------------------------
#
# FanChart
#
#------------------------------------------------------------------------
class FanChart(Report):
def __init__(self, database, options, user):
"""
Create the FanChart object that produces the report.
The arguments are:
database - the Gramps database instance
options - instance of the Options class for this report
user - a gen.user.User instance
This report needs the following parameters (class variables)
that come in the options class.
maxgen - Maximum number of generations to include.
circle - Draw a full circle, half circle, or quarter circle.
background - Background color is generation dependent or white.
radial - Print radial texts roundabout or as upright as possible.
draw_empty - draw background when there is no information
same_style - use the same style for all generation
incl_private - Whether to include private data
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.set_locale(options.menu.get_option_by_name('trans').get_value())
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu, self._locale)
self.database = CacheProxyDb(self.database)
self.max_generations = menu.get_option_by_name('maxgen').get_value()
self.circle = menu.get_option_by_name('circle').get_value()
self.background = menu.get_option_by_name('background').get_value()
self.radial = menu.get_option_by_name('radial').get_value()
pid = menu.get_option_by_name('pid').get_value()
self.draw_empty = menu.get_option_by_name('draw_empty').get_value()
self.same_style = menu.get_option_by_name('same_style').get_value()
self.center_person = self.database.get_person_from_gramps_id(pid)
if self.center_person is None:
raise ReportError(_("Person %s is not in the Database") % pid)
self.graphic_style = []
self.text_style = []
for i in range(0, self.max_generations):
self.graphic_style.append('FC-Graphic' + '%02d' % i)
self.text_style.append('FC-Text' + '%02d' % i)
self.calendar = 0
self.height = 0
self.map = [None] * 2**self.max_generations
self.text = {}
def apply_filter(self, person_handle, index):
"""traverse the ancestors recursively until either the end
of a line is found, or until we reach the maximum number of
generations that we want to deal with"""
if (not person_handle) or (index >= 2**self.max_generations):
return
self.map[index-1] = person_handle
self.text[index-1] = self.get_info(person_handle, log2(index))
person = self.database.get_person_from_handle(person_handle)
family_handle = person.get_main_parents_family_handle()
if family_handle:
family = self.database.get_family_from_handle(family_handle)
self.apply_filter(family.get_father_handle(), index*2)
self.apply_filter(family.get_mother_handle(), (index*2)+1)
def write_report(self):
self.doc.start_page()
self.apply_filter(self.center_person.get_handle(), 1)
p_rn = self.center_person.get_primary_name().get_regular_name()
if self.circle == FULL_CIRCLE:
max_angle = 360.0
start_angle = 90
max_circular = 5
_x_ = self.doc.get_usable_width() / 2.0
_y_ = self.doc.get_usable_height() / 2.0
min_xy = min(_x_, _y_)
elif self.circle == HALF_CIRCLE:
max_angle = 180.0
start_angle = 180
max_circular = 3
_x_ = (self.doc.get_usable_width()/2.0)
_y_ = self.doc.get_usable_height()
min_xy = min(_x_, _y_)
else: # quarter circle
max_angle = 90.0
start_angle = 270
max_circular = 2
_x_ = 0
_y_ = self.doc.get_usable_height()
min_xy = min(self.doc.get_usable_width(), _y_)
        # choose the one-line or two-line title translation according to the width
title = self._("%(generations)d Generation Fan Chart "
"for %(person)s") % {
'generations' : self.max_generations,
'person' : p_rn}
title_nb_lines = 1
style_sheet = self.doc.get_style_sheet()
if style_sheet:
p_style = style_sheet.get_paragraph_style('FC-Title')
if p_style:
font = p_style.get_font()
if font:
title_width = utils.pt2cm(self.doc.string_width(font,
title))
if title_width > self.doc.get_usable_width():
title = self._(
"%(generations)d Generation Fan Chart "
"for\n%(person)s") % {
'generations' : self.max_generations,
'person' : p_rn}
title_nb_lines = 2
if self.circle == FULL_CIRCLE or self.circle == QUAR_CIRCLE:
# adjust only if full circle or 1/4 circle in landscape mode
if self.doc.get_usable_height() <= self.doc.get_usable_width():
# Should be in Landscape now
style_sheet = self.doc.get_style_sheet()
p_style = style_sheet.get_paragraph_style('FC-Title')
if p_style:
font = p_style.get_font()
if font:
fontsize = utils.pt2cm(font.get_size())
# _y_ is vertical distance to center of circle,
# move center down 1 fontsize
_y_ += fontsize*title_nb_lines
                # min_xy is the diameter of the circle;
                # subtract two font sizes
                # so we don't draw outside the bottom of the paper
min_xy = min(min_xy, _y_ - 2*fontsize*title_nb_lines)
if self.max_generations > max_circular:
block_size = min_xy / (self.max_generations * 2 - max_circular)
else:
block_size = min_xy / self.max_generations
# adaptation of the fonts (title and others)
optimized_style_sheet = self.get_optimized_style_sheet(
title, max_circular, block_size, self.same_style,
not self.same_style,
# if same_style, use default generated colors
self.background == BACKGROUND_WHITE)
if optimized_style_sheet:
self.doc.set_style_sheet(optimized_style_sheet)
# title
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.center_text('FC-Graphic-title', title,
self.doc.get_usable_width() / 2, 0, mark)
# wheel
for generation in range(0, min(max_circular, self.max_generations)):
self.draw_circular(_x_, _y_,
start_angle, max_angle, block_size, generation)
for generation in range(max_circular, self.max_generations):
self.draw_radial(_x_, _y_,
start_angle, max_angle, block_size, generation)
self.doc.end_page()
def get_info(self, person_handle, generation):
""" get info about a person """
person = self.database.get_person_from_handle(person_handle)
p_pn = person.get_primary_name()
self.calendar = config.get('preferences.calendar-format-report')
birth = get_birth_or_fallback(self.database, person)
bth = ""
if birth:
bth = birth.get_date_object()
bth = str(bth.to_calendar(self.calendar).get_year())
if bth == 0:
bth = ""
elif birth.get_type() != EventType.BIRTH:
bth += '*'
death = get_death_or_fallback(self.database, person)
dth = ""
if death:
dth = death.get_date_object()
dth = str(dth.to_calendar(self.calendar).get_year())
if dth == 0:
dth = ""
elif death.get_type() != EventType.DEATH:
dth += '*'
if bth and dth:
val = "%s - %s" % (str(bth), str(dth))
elif bth:
val = "* %s" % (str(bth))
elif dth:
val = "+ %s" % (str(dth))
else:
val = ""
if generation > 7:
if (p_pn.get_first_name() != "") and (p_pn.get_surname() != ""):
name = p_pn.get_first_name() + " " + p_pn.get_surname()
else:
name = p_pn.get_first_name() + p_pn.get_surname()
if (name != "") and (val != ""):
string = name + self._(", ") + val # Arabic OK
else:
string = name + val
return [string]
elif generation == 7:
if (p_pn.get_first_name() != "") and (p_pn.get_surname() != ""):
name = p_pn.get_first_name() + " " + p_pn.get_surname()
else:
name = p_pn.get_first_name() + p_pn.get_surname()
if self.circle == FULL_CIRCLE:
return [name, val]
elif self.circle == HALF_CIRCLE:
return [name, val]
else:
if (name != "") and (val != ""):
string = name + self._(", ") + val # Arabic OK
else:
string = name + val
return [string]
elif generation == 6:
if self.circle == FULL_CIRCLE:
return [p_pn.get_first_name(), p_pn.get_surname(), val]
elif self.circle == HALF_CIRCLE:
return [p_pn.get_first_name(), p_pn.get_surname(), val]
else:
if (p_pn.get_first_name() != "") and (p_pn.get_surname() != ""):
name = p_pn.get_first_name() + " " + p_pn.get_surname()
else:
name = p_pn.get_first_name() + p_pn.get_surname()
return [name, val]
else:
return [p_pn.get_first_name(), p_pn.get_surname(), val]
def get_max_width_for_circles(self, rad1, rad2, max_centering_proportion):
r"""
(the "r" in the above line is to keep pylint happy)
__
/__\ <- compute the line width which is drawable between 2 circles.
/ _ \ max_centering_proportion : 0, touching the circle1, 1,
| |_| | touching the circle2, 0.5 : middle between the 2 circles
| |
\ /
\__/
basically, max_centering_proportion is
max_centering_proportion/nb_lines
"""
# radius at the center of the 2 circles
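        # Geometry note (added for clarity): the text line sits at radius rmid
        # inside the outer circle of radius rad2, and the widest chord of that
        # circle at distance rmid from the centre is 2*sqrt(rad2**2 - rmid**2),
        # which is what sin(acos(rmid/rad2)) * rad2 * 2 computes below.
        # E.g. rad1=2, rad2=4, max_centering_proportion=0.5 gives rmid=3 and a
        # width of 2*sqrt(7), roughly 5.29.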
rmid = rad2 - (rad2-rad1)*max_centering_proportion
return sin(acos(rmid/rad2)) * rad2 * 2
def get_max_width_for_circles_line(self, rad1, rad2, line, nb_lines,
centering=False):
r"""
(the "r" in the above line is to keep pylint happy)
__
/__\ <- compute the line width which is drawable between 2 circles.
/ _ \ instead of a max_centering_proportion, you get a
| |_| | line/nb_lines position. (we suppose that lines have the
| | same heights.) for example, if you've 2 lines to draw,
\ / line 2 max width is at the 2/3 between the 2 circles
\__/
"""
if centering:
return self.get_max_width_for_circles(rad1, rad2, 1.0)
else:
return self.get_max_width_for_circles(rad1, rad2,
line/float(nb_lines+1))
def get_optimized_font_size_for_text(self, rad1, rad2, text, font,
centering=False):
"""
        a text can be several lines;
        find the font size equal to or lower than font.get_size() which fits
        between rad1 and rad2 to display the text.
        centering is a special case when you have the full circle
        available to draw the text in the middle of it
"""
min_font_size = font.get_size()
i = 1
nb_lines = len(text)
for line in text:
font_size = self.get_optimized_font_size(
line, font,
self.get_max_width_for_circles_line(rad1, rad2, i, nb_lines,
centering))
i += 1
if min_font_size > font_size:
min_font_size = font_size
return min_font_size
def get_optimized_font_size(self, line, font, max_width):
"""
        for a given width, guess the best font size which is equal to
        or smaller than font and which makes line fit into max_width
"""
test_font = FontStyle(font)
width = utils.pt2cm(self.doc.string_width(test_font, line))
while width > max_width and test_font.get_size() > 1:
test_font.set_size(test_font.get_size() -1)
width = utils.pt2cm(self.doc.string_width(test_font, line))
return test_font.get_size()
def get_optimized_style_sheet(self, title, max_circular, block_size,
map_style_from_single,
map_paragraphs_colors_to_graphics,
make_background_white):
"""
        returns an optimized (modified) style sheet which makes the fanchart
        look nicer
"""
new_style_sheet = self.doc.get_style_sheet()
if not new_style_sheet:
return self.doc.get_style_sheet()
# update title font size
pstyle_name = 'FC-Title'
p_style = new_style_sheet.get_paragraph_style(pstyle_name)
if p_style:
title_font = p_style.get_font()
if title_font:
title_width = utils.pt2cm(
self.doc.string_multiline_width(title_font, title))
while (title_width > self.doc.get_usable_width() and
title_font.get_size() > 1):
title_font.set_size(title_font.get_size()-1)
title_width = utils.pt2cm(
self.doc.string_multiline_width(title_font, title))
new_style_sheet.add_paragraph_style(pstyle_name, p_style)
        # the biggest font allowed is the one of the first generation;
        # afterwards it is never larger than the previous one
p_style = new_style_sheet.get_paragraph_style(self.text_style[0])
font = None
if p_style:
font = p_style.get_font()
if font:
previous_generation_font_size = font.get_size()
for generation in range(0, self.max_generations):
gstyle_name = self.graphic_style[generation]
pstyle_name = self.text_style[generation]
g_style = new_style_sheet.get_draw_style(gstyle_name)
# p_style is a copy of 'FC-Text' - use different style
# to be able to auto change some fonts for some generations
if map_style_from_single:
p_style = new_style_sheet.get_paragraph_style('FC-Text')
else:
p_style = new_style_sheet.get_paragraph_style(pstyle_name)
if g_style and p_style:
                # set graphic colors to paragraph colors
                # (functionally the same for the fanchart),
                # or make backgrounds white
if make_background_white:
g_style.set_fill_color((255, 255, 255))
new_style_sheet.add_draw_style(gstyle_name, g_style)
elif map_paragraphs_colors_to_graphics:
pstyle = new_style_sheet.get_paragraph_style(
pstyle_name)
if pstyle:
g_style.set_fill_color(
pstyle.get_background_color())
new_style_sheet.add_draw_style(gstyle_name,
g_style)
# adapt font size if too big
segments = 2**generation
if generation < min(max_circular, self.max_generations):
                    # adaptation for circular fonts
rad1, rad2 = self.get_circular_radius(
block_size, generation, self.circle)
font = p_style.get_font()
if font:
min_font_size = font.get_size()
# find the smallest font required
for index in range(segments - 1, 2*segments - 1):
if self.map[index]:
font_size = \
self.get_optimized_font_size_for_text(
rad1, rad2, self.text[index],
p_style.get_font(),
(self.circle == FULL_CIRCLE and
generation == 0)
)
if font_size < min_font_size:
min_font_size = font_size
font.set_size(min(previous_generation_font_size,
min_font_size))
else:
# adaptation for radial fonts
# find the largest string for the generation
longest_line = ""
longest_width = 0
for index in range(segments - 1, 2*segments - 1):
if self.map[index]:
for line in self.text[index]:
width = utils.pt2cm(
self.doc.string_multiline_width(
p_style.get_font(), line))
if width > longest_width:
longest_line = line
longest_width = width
# determine maximum width allowed for this generation
rad1, rad2 = self.get_radial_radius(
block_size, generation, self.circle)
max_width = rad2 - rad1
                    # reduce the font so that longest_width
                    # fits into max_width
font = p_style.get_font()
if font:
font.set_size(min(previous_generation_font_size,
self.get_optimized_font_size(
longest_line,
p_style.get_font(),
max_width)))
# redefine the style
new_style_sheet.add_paragraph_style(pstyle_name, p_style)
font = p_style.get_font()
if font:
previous_generation_font_size = font.get_size()
# finished
return new_style_sheet
def draw_circular(self, _x_, _y_,
start_angle, max_angle, size, generation):
segments = 2**generation
delta = max_angle / segments
end_angle = start_angle
text_angle = start_angle - 270 + (delta / 2.0)
rad1, rad2 = self.get_circular_radius(size, generation, self.circle)
graphic_style = self.graphic_style[generation]
for index in range(segments - 1, 2*segments - 1):
start_angle = end_angle
end_angle = start_angle + delta
(_xc, _yc) = draw_wedge(self.doc, graphic_style, _x_, _y_, rad2,
start_angle, end_angle,
self.map[index] or self.draw_empty, rad1)
if self.map[index]:
if (generation == 0) and self.circle == FULL_CIRCLE:
_yc = _y_
person = self.database.get_person_from_handle(self.map[index])
mark = utils.get_person_mark(self.database, person)
self.doc.rotate_text(graphic_style, self.text[index],
_xc, _yc, text_angle, mark)
text_angle += delta
def get_radial_radius(self, size, generation, circle):
""" determine the radius """
if circle == FULL_CIRCLE:
rad1 = size * ((generation * 2) - 5)
rad2 = size * ((generation * 2) - 3)
elif circle == HALF_CIRCLE:
rad1 = size * ((generation * 2) - 3)
rad2 = size * ((generation * 2) - 1)
else: # quarter circle
rad1 = size * ((generation * 2) - 2)
rad2 = size * (generation * 2)
return rad1, rad2
def get_circular_radius(self, size, generation, circle):
""" determine the radius """
return size * generation, size * (generation + 1)
def draw_radial(self, _x_, _y_,
start_angle, max_angle, size, generation):
segments = 2**generation
delta = max_angle / segments
end_angle = start_angle
text_angle = start_angle - delta / 2.0
graphic_style = self.graphic_style[generation]
rad1, rad2 = self.get_radial_radius(size, generation, self.circle)
for index in range(segments - 1, 2*segments - 1):
start_angle = end_angle
end_angle = start_angle + delta
(_xc, _yc) = draw_wedge(self.doc, graphic_style, _x_, _y_, rad2,
start_angle, end_angle,
self.map[index] or self.draw_empty, rad1)
text_angle += delta
if self.map[index]:
person = self.database.get_person_from_handle(self.map[index])
mark = utils.get_person_mark(self.database, person)
if (self.radial == RADIAL_UPRIGHT
and (start_angle >= 90)
and (start_angle < 270)):
self.doc.rotate_text(graphic_style, self.text[index],
_xc, _yc, text_angle + 180, mark)
else:
self.doc.rotate_text(graphic_style, self.text[index],
_xc, _yc, text_angle, mark)
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class FanChartOptions(MenuReportOptions):
""" options for fanchart report """
def __init__(self, name, dbase):
self.__db = dbase
self.__pid = None
self.max_generations = 11
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
return _nd.display(person)
def add_menu_options(self, menu):
"""
Add options to the menu for the fan chart.
"""
category_name = _("Report Options")
self.__pid = PersonOption(_("Center Person"))
self.__pid.set_help(_("The center person for the report"))
menu.add_option(category_name, "pid", self.__pid)
max_gen = NumberOption(_("Generations"), 5, 1, self.max_generations)
max_gen.set_help(_("The number of generations "
"to include in the report"))
menu.add_option(category_name, "maxgen", max_gen)
circle = EnumeratedListOption(_('Type of graph'), HALF_CIRCLE)
circle.add_item(FULL_CIRCLE, _('full circle'))
circle.add_item(HALF_CIRCLE, _('half circle'))
circle.add_item(QUAR_CIRCLE, _('quarter circle'))
circle.set_help(_("The form of the graph: full circle, half circle,"
" or quarter circle."))
menu.add_option(category_name, "circle", circle)
background = EnumeratedListOption(_('Background color'), BACKGROUND_GEN)
background.add_item(BACKGROUND_WHITE, _('white'))
background.add_item(BACKGROUND_GEN, _('generation dependent'))
background.set_help(_("Background color is either white or generation"
" dependent"))
menu.add_option(category_name, "background", background)
radial = EnumeratedListOption(_('Orientation of radial texts'),
RADIAL_UPRIGHT)
radial.add_item(RADIAL_UPRIGHT, _('upright'))
radial.add_item(RADIAL_ROUNDABOUT, _('roundabout'))
radial.set_help(_("Print radial texts upright or roundabout"))
menu.add_option(category_name, "radial", radial)
draw_empty = BooleanOption(_("Draw empty boxes"), True)
draw_empty.set_help(_("Draw the background "
"although there is no information"))
menu.add_option(category_name, "draw_empty", draw_empty)
same_style = BooleanOption(_("Use one font style "
"for all generations"), True)
same_style.set_help(_("You can customize font and color "
"for each generation in the style editor"))
menu.add_option(category_name, "same_style", same_style)
category_name = _("Report Options (2)")
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
stdoptions.add_localization_option(menu, category_name)
def make_default_style(self, default_style):
"""Make the default output style for the Fan Chart report."""
background_colors = [(255, 63, 0),
(255, 175, 15),
(255, 223, 87),
(255, 255, 111),
(159, 255, 159),
(111, 215, 255),
(79, 151, 255),
(231, 23, 255),
(231, 23, 221),
(210, 170, 124),
(189, 153, 112)
]
#Paragraph Styles
f_style = FontStyle()
f_style.set_size(18)
f_style.set_bold(1)
f_style.set_type_face(FONT_SANS_SERIF)
p_style = ParagraphStyle()
p_style.set_font(f_style)
p_style.set_alignment(PARA_ALIGN_CENTER)
p_style.set_description(_('The style used for the title.'))
default_style.add_paragraph_style("FC-Title", p_style)
f_style = FontStyle()
f_style.set_size(9)
f_style.set_type_face(FONT_SANS_SERIF)
p_style = ParagraphStyle()
p_style.set_font(f_style)
p_style.set_alignment(PARA_ALIGN_CENTER)
p_style.set_description(
_('The basic style used for the text display.'))
default_style.add_paragraph_style("FC-Text", p_style)
for i in range(0, self.max_generations):
f_style = FontStyle()
f_style.set_size(9)
f_style.set_type_face(FONT_SANS_SERIF)
p_style = ParagraphStyle()
p_style.set_font(f_style)
p_style.set_alignment(PARA_ALIGN_CENTER)
p_style.set_description(
_('The style used for the text display of generation "%d"') % i)
default_style.add_paragraph_style("FC-Text" + "%02d" % i, p_style)
# GraphicsStyles
g_style = GraphicsStyle()
g_style.set_paragraph_style('FC-Title')
default_style.add_draw_style('FC-Graphic-title', g_style)
for i in range(0, self.max_generations):
g_style = GraphicsStyle()
g_style.set_paragraph_style('FC-Text' + '%02d' % i)
g_style.set_fill_color(background_colors[i])
default_style.add_draw_style('FC-Graphic' + '%02d' % i, g_style)
| jralls/gramps | gramps/plugins/drawreport/fanchart.py | Python | gpl-2.0 | 33,745 | 0.001245 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
#------------------------------------------------------------
import selenium
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import time
import urlparse,urllib2,urllib,re,xbmcplugin,xbmcgui,xbmcaddon,xbmc
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
import cookielib
import requests
import os.path
__channel__ = "itastreaming"
__category__ = "F"
__type__ = "generic"
__title__ = "itastreaming"
__language__ = "IT"
COOKIEFILE = "/Users/arturo/itacookie.lwp"
h = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:37.0) Gecko/20100101 Firefox/37.0'}
baseUrl = "http://itastreaming.co"
def createCookies():
if not os.path.isfile(COOKIEFILE):
        print "File does not exist"
#get cookies!
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:37.0) Gecko/20100101 Firefox/37.0")
browser = webdriver.PhantomJS(executable_path='/bin/phantomjs',desired_capabilities = dcap, service_log_path=os.path.devnull)
browser.get(baseUrl)
time.sleep(10)
a = browser.get_cookies()
print 'Got cloudflare cookies:\n'
browser.close()
b = cookielib.MozillaCookieJar()
for i in a:
# create the cf_session_cookie
ck = cookielib.Cookie(name=i['name'], value=i['value'], domain=i['domain'], path=i['path'], secure=i['secure'], rest=False, version=0,port=None,port_specified=False,domain_specified=False,domain_initial_dot=False,path_specified=True,expires=i['expiry'],discard=True,comment=None,comment_url=None,rfc2109=False)
b.set_cookie(ck)
# save into a file
print b
b.save(filename=COOKIEFILE, ignore_discard=True, ignore_expires=False)
else:
print "found it, do nothing!"
b = True
return b
def isGeneric():
return True
def mainlist(item):
logger.info("pelisalacarta.itastreaming mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__ , action="movies", title="ultimi film inseriti..." , url="http://itastreaming.co" ))
itemlist.append( Item(channel=__channel__ , action="search", title="Cerca Film"))
itemlist.append( Item(channel=__channel__ , action="movies", title="animazione" , url="http://itastreaming.co/genere/animazione" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="avventura" , url="http://itastreaming.co/genere/avventura" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="azione" , url="http://itastreaming.co/genere/azione" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="biografico" , url="http://itastreaming.co/genere/biografico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="comico" , url="http://itastreaming.co/genere/comico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="commedia" , url="http://itastreaming.co/genere/commedia" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="documentario" , url="http://itastreaming.co/genere/documentario" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="drammatico" , url="http://itastreaming.co/genere/drammatico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="erotico" , url="http://itastreaming.co/genere/erotico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="fantascienza" , url="http://itastreaming.co/genere/fantascienza" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="fantasy" , url="http://itastreaming.co/genere/fantasy" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="gangstar" , url="http://itastreaming.co/genere/gangstar" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="giallo" , url="http://itastreaming.co/genere/giallo" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="guerra" , url="http://itastreaming.co/genere/guerra" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="horror" , url="http://itastreaming.co/genere/horror" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="musical" , url="http://itastreaming.co/genere/musical" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="romantico" , url="http://itastreaming.co/genere/romantico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="storico" , url="http://itastreaming.co/genere/storico" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="thriller" , url="http://itastreaming.co/genere/thriller" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="western" , url="http://itastreaming.co/genere/western" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="HD" , url="http://itastreaming.co/qualita/hd" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="DVD-RIP" , url="http://itastreaming.co/qualita/dvdripac3" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="CAM" , url="http://itastreaming.co/qualita/cam" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="HD-MD" , url="http://itastreaming.co/qualita/hd-md" ))
itemlist.append( Item(channel=__channel__ , action="movies", title="HD-TS" , url="http://itastreaming.co/qualita/hd-ts" ))
return itemlist
#searching for films
def search(item, text):
createCookies()
itemlist = []
text = text.replace(" ", "%20")
item.url = "http://itastreaming.co/?s=" + text
try:
biscotto = cookielib.MozillaCookieJar()
biscotto.load(COOKIEFILE)
data = requests.get(item.url, cookies=biscotto, headers=h)
data = data.text.encode('utf-8')
    data = data.replace('–','-').replace('’',' ')
pattern = '<img class="imx" style="margin-top:0px;" src="?([^>"]+)"?.*?alt="?([^>"]+)"?.*?'
pattern += '<h3><a href="?([^>"]+)"?.*?</h3>'
matches = re.compile(pattern,re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
#thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
thumbnail = scrapthumb(title)
itemlist.append(Item(channel=__channel__, action="grabing", title=title, url=url, thumbnail=thumbnail, folder=True))
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# the "movies" action serves to extract the titles
def movies(item):
createCookies()
itemlist = []
biscotto = cookielib.MozillaCookieJar()
biscotto.load(COOKIEFILE)
data = requests.get(item.url, cookies=biscotto, headers=h)
data = data.text.encode('utf-8')
    data = data.replace('–','-').replace('’',' ')
patron = '<div class="item">\s*'
patron += '<a href="?([^>"]+)"?.*?title="?([^>"]+)"?.*?'
patron += '<div class="img">\s*'
patron += '<img.*?src="([^>"]+)'
matches = re.compile(patron,re.DOTALL).findall(data)
if not matches:
        print "Cookies expired, deleting it"
os.remove(COOKIEFILE)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url,scrapedurl)
thumbnail = scrapthumb(title)
scrapedplot = ""
itemlist.append( Item(channel=__channel__, action="grabing", title=title , url=url , thumbnail=thumbnail , plot=scrapedplot , folder=True) )
#next page
patternpage = '<a rel="nofollow" class="previouspostslink\'" href="(.*?)">Seguente \›</a>'
matches = re.compile(patternpage,re.DOTALL).findall(data)
#print matches
if not matches:
patternpage = "<span class='current'.*?</span>"
patternpage += "<a rel='nofollow' class='page larger' href='([^']+)'>.*?</a>"
matches = re.compile(patternpage,re.DOTALL).findall(data)
#print matches
if len(matches)>0:
scrapedurl = urlparse.urljoin(item.url,matches[0])
itemlist.append( Item(channel=__channel__, action="movies", title="Next Page >>" , url=scrapedurl , folder=True) )
return itemlist
def grabing(item):
itemlist = []
biscotto = cookielib.MozillaCookieJar()
biscotto.load(COOKIEFILE)
data = requests.get(item.url, cookies=biscotto, headers=h)
data = data.text.encode('utf-8')
    # execute this function only if the film title was clicked
if item.title:
filmtitle = str(item.title)
        filmtitle = filmtitle.replace('–','')
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:37.0) Gecko/20100101 Firefox/37.0")
browser = webdriver.PhantomJS(executable_path='/bin/phantomjs',desired_capabilities = dcap, service_log_path=os.path.devnull)
browser.get(item.url)
time.sleep(7)
try:
nData = browser.execute_script("return nData")
print nData
for block in nData:
itemlist.append( Item(channel=__channel__, action="playit", title=filmtitle + " quality: " + block['width'] + " x " + block['height'] , url=block['url'] ))
browser.close()
except:
fakeurl = re.findall('"((http)s?://.*?hdpass.link.*?)"', data)
print fakeurl
url = fakeurl[0][0]
browser.get(url)
time.sleep(7)
nData = browser.execute_script("return nData")
print nData
print filmtitle
for block in nData:
print block['url']
itemlist.append( Item(channel=__channel__, action="playit", title=filmtitle + " quality: " + block['width'] + " x " + block['height'] , url=block['url'] ))
browser.close()
return itemlist
def playit(item):
itemlist = []
print item.url
itemlist.append( Item(channel=__channel__, action="playit", title=item.title , url=item.url ))
if not xbmc.Player().isPlayingVideo():
xbmc.Player(xbmc.PLAYER_CORE_DVDPLAYER).play(item.url)
return itemlist
def scrapthumb(title):
    title = title.strip().replace('–','').replace('’','-').replace('à','a')
title = title.replace(' ','-')
title = title[:-7]
#print title
mdburl = 'https://www.themoviedb.org/search/movie?query=' + title
req = urllib2.Request(mdburl)
response = urllib2.urlopen(req)
data = response.read()
pattern = '<div class="poster">\s*'
pattern += '<a.*?src="(.*?)"'
matches = re.compile(pattern,re.DOTALL).findall(data)
thumbnail = ""
if matches:
thumbnail = matches[0]
thumbnail = thumbnail.replace('w92','original')
else:
print "thumb not found for: " + mdburl
return thumbnail
| Reat0ide/plugin.video.pelisalacarta | pelisalacarta/channels/itastreaming.py | Python | gpl-3.0 | 11,292 | 0.021891 |
from coalib.bearlib.aspects import Root, Taste
@Root.subaspect
class Spelling:
"""
How words should be written.
"""
class docs:
example = """
'Tihs si surly som incoreclt speling.
`Coala` is always written with a lowercase `c`.
"""
example_language = 'reStructuredText'
importance_reason = """
Words should always be written as they are supposed to be;
standardisation facilitates communication.
"""
fix_suggestions = """
Use the correct spelling for the misspelled words.
"""
@Spelling.subaspect
class DictionarySpelling:
"""
    Valid spelling of the language's words.
"""
class docs:
example = """
This is toatly wonrg.
"""
example_language = 'reStructuredText'
importance_reason = """
Good spelling facilitates communication and avoids confusion. By
following the same rules for spelling words, we can all understand
the text we read. Poor spelling distracts the reader and they lose
focus.
"""
fix_suggestions = """
You can use a spell-checker to fix this for you or just ensure
yourself that things are well written.
"""
@Spelling.subaspect
class OrgSpecificWordSpelling:
"""
    Spelling of words specified by organisations such as coala.
"""
class docs:
example = """
`Coala` is always written with a lower case c, also at the beginning
of the sentence.
"""
example_language = 'reStructuredText'
importance_reason = """
        There are words you want written in a specific way, like your
        organisation's name.
"""
fix_suggestions = """
Simply make sure those words match with what is provided by the
organisation.
"""
specific_word = Taste[list](
'Represents the regex of the specific word to check.',
(('c[o|O][a|A][l|L][a|A]',), ), default=list())
| kartikeys98/coala | coalib/bearlib/aspects/Spelling.py | Python | agpl-3.0 | 2,014 | 0 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2010-2012 OpenStack Foundation.
#
# Swift documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import logging
import os
from swift import __version__
import sys
# NOTE(amotoki): Our current doc build job uses an older version of
# liberasurecode which comes from Ubuntu 16.04.
# pyeclib emits a warning message if liberasurecode <1.3.1 is used [1] and
# this causes the doc build failure if warning-is-error is enabled in Sphinx.
# As a workaround we suppress the warning message from pyeclib until we use
# a newer version of liberasurecode in our doc build job.
# [1] https://github.com/openstack/pyeclib/commit/d163972b
logging.getLogger('pyeclib').setLevel(logging.ERROR)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.extend([os.path.abspath('../swift'), os.path.abspath('..'),
os.path.abspath('../bin')])
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'openstackdocstheme']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Swift'
copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['swift.']
# -- Options for HTML output -----------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# html_theme_path = ["."]
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
html_extra_path = ['_extra']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'swiftdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Swift.tex', u'Swift Documentation',
u'Swift Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# -- Options for openstackdocstheme -------------------------------------------
repository_name = 'openstack/swift'
bug_project = 'swift'
bug_tag = ''
| smerritt/swift | doc/source/conf.py | Python | apache-2.0 | 8,171 | 0 |
class Location(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, direction):
return Location(self.x + direction.x, self.y + direction.y)
def __sub__(self, direction):
return Location(self.x - direction.x, self.y - direction.y)
def __repr__(self):
return 'Location({}, {})'.format(self.x, self.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __hash__(self):
return hash((self.x, self.y))
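# Illustrative usage (not part of the original module): any object exposing
# `x` and `y` attributes works as a direction, including Location itself.
#
#   >>> Location(1, 2) + Location(0, 1) == Location(1, 3)
#   True
#   >>> Location(1, 2) - Location(1, 0)
#   Location(0, 2)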
| Spycho/aimmo | aimmo-game-worker/simulation/location.py | Python | agpl-3.0 | 529 | 0 |
from pyvisdk.esxcli.executer import execute_soap
from pyvisdk.esxcli.base import Base
class StorageCoreDeviceWorld(Base):
'''
Operations on worlds pertaining to the pluggable storage architectures' logical devices on the system.
'''
moid = 'ha-cli-handler-storage-core-device-world'
def list(self, device=None):
'''
Get a list of the worlds that are currently using devices on the ESX host.
:param device: string, Filter the output of the command to limit the output to a specific device. This device name can be any of the UIDs registered for a device.
:returns: vim.EsxCLI.storage.core.device.world.list.ScsiDeviceWorld[]
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.storage.core.device.world.List',
device=device,
) | xuru/pyvisdk | pyvisdk/esxcli/handlers/ha_cli_handler_storage_core_device_world.py | Python | mit | 877 | 0.010262 |
class RCInput():
CHANNEL_COUNT = 14
channels = []
def __init__(self):
for i in range(0, self.CHANNEL_COUNT):
try:
f = open("/sys/kernel/rcio/rcin/ch%d" % i, "r")
self.channels.append(f)
except:
print ("Can't open file /sys/kernel/rcio/rcin/ch%d" % i)
def read(self, ch):
value = self.channels[ch].read()
position = self.channels[ch].seek(0, 0)
return value[:-1]
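# Illustrative usage on a Navio board with the rcio driver loaded (not part
# of the original module).  Each channel file holds the current RC value as a
# decimal string -- typically a pulse width around 1000-2000 microseconds:
#
#   rcin = RCInput()
#   throttle = rcin.read(2)   # e.g. '1500'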
| adrienemery/auv-control-pi | navio/rcinput.py | Python | mit | 484 | 0.004132 |
# -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2007 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2008 Richard Spiers <richard.spiers@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from papyon.msnp2p.constants import *
from papyon.msnp2p.SLP import *
from papyon.msnp2p.transport import *
from papyon.msnp2p.session import P2PSession
from papyon.util.decorator import rw_property
import papyon.util.element_tree as ElementTree
import struct
import gobject
import logging
import base64
import os
import random
from papyon.media import MediaCall, MediaCandidate, MediaCandidateEncoder, \
MediaSessionMessage, MediaStreamDescription
from papyon.media.constants import MediaStreamDirection, MediaSessionType
__all__ = ['WebcamSession']
logger = logging.getLogger("papyon.msnp2p.webcam")
class WebcamSession(P2PSession, MediaCall):
def __init__(self, producer, session_manager, peer, peer_guid,
euf_guid, message=None):
if producer:
type = MediaSessionType.WEBCAM_SEND
else:
type = MediaSessionType.WEBCAM_RECV
P2PSession.__init__(self, session_manager, peer, peer_guid, euf_guid,
ApplicationID.WEBCAM, message)
MediaCall.__init__(self, type)
self._producer = producer
self._answered = False
self._sent_syn = False
self._session_id = self._generate_id(9999)
self._xml_needed = False
@property
def producer(self):
return self._producer
def invite(self):
self._answered = True
context = "{B8BE70DE-E2CA-4400-AE03-88FF85B9F4E8}"
context = context.decode('ascii').encode('utf-16_le')
self._invite(context)
def accept(self):
self._answered = True
temp_application_id = self._application_id
self._application_id = 0
self._accept()
self._application_id = temp_application_id
self.send_binary_syn()
def reject(self):
self._answered = True
self._decline(603)
def end(self, reason=None):
if not self._answered:
self.reject()
else:
context = '\x74\x03\x00\x81'
self._close(context, reason)
self.dispose()
def dispose(self):
MediaCall.dispose(self)
self._dispatch("on_call_ended")
self._dispose()
def on_media_session_prepared(self, session):
if self._xml_needed:
self._send_xml()
def _on_invite_received(self, message):
if self._producer:
stream = self.media_session.create_stream("video",
MediaStreamDirection.SENDING, False)
self.media_session.add_stream(stream)
def _on_bye_received(self, message):
self.dispose()
def _on_session_accepted(self):
self._dispatch("on_call_accepted")
def _on_session_rejected(self, message):
self._dispatch("on_call_rejected", message)
self.dispose()
def _on_data_blob_received(self, blob):
blob.data.seek(0, os.SEEK_SET)
data = blob.data.read()
data = unicode(data[10:], "utf-16-le").rstrip("\x00")
if not self._sent_syn:
self.send_binary_syn() #Send 603 first ?
if data == 'syn':
self.send_binary_ack()
elif data == 'ack' and self._producer:
self._send_xml()
elif '<producer>' in data or '<viewer>' in data:
self._handle_xml(data)
elif data.startswith('ReflData'):
refldata = data.split(':')[1]
str = ""
for i in range(0, len(refldata), 2):
str += chr(int(refldata[i:i+2], 16))
print "Got ReflData :", str
def send_data(self, data):
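        # Wire format note, inferred from the pack/unpack calls in this file
        # rather than from a protocol specification: the payload is the
        # UTF-16-LE encoded text plus a two-byte NUL terminator, preceded by
        # a 10-byte little-endian header made of a uint32 id (low byte forced
        # to 0x80 below), a uint16 with the constant value 8, and a uint32
        # byte length of the encoded message.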
message_bytes = data.encode("utf-16-le") + "\x00\x00"
id = (self._generate_id() << 8) | 0x80
header = struct.pack("<LHL", id, 8, len(message_bytes))
self._send_data(header + message_bytes)
def send_binary_syn(self):
self.send_data('syn')
self._sent_syn = True
def send_binary_ack(self):
self.send_data('ack')
def send_binary_viewer_data(self):
self.send_data('receivedViewerData')
def _send_xml(self):
if not self.media_session.prepared:
self._xml_needed = True
return
logger.info("Send XML for session %i", self._session_id)
self._xml_needed = False
message = WebcamSessionMessage(session=self.media_session,
id=self._session_id, producer=self._producer)
self.send_data(str(message))
def _handle_xml(self, data):
message = WebcamSessionMessage(body=data, producer=self._producer)
initial = not self._producer
self.media_session.process_remote_message(message, initial)
self._session_id = message.id
logger.info("Received XML data for session %i", self._session_id)
if self._producer:
self.send_binary_viewer_data()
else:
self._send_xml()
class WebcamCandidateEncoder(MediaCandidateEncoder):
def __init__(self):
MediaCandidateEncoder.__init__(self)
def encode_candidates(self, desc, local_candidates, remote_candidates):
for candidate in local_candidates:
desc.ips.append(candidate.ip)
desc.ports.append(candidate.port)
desc.rid = int(local_candidates[0].foundation)
desc.sid = int(local_candidates[0].username)
def decode_candidates(self, desc):
local_candidates = []
remote_candidate = []
for ip in desc.ips:
for port in desc.ports:
candidate = MediaCandidate()
candidate.foundation = str(desc.rid)
candidate.component_id = 1
candidate.username = str(desc.sid)
candidate.password = ""
candidate.ip = ip
candidate.port = port
candidate.transport = "TCP"
candidate.priority = 1
local_candidates.append(candidate)
return local_candidates, remote_candidate
class WebcamSessionMessage(MediaSessionMessage):
def __init__(self, session=None, body=None, id=0, producer=False):
self._id = id
self._producer = producer
MediaSessionMessage.__init__(self, session, body)
@property
def id(self):
return self._id
@property
def producer(self):
return self._producer
def _create_stream_description(self, stream):
return WebcamStreamDescription(stream, self._id, self._producer)
def _parse(self, body):
tree = ElementTree.fromstring(body)
self._id = int(tree.find("session").text)
desc = self._create_stream_description(None)
self.descriptions.append(desc)
for node in tree.findall("tcp/*"):
if node.tag == "tcpport":
desc.ports.append(int(node.text))
elif node.tag.startswith("tcpipaddress"):
desc.ips.append(node.text)
desc.rid = tree.find("rid").text
return self._descriptions
def __str__(self):
tag = self.producer and "producer" or "viewer"
desc = self._descriptions[0]
body = "<%s>" \
"<version>2.0</version>" \
"<rid>%s</rid>" \
"<session>%u</session>" \
"<ctypes>0</ctypes>" \
"<cpu>2010</cpu>" % (tag, desc.rid, desc.sid)
body += "<tcp>" \
"<tcpport>%(port)u</tcpport>" \
"<tcplocalport>%(port)u</tcplocalport>" \
"<tcpexternalport>0</tcpexternalport>" % \
{"port": desc.ports[0]}
for i, addr in enumerate(desc.ips):
body += "<tcpipaddress%u>%s</tcpipaddress%u>" % (i + 1, addr, i + 1)
body += "</tcp>"
body += "<codec></codec><channelmode>2</channelmode>"
body += "</%s>\r\n\r\n" % tag
return body
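    # For reference, the body built above looks roughly like the following
    # (values are purely illustrative):
    #   <viewer><version>2.0</version><rid>1</rid><session>42</session>
    #   <ctypes>0</ctypes><cpu>2010</cpu>
    #   <tcp><tcpport>1500</tcpport><tcplocalport>1500</tcplocalport>
    #   <tcpexternalport>0</tcpexternalport>
    #   <tcpipaddress1>192.168.0.10</tcpipaddress1></tcp>
    #   <codec></codec><channelmode>2</channelmode></viewer>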
class WebcamStreamDescription(MediaStreamDescription):
_candidate_encoder = WebcamCandidateEncoder()
def __init__(self, stream, sid, producer):
direction = producer and MediaStreamDirection.SENDING or \
MediaStreamDirection.RECEIVING
self._ips = []
self._ports = []
self._rid = None
self._sid = sid
MediaStreamDescription.__init__(self, stream, "video", direction)
@property
def candidate_encoder(self):
return self._candidate_encoder
@property
def ips(self):
return self._ips
@property
def ports(self):
return self._ports
@rw_property
def rid():
def fget(self):
return self._rid
def fset(self, value):
self._rid = value
return locals()
@rw_property
def sid():
def fget(self):
return self._sid
def fset(self, value):
self._sid = value
return locals()
| billiob/papyon | papyon/msnp2p/webcam.py | Python | gpl-2.0 | 9,665 | 0.001242 |
from skimage import measure
import numpy as np
import struct
import math as m
from PIL import Image
from simplify import simplify
import argparse
parser = argparse.ArgumentParser(description='convert apk heightmaps to floating point tiff')
parser.add_argument('file', type=str, help='the apk heightmap file')
args = parser.parse_args()
hdr=b'\x33\x13\x26\xc3\x33\x13\x26\x43\x02\x00\x20\xc1\x33\x13\xa1\x43'
with open(args.file, mode='rb') as file:
raw = file.read()
print(struct.unpack_from("<4xIII",raw,0x1020))
print(struct.unpack_from("<ffff",raw,0x1030))
t,w,h = struct.unpack_from("<4xIII",raw,0x1020)
e1,e2,e3,e4 = struct.unpack_from("<ffff",raw,0x1030)
dt = np.dtype("half")
dt = dt.newbyteorder('<')
img = np.frombuffer(raw,dtype=dt,offset=0x1040,count=w*h)
print (img.shape)
img = img.reshape((w,h))
imin = np.amin(img)
imax = np.amax(img)
extents = np.array((e1,e2,e3,e4))
np.savez_compressed(args.file, extents = extents, heightmap=img)
fimg = img.astype(np.float32)
fimg.reshape((w*h,1))
pimg = Image.frombytes('F',(w,h), fimg.tostring(),'raw','F;32NF')
pimg.save(args.file + ".tif")
hmin = e1 * (1-imin) + e2 * imin
hmax = e1 * (1-imax) + e2 * imax
contours = []
hstep = 2.5
nc = m.ceil((hmax-hmin)/hstep)
for i in range(nc):
hgt = imin + i*hstep/(hmax-hmin)
npc = measure.find_contours(img, hgt)
cs = []
for c in npc:
c = simplify(c,5,True)
cs.append(c)
cs = np.array(cs)
contours.append(cs)
np.savez_compressed(args.file+"-contours", *contours)
# mi,ma = float(np.amin(img)),float(np.amax(img))
# print("contour",mi,ma)
# for i in range(50):
# d = float(mi*(1-i/50)+ma*i/50)
# print("contour",d)
# npc = measure.find_contours(img, d)
# for n,c in enumerate(npc):
# contours = [((x[1]-512)/1024*3499.99975586*2,(x[0]-512)/1024*3499.99975586*2) for x in c]
# if norm(c[-1] - c[0]) < 0.01:
# self.canvas.create_polygon(contours,fill="",outline='red',tag="contour")
# else:
# self.canvas.create_line(contours,fill='green',tag="contour")
# except FileNotFoundError:
# print("file not found!")
# return
# try:
# self.img = Image.open(path)
# except:
# try:
# with open(path, mode='rb') as file:
# raw = file.read()
# self.img = Image.frombytes("F",(1024,1024),raw,"raw","F;16")
# print(self.img.getpixel((4,4)))
# f = 1.0 / 2**8
# self.img = self.img.point(lambda x: x * f)
# print(self.img.getpixel((4,4)))
# self.img = self.img.resize((8192,8192))
# self.img = self.img.filter(ImageFilter.CONTOUR)
# except FileNotFoundError:
# print("file not found!")
# return
# self.ix =2*3499.99975586
# f = self.ix/2049.0
# print (f)
# #self.img = self.img.transform((int(self.ix),int(self.ix)),Image.AFFINE,data=(f,0,0,0,f,0))
# self.img = self.img.resize((int(self.ix),int(self.ix)))
# self.simg = self.img
# self.pimg = ImageTk.PhotoImage(self.img)
# self.imgcid = self.canvas.create_image(-2048, -2048, image=self.pimg, anchor=tk.NW)
| tarnheld/ted-editor | hm/apkhm.py | Python | unlicense | 3,380 | 0.012722 |
# -*- test-case-name: twisted.test.test_task,twisted.test.test_cooperator -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Scheduling utility methods and classes.
@author: Jp Calderone
"""
__metaclass__ = type
import time
from zope.interface import implements
from twisted.python import reflect
from twisted.python.failure import Failure
from twisted.internet import base, defer
from twisted.internet.interfaces import IReactorTime
class LoopingCall:
"""Call a function repeatedly.
If C{f} returns a deferred, rescheduling will not take place until the
deferred has fired. The result value is ignored.
@ivar f: The function to call.
    @ivar a: A tuple of arguments to pass to the function.
@ivar kw: A dictionary of keyword arguments to pass to the function.
@ivar clock: A provider of
L{twisted.internet.interfaces.IReactorTime}. The default is
L{twisted.internet.reactor}. Feel free to set this to
something else, but it probably ought to be set *before*
calling L{start}.
@type running: C{bool}
@ivar running: A flag which is C{True} while C{f} is scheduled to be called
(or is currently being called). It is set to C{True} when L{start} is
called and set to C{False} when L{stop} is called or if C{f} raises an
exception. In either case, it will be C{False} by the time the
C{Deferred} returned by L{start} fires its callback or errback.
@type _expectNextCallAt: C{float}
@ivar _expectNextCallAt: The time at which this instance most recently
scheduled itself to run.
@type _realLastTime: C{float}
@ivar _realLastTime: When counting skips, the time at which the skip
counter was last invoked.
@type _runAtStart: C{bool}
@ivar _runAtStart: A flag indicating whether the 'now' argument was passed
to L{LoopingCall.start}.
"""
call = None
running = False
deferred = None
interval = None
_expectNextCallAt = 0.0
_runAtStart = False
starttime = None
def __init__(self, f, *a, **kw):
self.f = f
self.a = a
self.kw = kw
from twisted.internet import reactor
self.clock = reactor
def withCount(cls, countCallable):
"""
An alternate constructor for L{LoopingCall} that makes available the
number of calls which should have occurred since it was last invoked.
Note that this number is an C{int} value; It represents the discrete
number of calls that should have been made. For example, if you are
using a looping call to display an animation with discrete frames, this
number would be the number of frames to advance.
        The count is normally 1, but can be higher: for example, if the reactor
        is blocked and takes too long to invoke the L{LoopingCall}, if a Deferred
        returned from a previous call has not fired before an interval has
        elapsed, or if the callable itself blocks for longer than an interval,
        preventing I{itself} from being called.
@param countCallable: A callable that will be invoked each time the
resulting LoopingCall is run, with an integer specifying the number
of calls that should have been invoked.
@type countCallable: 1-argument callable which takes an C{int}
@return: An instance of L{LoopingCall} with call counting enabled,
which provides the count as the first positional argument.
@rtype: L{LoopingCall}
@since: 9.0
"""
def counter():
now = self.clock.seconds()
lastTime = self._realLastTime
if lastTime is None:
lastTime = self.starttime
if self._runAtStart:
lastTime -= self.interval
self._realLastTime = now
lastInterval = self._intervalOf(lastTime)
thisInterval = self._intervalOf(now)
count = thisInterval - lastInterval
return countCallable(count)
self = cls(counter)
self._realLastTime = None
return self
withCount = classmethod(withCount)
def _intervalOf(self, t):
"""
Determine the number of intervals passed as of the given point in
time.
@param t: The specified time (from the start of the L{LoopingCall}) to
be measured in intervals
@return: The C{int} number of intervals which have passed as of the
given point in time.
"""
elapsedTime = t - self.starttime
intervalNum = int(elapsedTime / self.interval)
return intervalNum
def start(self, interval, now=True):
"""
Start running function every interval seconds.
@param interval: The number of seconds between calls. May be
less than one. Precision will depend on the underlying
platform, the available hardware, and the load on the system.
@param now: If True, run this call right now. Otherwise, wait
until the interval has elapsed before beginning.
@return: A Deferred whose callback will be invoked with
C{self} when C{self.stop} is called, or whose errback will be
invoked when the function raises an exception or returned a
deferred that has its errback invoked.
"""
assert not self.running, ("Tried to start an already running "
"LoopingCall.")
if interval < 0:
raise ValueError, "interval must be >= 0"
self.running = True
d = self.deferred = defer.Deferred()
self.starttime = self.clock.seconds()
self._expectNextCallAt = self.starttime
self.interval = interval
self._runAtStart = now
if now:
self()
else:
self._reschedule()
return d
def stop(self):
"""Stop running function.
"""
assert self.running, ("Tried to stop a LoopingCall that was "
"not running.")
self.running = False
if self.call is not None:
self.call.cancel()
self.call = None
d, self.deferred = self.deferred, None
d.callback(self)
def reset(self):
"""
Skip the next iteration and reset the timer.
@since: 11.1
"""
assert self.running, ("Tried to reset a LoopingCall that was "
"not running.")
if self.call is not None:
self.call.cancel()
self.call = None
self._expectNextCallAt = self.clock.seconds()
self._reschedule()
def __call__(self):
def cb(result):
if self.running:
self._reschedule()
else:
d, self.deferred = self.deferred, None
d.callback(self)
def eb(failure):
self.running = False
d, self.deferred = self.deferred, None
d.errback(failure)
self.call = None
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
d.addCallback(cb)
d.addErrback(eb)
def _reschedule(self):
"""
Schedule the next iteration of this looping call.
"""
if self.interval == 0:
self.call = self.clock.callLater(0, self)
return
currentTime = self.clock.seconds()
# Find how long is left until the interval comes around again.
untilNextTime = (self._expectNextCallAt - currentTime) % self.interval
# Make sure it is in the future, in case more than one interval worth
# of time passed since the previous call was made.
nextTime = max(
self._expectNextCallAt + self.interval, currentTime + untilNextTime)
# If the interval falls on the current time exactly, skip it and
# schedule the call for the next interval.
if nextTime == currentTime:
nextTime += self.interval
self._expectNextCallAt = nextTime
self.call = self.clock.callLater(nextTime - currentTime, self)
def __repr__(self):
if hasattr(self.f, 'func_name'):
func = self.f.func_name
if hasattr(self.f, 'im_class'):
func = self.f.im_class.__name__ + '.' + func
else:
func = reflect.safe_repr(self.f)
return 'LoopingCall<%r>(%s, *%s, **%s)' % (
self.interval, func, reflect.safe_repr(self.a),
reflect.safe_repr(self.kw))
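# Illustrative sketch (not part of the original module): driving a LoopingCall from
# the deterministic Clock defined later in this module, as a unit test would.
# Nothing here runs at import time.
def _loopingCallExample():
    clock = Clock()
    ticks = []
    lc = LoopingCall(ticks.append, "tick")
    lc.clock = clock             # replace the reactor with the test clock
    lc.start(2.5, now=True)      # first call happens immediately -> ["tick"]
    clock.advance(2.5)           # second interval elapses -> ["tick", "tick"]
    clock.advance(2.5)           # third -> ["tick", "tick", "tick"]
    lc.stop()                    # fires the Deferred returned by start()
    return ticks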
class SchedulerError(Exception):
"""
The operation could not be completed because the scheduler or one of its
tasks was in an invalid state. This exception should not be raised
directly, but is a superclass of various scheduler-state-related
exceptions.
"""
class SchedulerStopped(SchedulerError):
"""
The operation could not complete because the scheduler was stopped in
progress or was already stopped.
"""
class TaskFinished(SchedulerError):
"""
The operation could not complete because the task was already completed,
stopped, encountered an error or otherwise permanently stopped running.
"""
class TaskDone(TaskFinished):
"""
The operation could not complete because the task was already completed.
"""
class TaskStopped(TaskFinished):
"""
The operation could not complete because the task was stopped.
"""
class TaskFailed(TaskFinished):
"""
The operation could not complete because the task died with an unhandled
error.
"""
class NotPaused(SchedulerError):
"""
This exception is raised when a task is resumed which was not previously
paused.
"""
class _Timer(object):
MAX_SLICE = 0.01
def __init__(self):
self.end = time.time() + self.MAX_SLICE
def __call__(self):
return time.time() >= self.end
_EPSILON = 0.00000001
def _defaultScheduler(x):
from twisted.internet import reactor
return reactor.callLater(_EPSILON, x)
class CooperativeTask(object):
"""
A L{CooperativeTask} is a task object inside a L{Cooperator}, which can be
paused, resumed, and stopped. It can also have its completion (or
termination) monitored.
@see: L{CooperativeTask.cooperate}
@ivar _iterator: the iterator to iterate when this L{CooperativeTask} is
asked to do work.
@ivar _cooperator: the L{Cooperator} that this L{CooperativeTask}
participates in, which is used to re-insert it upon resume.
@ivar _deferreds: the list of L{defer.Deferred}s to fire when this task
completes, fails, or finishes.
@type _deferreds: L{list}
@type _cooperator: L{Cooperator}
@ivar _pauseCount: the number of times that this L{CooperativeTask} has
been paused; if 0, it is running.
@type _pauseCount: L{int}
@ivar _completionState: The completion-state of this L{CooperativeTask}.
C{None} if the task is not yet completed, an instance of L{TaskStopped}
if C{stop} was called to stop this task early, of L{TaskFailed} if the
application code in the iterator raised an exception which caused it to
terminate, and of L{TaskDone} if it terminated normally via raising
L{StopIteration}.
@type _completionState: L{TaskFinished}
"""
def __init__(self, iterator, cooperator):
"""
A private constructor: to create a new L{CooperativeTask}, see
L{Cooperator.cooperate}.
"""
self._iterator = iterator
self._cooperator = cooperator
self._deferreds = []
self._pauseCount = 0
self._completionState = None
self._completionResult = None
cooperator._addTask(self)
def whenDone(self):
"""
Get a L{defer.Deferred} notification of when this task is complete.
@return: a L{defer.Deferred} that fires with the C{iterator} that this
L{CooperativeTask} was created with when the iterator has been
exhausted (i.e. its C{next} method has raised L{StopIteration}), or
fails with the exception raised by C{next} if it raises some other
exception.
@rtype: L{defer.Deferred}
"""
d = defer.Deferred()
if self._completionState is None:
self._deferreds.append(d)
else:
d.callback(self._completionResult)
return d
def pause(self):
"""
Pause this L{CooperativeTask}. Stop doing work until
L{CooperativeTask.resume} is called. If C{pause} is called more than
once, C{resume} must be called an equal number of times to resume this
task.
@raise TaskFinished: if this task has already finished or completed.
"""
self._checkFinish()
self._pauseCount += 1
if self._pauseCount == 1:
self._cooperator._removeTask(self)
def resume(self):
"""
Resume processing of a paused L{CooperativeTask}.
@raise NotPaused: if this L{CooperativeTask} is not paused.
"""
if self._pauseCount == 0:
raise NotPaused()
self._pauseCount -= 1
if self._pauseCount == 0 and self._completionState is None:
self._cooperator._addTask(self)
def _completeWith(self, completionState, deferredResult):
"""
@param completionState: a L{TaskFinished} exception or a subclass
thereof, indicating what exception should be raised when subsequent
operations are performed.
@param deferredResult: the result to fire all the deferreds with.
"""
self._completionState = completionState
self._completionResult = deferredResult
if not self._pauseCount:
self._cooperator._removeTask(self)
# The Deferreds need to be invoked after all this is completed, because
# a Deferred may want to manipulate other tasks in a Cooperator. For
# example, if you call "stop()" on a cooperator in a callback on a
# Deferred returned from whenDone(), this CooperativeTask must be gone
# from the Cooperator by that point so that _completeWith is not
# invoked reentrantly; that would cause these Deferreds to blow up with
# an AlreadyCalledError, or the _removeTask to fail with a ValueError.
for d in self._deferreds:
d.callback(deferredResult)
def stop(self):
"""
Stop further processing of this task.
@raise TaskFinished: if this L{CooperativeTask} has previously
completed, via C{stop}, completion, or failure.
"""
self._checkFinish()
self._completeWith(TaskStopped(), Failure(TaskStopped()))
def _checkFinish(self):
"""
If this task has been stopped, raise the appropriate subclass of
L{TaskFinished}.
"""
if self._completionState is not None:
raise self._completionState
def _oneWorkUnit(self):
"""
Perform one unit of work for this task, retrieving one item from its
iterator, stopping if there are no further items in the iterator, and
pausing if the result was a L{defer.Deferred}.
"""
try:
result = self._iterator.next()
except StopIteration:
self._completeWith(TaskDone(), self._iterator)
except:
self._completeWith(TaskFailed(), Failure())
else:
if isinstance(result, defer.Deferred):
self.pause()
def failLater(f):
self._completeWith(TaskFailed(), f)
result.addCallbacks(lambda result: self.resume(),
failLater)
class Cooperator(object):
"""
Cooperative task scheduler.
"""
def __init__(self,
terminationPredicateFactory=_Timer,
scheduler=_defaultScheduler,
started=True):
"""
Create a scheduler-like object to which iterators may be added.
@param terminationPredicateFactory: A no-argument callable which will
be invoked at the beginning of each step and should return a
no-argument callable which will return True when the step should be
terminated. The default factory is time-based and allows iterators to
run for 1/100th of a second at a time.
@param scheduler: A one-argument callable which takes a no-argument
callable and should invoke it at some future point. This will be used
to schedule each step of this Cooperator.
@param started: A boolean which indicates whether iterators should be
stepped as soon as they are added, or if they will be queued up until
L{Cooperator.start} is called.
"""
self._tasks = []
self._metarator = iter(())
self._terminationPredicateFactory = terminationPredicateFactory
self._scheduler = scheduler
self._delayedCall = None
self._stopped = False
self._started = started
def coiterate(self, iterator, doneDeferred=None):
"""
Add an iterator to the list of iterators this L{Cooperator} is
currently running.
@param doneDeferred: If specified, this will be the Deferred used as
the completion deferred. It is suggested that you use the default,
which creates a new Deferred for you.
@return: a Deferred that will fire when the iterator finishes.
"""
if doneDeferred is None:
doneDeferred = defer.Deferred()
CooperativeTask(iterator, self).whenDone().chainDeferred(doneDeferred)
return doneDeferred
def cooperate(self, iterator):
"""
Start running the given iterator as a long-running cooperative task, by
calling next() on it as a periodic timed event.
@param iterator: the iterator to invoke.
@return: a L{CooperativeTask} object representing this task.
"""
return CooperativeTask(iterator, self)
def _addTask(self, task):
"""
Add a L{CooperativeTask} object to this L{Cooperator}.
"""
if self._stopped:
self._tasks.append(task) # XXX silly, I know, but _completeWith
# does the inverse
task._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
else:
self._tasks.append(task)
self._reschedule()
def _removeTask(self, task):
"""
Remove a L{CooperativeTask} from this L{Cooperator}.
"""
self._tasks.remove(task)
# If no work left to do, cancel the delayed call:
if not self._tasks and self._delayedCall:
self._delayedCall.cancel()
self._delayedCall = None
def _tasksWhileNotStopped(self):
"""
Yield all L{CooperativeTask} objects in a loop as long as this
L{Cooperator}'s termination condition has not been met.
"""
terminator = self._terminationPredicateFactory()
while self._tasks:
for t in self._metarator:
yield t
if terminator():
return
self._metarator = iter(self._tasks)
def _tick(self):
"""
Run one scheduler tick.
"""
self._delayedCall = None
for taskObj in self._tasksWhileNotStopped():
taskObj._oneWorkUnit()
self._reschedule()
_mustScheduleOnStart = False
def _reschedule(self):
if not self._started:
self._mustScheduleOnStart = True
return
if self._delayedCall is None and self._tasks:
self._delayedCall = self._scheduler(self._tick)
def start(self):
"""
Begin scheduling steps.
"""
self._stopped = False
self._started = True
if self._mustScheduleOnStart:
del self._mustScheduleOnStart
self._reschedule()
def stop(self):
"""
Stop scheduling steps. Errback the completion Deferreds of all
iterators which have been added and forget about them.
"""
self._stopped = True
for taskObj in self._tasks:
taskObj._completeWith(SchedulerStopped(),
Failure(SchedulerStopped()))
self._tasks = []
if self._delayedCall is not None:
self._delayedCall.cancel()
self._delayedCall = None
_theCooperator = Cooperator()
def coiterate(iterator):
"""
Cooperatively iterate over the given iterator, dividing runtime between it
and all other iterators which have been passed to this function and not yet
exhausted.
"""
return _theCooperator.coiterate(iterator)
def cooperate(iterator):
"""
Start running the given iterator as a long-running cooperative task, by
calling next() on it as a periodic timed event.
@param iterator: the iterator to invoke.
@return: a L{CooperativeTask} object representing this task.
"""
return _theCooperator.cooperate(iterator)
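# Illustrative sketch (not part of the original module): a Cooperator stepped by
# hand instead of the reactor. The scheduler just queues each tick and the loop
# below runs them, mirroring what cooperate()/coiterate() do against the reactor.
def _cooperatorExample():
    pending = []
    coop = Cooperator(scheduler=pending.append)
    results = []
    def produce(n):
        for i in range(n):
            results.append(i)
            yield None           # hand control back to the scheduler
    coop.cooperate(produce(5))
    while pending:               # run queued ticks until no work remains
        pending.pop(0)()
    return results               # -> [0, 1, 2, 3, 4]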
class Clock:
"""
Provide a deterministic, easily-controlled implementation of
L{IReactorTime.callLater}. This is commonly useful for writing
deterministic unit tests for code which schedules events using this API.
"""
implements(IReactorTime)
rightNow = 0.0
def __init__(self):
self.calls = []
def seconds(self):
"""
Pretend to be time.time(). This is used internally when an operation
        such as L{IDelayedCall.reset} needs to determine a time value
relative to the current time.
@rtype: C{float}
@return: The time which should be considered the current time.
"""
return self.rightNow
def _sortCalls(self):
"""
Sort the pending calls according to the time they are scheduled.
"""
self.calls.sort(lambda a, b: cmp(a.getTime(), b.getTime()))
def callLater(self, when, what, *a, **kw):
"""
See L{twisted.internet.interfaces.IReactorTime.callLater}.
"""
dc = base.DelayedCall(self.seconds() + when,
what, a, kw,
self.calls.remove,
lambda c: None,
self.seconds)
self.calls.append(dc)
self._sortCalls()
return dc
def getDelayedCalls(self):
"""
See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
"""
return self.calls
def advance(self, amount):
"""
Move time on this clock forward by the given amount and run whatever
pending calls should be run.
@type amount: C{float}
@param amount: The number of seconds which to advance this clock's
time.
"""
self.rightNow += amount
self._sortCalls()
while self.calls and self.calls[0].getTime() <= self.seconds():
call = self.calls.pop(0)
call.called = 1
call.func(*call.args, **call.kw)
self._sortCalls()
def pump(self, timings):
"""
Advance incrementally by the given set of times.
@type timings: iterable of C{float}
"""
for amount in timings:
self.advance(amount)
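# Illustrative sketch (not part of the original module): Clock stands in for the
# reactor in tests, so callLater() work only runs when advance() moves time forward.
def _clockExample():
    clock = Clock()
    fired = []
    clock.callLater(1.0, fired.append, "first")
    clock.callLater(2.0, fired.append, "second")
    clock.advance(1.5)           # only the 1.0s call is due -> ["first"]
    clock.advance(1.0)           # 2.5s total -> ["first", "second"]
    return fired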
def deferLater(clock, delay, callable, *args, **kw):
"""
Call the given function after a certain period of time has passed.
@type clock: L{IReactorTime} provider
@param clock: The object which will be used to schedule the delayed
call.
@type delay: C{float} or C{int}
@param delay: The number of seconds to wait before calling the function.
@param callable: The object to call after the delay.
@param *args: The positional arguments to pass to C{callable}.
@param **kw: The keyword arguments to pass to C{callable}.
@rtype: L{defer.Deferred}
@return: A deferred that fires with the result of the callable when the
specified time has elapsed.
"""
def deferLaterCancel(deferred):
delayedCall.cancel()
d = defer.Deferred(deferLaterCancel)
d.addCallback(lambda ignored: callable(*args, **kw))
delayedCall = clock.callLater(delay, d.callback, None)
return d
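# Illustrative sketch (not part of the original module): deferLater() with the
# Clock above; the Deferred fires with the callable's result once the delay passes.
def _deferLaterExample():
    clock = Clock()
    results = []
    d = deferLater(clock, 3.0, lambda x, y: x + y, 2, 3)
    d.addCallback(results.append)
    clock.advance(3.0)           # delayed call fires, callbacks run -> results == [5]
    return results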
__all__ = [
'LoopingCall',
'Clock',
'SchedulerStopped', 'Cooperator', 'coiterate',
'deferLater',
]
| Varriount/Colliberation | libs/twisted/internet/task.py | Python | mit | 24,723 | 0.002346 |
# Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax3d.projects.nesf.nerfstatic.utils.types."""
import jax.numpy as jnp
from jax3d.projects.nesf.nerfstatic.utils import types
import numpy as np
def test_bounding_box_simple():
bbox = types.BoundingBox3d(
min_corner=jnp.asarray([0, 0, 0]),
max_corner=jnp.asarray([1, 1, 1]))
rays = types.Rays(origin=jnp.asarray([10, 10, 10]),
direction=jnp.asarray([1, 1, 1]),
scene_id=None)
assert bbox.intersect_rays(rays) == (-10, -9)
def test_bounding_box_zero_dir():
bbox = types.BoundingBox3d(
min_corner=jnp.asarray([0, 0, 0]),
max_corner=jnp.asarray([1, 1, 1]))
rays = types.Rays(origin=jnp.asarray([10, 0.5, 0.5]),
direction=jnp.asarray([1, 0, 0]),
scene_id=None)
assert bbox.intersect_rays(rays) == (-10, -9)
def test_bounding_box_no_intersection():
bbox = types.BoundingBox3d(
min_corner=jnp.asarray([0, 0, 0]),
max_corner=jnp.asarray([1, 1, 1]))
rays = types.Rays(origin=jnp.asarray([10, 10, 10]),
direction=jnp.asarray([1, 0, 0]),
scene_id=None)
i = bbox.intersect_rays(rays)
assert i[1] < i[0]
def test_point_cloud():
h, w = 6, 8
normalize = lambda x: x / np.linalg.norm(x, axis=-1, keepdims=True)
rays = types.Rays(scene_id=np.zeros((h, w, 1), dtype=np.int32),
origin=np.random.rand(h, w, 3),
direction=normalize(np.random.randn(h, w, 3)))
views = types.Views(rays=rays,
depth=np.random.rand(h, w, 1),
semantics=np.random.randint(0, 5, size=(h, w, 1)))
# Construct point cloud.
point_cloud = views.point_cloud
# Only valid points.
assert np.all(point_cloud.points >= -1)
assert np.all(point_cloud.points <= 1)
# Size matches expected value.
assert (point_cloud.size ==
point_cloud.points.shape[0] ==
point_cloud.semantics.shape[0])
| google-research/jax3d | jax3d/projects/nesf/nerfstatic/utils/types_test.py | Python | apache-2.0 | 2,544 | 0.008648 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ..representation import SphericalRepresentation
from ..baseframe import BaseCoordinateFrame, TimeFrameAttribute
from .utils import EQUINOX_J2000
__all__ = ['GeocentricTrueEcliptic', 'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic']
class GeocentricTrueEcliptic(BaseCoordinateFrame):
"""
    Geocentric ecliptic coordinates. The origin of the coordinates is the
geocenter (Earth), with the x axis pointing to the *true* (not mean) equinox
at the time specified by the ``equinox`` attribute, and the xy-plane in the
plane of the ecliptic for that date.
Be aware that the definition of "geocentric" here means that this frame
*includes* light deflection from the sun, aberration, etc when transforming
to/from e.g. ICRS.
This frame has one frame attribute:
* ``equinox``
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth (necessary for transformation to
non-geocentric systems).
.. warning::
In the current version of astropy, the ecliptic frames do not yet have
        stringent accuracy tests. We recommend you test against "known-good" cases
        to ensure these frames are what you are looking for (and then ideally
you would contribute these tests to Astropy!)
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
lon : `Angle`, optional, must be keyword
The ecliptic longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat : `Angle`, optional, must be keyword
The ecliptic latitude for this object (``lon`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object from the geocenter.
(``representation`` must be None).
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
"""
default_representation = SphericalRepresentation
equinox = TimeFrameAttribute(default=EQUINOX_J2000)
class BarycentricTrueEcliptic(BaseCoordinateFrame):
"""
    Barycentric ecliptic coordinates. The origin of the coordinates is the
barycenter of the solar system, with the x axis pointing in the direction of
    the *true* (not mean) equinox at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
This frame has one frame attribute:
* ``equinox``
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
.. warning::
In the current version of astropy, the ecliptic frames do not yet have
        stringent accuracy tests. We recommend you test against "known-good" cases
        to ensure these frames are what you are looking for (and then ideally
you would contribute these tests to Astropy!)
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
l : `Angle`, optional, must be keyword
The ecliptic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `Angle`, optional, must be keyword
The ecliptic latitude for this object (``l`` must also be given and
``representation`` must be None).
r : `~astropy.units.Quantity`, optional, must be keyword
        The Distance for this object from the barycenter of the solar system.
(``representation`` must be None).
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
"""
default_representation = SphericalRepresentation
equinox = TimeFrameAttribute(default=EQUINOX_J2000)
class HeliocentricTrueEcliptic(BaseCoordinateFrame):
"""
    Heliocentric ecliptic coordinates. The origin of the coordinates is the
center of the sun, with the x axis pointing in the direction of
    the *true* (not mean) equinox at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
This frame has one frame attribute:
* ``equinox``
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
.. warning::
In the current version of astropy, the ecliptic frames do not yet have
        stringent accuracy tests. We recommend you test against "known-good" cases
        to ensure these frames are what you are looking for (and then ideally
you would contribute these tests to Astropy!)
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
l : `Angle`, optional, must be keyword
The ecliptic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `Angle`, optional, must be keyword
The ecliptic latitude for this object (``l`` must also be given and
``representation`` must be None).
r : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object from the sun's center.
(``representation`` must be None).
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
"""
default_representation = SphericalRepresentation
equinox = TimeFrameAttribute(default=EQUINOX_J2000)
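# Illustrative sketch (not part of the original module): constructing one of these
# frames directly, assuming the usual astropy units/Quantity API::
#
#     from astropy import units as u
#     ecl = GeocentricTrueEcliptic(lon=45*u.deg, lat=10*u.deg,
#                                  distance=1*u.au, equinox='J2010')
#
# Transforming to or from other frames (e.g. ICRS) additionally requires the
# transformation functions registered elsewhere in astropy.coordinates.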
| tbabej/astropy | astropy/coordinates/builtin_frames/ecliptic.py | Python | bsd-3-clause | 6,080 | 0.000987 |
#!/usr/bin/python
"""Preferences Editor for Terminator.
Load a UIBuilder config file, display it,
populate it with our current config, then optionally read that back out and
write it to a config file
"""
import os
import gtk
from util import dbg, err
import config
from keybindings import Keybindings, KeymapError
from translation import _
from encoding import TerminatorEncoding
from terminator import Terminator
from plugin import PluginRegistry
def color2hex(widget):
"""Pull the colour values out of a Gtk ColorPicker widget and return them
    as 8-bit hex values, since its default behaviour is to give 16-bit values"""
widcol = widget.get_color()
return('#%02x%02x%02x' % (widcol.red>>8, widcol.green>>8, widcol.blue>>8))
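# Worked example (illustrative, not part of the original): Gtk reports each channel
# as a 16-bit value (0..65535), so pure red (65535, 0, 0) shifts down to
# 65535 >> 8 == 255 per channel and formats as '#ff0000' via the '%02x' fields above.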
# FIXME: We need to check that we have represented all of Config() below
class PrefsEditor:
"""Class implementing the various parts of the preferences editor"""
config = None
registry = None
plugins = None
keybindings = None
window = None
builder = None
layouteditor = None
previous_layout_selection = None
previous_profile_selection = None
colorschemevalues = {'black_on_yellow': 0,
'black_on_white': 1,
'grey_on_black': 2,
'green_on_black': 3,
'white_on_black': 4,
'orange_on_black': 5,
'ambience': 6,
'solarized_light': 7,
'solarized_dark': 8,
'custom': 9}
colourschemes = {'grey_on_black': ['#aaaaaa', '#000000'],
'black_on_yellow': ['#000000', '#ffffdd'],
'black_on_white': ['#000000', '#ffffff'],
'white_on_black': ['#ffffff', '#000000'],
'green_on_black': ['#00ff00', '#000000'],
'orange_on_black': ['#e53c00', '#000000'],
'ambience': ['#ffffff', '#300a24'],
'solarized_light': ['#657b83', '#fdf6e3'],
'solarized_dark': ['#839496', '#002b36']}
palettevalues = {'tango': 0,
'linux': 1,
'xterm': 2,
'rxvt': 3,
'ambience': 4,
'solarized': 5,
'custom': 6}
palettes = {'tango': '#000000:#cc0000:#4e9a06:#c4a000:#3465a4:\
#75507b:#06989a:#d3d7cf:#555753:#ef2929:#8ae234:#fce94f:#729fcf:\
#ad7fa8:#34e2e2:#eeeeec',
'linux': '#000000:#aa0000:#00aa00:#aa5500:#0000aa:\
#aa00aa:#00aaaa:#aaaaaa:#555555:#ff5555:#55ff55:#ffff55:#5555ff:\
#ff55ff:#55ffff:#ffffff',
'xterm': '#000000:#cd0000:#00cd00:#cdcd00:#1e90ff:\
#cd00cd:#00cdcd:#e5e5e5:#4c4c4c:#ff0000:#00ff00:#ffff00:#4682b4:\
#ff00ff:#00ffff:#ffffff',
'rxvt': '#000000:#cd0000:#00cd00:#cdcd00:#0000cd:\
#cd00cd:#00cdcd:#faebd7:#404040:#ff0000:#00ff00:#ffff00:#0000ff:\
#ff00ff:#00ffff:#ffffff',
'ambience': '#2e3436:#cc0000:#4e9a06:#c4a000:\
#3465a4:#75507b:#06989a:#d3d7cf:#555753:#ef2929:#8ae234:#fce94f:\
#729fcf:#ad7fa8:#34e2e2:#eeeeec',
'solarized': '#073642:#dc322f:#859900:#b58900:\
#268bd2:#d33682:#2aa198:#eee8d5:#002b36:#cb4b16:#586e75:#657b83:\
#839496:#6c71c4:#93a1a1:#fdf6e3'}
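    # Illustrative note (not part of the original): each palette above is one
    # colon-separated string of 16 hex colours, so e.g.
    #   PrefsEditor.palettes['tango'].split(':')[0] == '#000000'
    # which is how on_palette_combobox_changed() below feeds the colour pickers.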
keybindingnames = { 'zoom_in' : 'Increase font size',
'zoom_out' : 'Decrease font size',
'zoom_normal' : 'Restore original font size',
'new_tab' : 'Create a new tab',
'cycle_next' : 'Focus the next terminal',
'cycle_prev' : 'Focus the previous terminal',
'go_next' : 'Focus the next terminal',
'go_prev' : 'Focus the previous terminal',
'go_up' : 'Focus the terminal above',
'go_down' : 'Focus the terminal below',
'go_left' : 'Focus the terminal left',
'go_right' : 'Focus the terminal right',
'rotate_cw' : 'Rotate terminals clockwise',
'rotate_ccw' : 'Rotate terminals counter-clockwise',
'split_horiz' : 'Split horizontally',
'split_vert' : 'Split vertically',
'close_term' : 'Close terminal',
'copy' : 'Copy selected text',
'paste' : 'Paste clipboard',
'toggle_scrollbar' : 'Show/Hide the scrollbar',
'search' : 'Search terminal scrollback',
'close_window' : 'Close window',
'resize_up' : 'Resize the terminal up',
'resize_down' : 'Resize the terminal down',
'resize_left' : 'Resize the terminal left',
'resize_right' : 'Resize the terminal right',
'move_tab_right' : 'Move the tab right',
'move_tab_left' : 'Move the tab left',
'toggle_zoom' : 'Maximise terminal',
'scaled_zoom' : 'Zoom terminal',
'next_tab' : 'Switch to the next tab',
'prev_tab' : 'Switch to the previous tab',
'switch_to_tab_1' : 'Switch to the first tab',
'switch_to_tab_2' : 'Switch to the second tab',
'switch_to_tab_3' : 'Switch to the third tab',
'switch_to_tab_4' : 'Switch to the fourth tab',
'switch_to_tab_5' : 'Switch to the fifth tab',
'switch_to_tab_6' : 'Switch to the sixth tab',
'switch_to_tab_7' : 'Switch to the seventh tab',
'switch_to_tab_8' : 'Switch to the eighth tab',
'switch_to_tab_9' : 'Switch to the ninth tab',
'switch_to_tab_10' : 'Switch to the tenth tab',
'full_screen' : 'Toggle fullscreen',
'reset' : 'Reset the terminal',
'reset_clear' : 'Reset and clear the terminal',
'hide_window' : 'Toggle window visibility',
'group_all' : 'Group all terminals',
'ungroup_all' : 'Ungroup all terminals',
'group_tab' : 'Group terminals in tab',
'ungroup_tab' : 'Ungroup terminals in tab',
'new_window' : 'Create a new window',
'new_terminator' : 'Spawn a new Terminator process',
'broadcast_off' : 'Don\'t broadcast key presses',
'broadcast_group' : 'Broadcast key presses to group',
'broadcast_all' : 'Broadcast key events to all',
'insert_number' : 'Insert terminal number',
'insert_padded' : 'Insert zero padded terminal number',
'edit_window_title': 'Edit window title'
}
def __init__ (self, term):
self.config = config.Config()
self.term = term
self.builder = gtk.Builder()
self.keybindings = Keybindings()
try:
# Figure out where our library is on-disk so we can open our
(head, _tail) = os.path.split(config.__file__)
librarypath = os.path.join(head, 'preferences.glade')
gladefile = open(librarypath, 'r')
gladedata = gladefile.read()
except Exception, ex:
print "Failed to find preferences.glade"
print ex
return
self.builder.add_from_string(gladedata)
self.window = self.builder.get_object('prefswin')
self.layouteditor = LayoutEditor(self.builder)
self.builder.connect_signals(self)
self.layouteditor.prepare()
self.window.show_all()
try:
self.config.inhibit_save()
self.set_values()
except Exception, e:
err('Unable to set values: %s' % e)
self.config.uninhibit_save()
def on_closebutton_clicked(self, _button):
"""Close the window"""
terminator = Terminator()
terminator.reconfigure()
self.window.destroy()
del(self)
def set_values(self):
"""Update the preferences window with all the configuration from
Config()"""
guiget = self.builder.get_object
## Global tab
# Mouse focus
focus = self.config['focus']
active = 0
if focus == 'click':
active = 1
elif focus in ['sloppy', 'mouse']:
active = 2
widget = guiget('focuscombo')
widget.set_active(active)
# Terminal separator size
termsepsize = self.config['handle_size']
widget = guiget('handlesize')
widget.set_value(float(termsepsize))
# Window geometry hints
geomhint = self.config['geometry_hinting']
widget = guiget('wingeomcheck')
widget.set_active(geomhint)
# Window state
option = self.config['window_state']
if option == 'hidden':
active = 1
elif option == 'maximise':
active = 2
elif option == 'fullscreen':
active = 3
else:
active = 0
widget = guiget('winstatecombo')
widget.set_active(active)
# Window borders
widget = guiget('winbordercheck')
widget.set_active(not self.config['borderless'])
# Tab bar position
option = self.config['tab_position']
widget = guiget('tabposcombo')
if option == 'bottom':
active = 1
elif option == 'left':
active = 2
elif option == 'right':
active = 3
elif option == 'hidden':
active = 4
else:
active = 0
widget.set_active(active)
# DBus Server
widget = guiget('dbuscheck')
widget.set_active(self.config['dbus'])
#Hide from taskbar
widget = guiget('hidefromtaskbcheck')
widget.set_active(self.config['hide_from_taskbar'])
#Always on top
widget = guiget('alwaysontopcheck')
widget.set_active(self.config['always_on_top'])
#Hide on lose focus
widget = guiget('hideonlosefocuscheck')
widget.set_active(self.config['hide_on_lose_focus'])
#Show on all workspaces
widget = guiget('stickycheck')
widget.set_active(self.config['sticky'])
#Hide size text from the title bar
widget = guiget('title_hide_sizetextcheck')
widget.set_active(self.config['title_hide_sizetext'])
#Always split with profile
widget = guiget('always_split_with_profile')
widget.set_active(self.config['always_split_with_profile'])
## Profile tab
# Populate the profile list
widget = guiget('profilelist')
liststore = widget.get_model()
profiles = self.config.list_profiles()
self.profileiters = {}
for profile in profiles:
if profile == 'default':
editable = False
else:
editable = True
self.profileiters[profile] = liststore.append([profile, editable])
selection = widget.get_selection()
selection.connect('changed', self.on_profile_selection_changed)
selection.select_iter(self.profileiters['default'])
## Layouts tab
widget = guiget('layoutlist')
liststore = widget.get_model()
layouts = self.config.list_layouts()
self.layoutiters = {}
for layout in layouts:
if layout == 'default':
editable = False
else:
editable = True
self.layoutiters[layout] = liststore.append([layout, editable])
selection = widget.get_selection()
selection.connect('changed', self.on_layout_selection_changed)
selection.select_iter(self.layoutiters['default'])
# Now set up the selection changed handler for the layout itself
widget = guiget('LayoutTreeView')
selection = widget.get_selection()
selection.connect('changed', self.on_layout_item_selection_changed)
## Keybindings tab
widget = guiget('keybindingtreeview')
liststore = widget.get_model()
liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
keybindings = self.config['keybindings']
for keybinding in keybindings:
keyval = 0
mask = 0
value = keybindings[keybinding]
if value is not None and value != '':
try:
(keyval, mask) = self.keybindings._parsebinding(value)
except KeymapError:
pass
liststore.append([keybinding, self.keybindingnames[keybinding],
keyval, mask])
## Plugins tab
# Populate the plugin list
widget = guiget('pluginlist')
liststore = widget.get_model()
self.registry = PluginRegistry()
self.pluginiters = {}
pluginlist = self.registry.get_available_plugins()
self.plugins = {}
for plugin in pluginlist:
self.plugins[plugin] = self.registry.is_enabled(plugin)
for plugin in self.plugins:
self.pluginiters[plugin] = liststore.append([plugin,
self.plugins[plugin]])
selection = widget.get_selection()
selection.connect('changed', self.on_plugin_selection_changed)
if len(self.pluginiters) > 0:
selection.select_iter(liststore.get_iter_first())
def set_profile_values(self, profile):
"""Update the profile values for a given profile"""
self.config.set_profile(profile)
guiget = self.builder.get_object
dbg('PrefsEditor::set_profile_values: Setting profile %s' % profile)
## General tab
# Use system font
widget = guiget('system_font_checkbutton')
widget.set_active(self.config['use_system_font'])
self.on_system_font_checkbutton_toggled(widget)
# Font selector
widget = guiget('font_selector')
if self.config['use_system_font'] == True:
fontname = self.config.get_system_font()
if fontname is not None:
widget.set_font_name(fontname)
else:
widget.set_font_name(self.config['font'])
# Allow bold text
widget = guiget('allow_bold_checkbutton')
widget.set_active(self.config['allow_bold'])
# Anti-alias
widget = guiget('antialias_checkbutton')
widget.set_active(self.config['antialias'])
# Icon terminal bell
widget = guiget('icon_bell_checkbutton')
widget.set_active(self.config['icon_bell'])
# Visual terminal bell
widget = guiget('visual_bell_checkbutton')
widget.set_active(self.config['visible_bell'])
# Audible terminal bell
widget = guiget('audible_bell_checkbutton')
widget.set_active(self.config['audible_bell'])
# WM_URGENT terminal bell
widget = guiget('urgent_bell_checkbutton')
widget.set_active(self.config['urgent_bell'])
# Show titlebar
widget = guiget('show_titlebar')
widget.set_active(self.config['show_titlebar'])
# Copy on selection
widget = guiget('copy_on_selection')
widget.set_active(self.config['copy_on_selection'])
# Word chars
widget = guiget('word_chars_entry')
widget.set_text(self.config['word_chars'])
# Cursor shape
widget = guiget('cursor_shape_combobox')
if self.config['cursor_shape'] == 'underline':
active = 1
elif self.config['cursor_shape'] == 'ibeam':
active = 2
else:
active = 0
widget.set_active(active)
# Cursor blink
widget = guiget('cursor_blink')
widget.set_active(self.config['cursor_blink'])
# Cursor colour
widget = guiget('cursor_color')
try:
widget.set_color(gtk.gdk.Color(self.config['cursor_color']))
except ValueError:
self.config['cursor_color'] = "#FFFFFF"
widget.set_color(gtk.gdk.Color(self.config['cursor_color']))
## Command tab
# Login shell
widget = guiget('login_shell_checkbutton')
widget.set_active(self.config['login_shell'])
# Login records
widget = guiget('update_records_checkbutton')
widget.set_active(self.config['update_records'])
# Use Custom command
widget = guiget('use_custom_command_checkbutton')
widget.set_active(self.config['use_custom_command'])
self.on_use_custom_command_checkbutton_toggled(widget)
# Custom Command
widget = guiget('custom_command_entry')
widget.set_text(self.config['custom_command'])
# Exit action
widget = guiget('exit_action_combobox')
if self.config['exit_action'] == 'restart':
widget.set_active(1)
elif self.config['exit_action'] == 'hold':
widget.set_active(2)
else:
# Default is to close the terminal
widget.set_active(0)
## Colors tab
# Use system colors
widget = guiget('use_theme_colors_checkbutton')
widget.set_active(self.config['use_theme_colors'])
# Colorscheme
widget = guiget('color_scheme_combobox')
scheme = None
for ascheme in self.colourschemes:
forecol = self.colourschemes[ascheme][0]
backcol = self.colourschemes[ascheme][1]
if self.config['foreground_color'].lower() == forecol and \
self.config['background_color'].lower() == backcol:
scheme = ascheme
break
if scheme not in self.colorschemevalues:
if self.config['foreground_color'] in [None, ''] or \
self.config['background_color'] in [None, '']:
scheme = 'grey_on_black'
else:
scheme = 'custom'
# NOTE: The scheme is set in the GUI widget after the fore/back colours
# Foreground color
widget = guiget('foreground_colorpicker')
widget.set_color(gtk.gdk.Color(self.config['foreground_color']))
if scheme == 'custom':
widget.set_sensitive(True)
else:
widget.set_sensitive(False)
# Background color
widget = guiget('background_colorpicker')
widget.set_color(gtk.gdk.Color(self.config['background_color']))
if scheme == 'custom':
widget.set_sensitive(True)
else:
widget.set_sensitive(False)
# Now actually set the scheme
widget = guiget('color_scheme_combobox')
widget.set_active(self.colorschemevalues[scheme])
# Palette scheme
widget = guiget('palette_combobox')
palette = None
for apalette in self.palettes:
if self.config['palette'].lower() == self.palettes[apalette]:
palette = apalette
if palette not in self.palettevalues:
if self.config['palette'] in [None, '']:
palette = 'rxvt'
else:
palette = 'custom'
# NOTE: The palette selector is set after the colour pickers
# Palette colour pickers
colourpalette = self.config['palette'].split(':')
for i in xrange(1, 17):
widget = guiget('palette_colorpicker_%d' % i)
widget.set_color(gtk.gdk.Color(colourpalette[i - 1]))
# Now set the palette selector widget
widget = guiget('palette_combobox')
widget.set_active(self.palettevalues[palette])
# Titlebar colors
for bit in ['title_transmit_fg_color', 'title_transmit_bg_color',
'title_receive_fg_color', 'title_receive_bg_color',
'title_inactive_fg_color', 'title_inactive_bg_color']:
widget = guiget(bit)
widget.set_color(gtk.gdk.Color(self.config[bit]))
# Inactive terminal shading
widget = guiget('inactive_color_offset')
widget.set_value(float(self.config['inactive_color_offset']))
# Use custom URL handler
widget = guiget('use_custom_url_handler_checkbox')
widget.set_active(self.config['use_custom_url_handler'])
self.on_use_custom_url_handler_checkbutton_toggled(widget)
# Custom URL handler
widget = guiget('custom_url_handler_entry')
widget.set_text(self.config['custom_url_handler'])
## Background tab
# Radio values
if self.config['background_type'] == 'solid':
guiget('solid_radiobutton').set_active(True)
elif self.config['background_type'] == 'image':
guiget('image_radiobutton').set_active(True)
elif self.config['background_type'] == 'transparent':
guiget('transparent_radiobutton').set_active(True)
self.update_background_tab()
# Background image file
if self.config['background_image'] != '':
widget = guiget('background_image_filechooser')
if self.config['background_image'] is not None and \
self.config['background_image'] != '':
widget.set_filename(self.config['background_image'])
# Background image scrolls
widget = guiget('scroll_background_checkbutton')
widget.set_active(self.config['scroll_background'])
# Background shading
widget = guiget('background_darkness_scale')
widget.set_value(float(self.config['background_darkness']))
## Scrolling tab
# Scrollbar position
widget = guiget('scrollbar_position_combobox')
value = self.config['scrollbar_position']
if value == 'left':
widget.set_active(0)
elif value in ['disabled', 'hidden']:
widget.set_active(2)
else:
widget.set_active(1)
# Scrollback lines
widget = guiget('scrollback_lines_spinbutton')
widget.set_value(self.config['scrollback_lines'])
# Scrollback infinite
widget = guiget('scrollback_infinite')
widget.set_active(self.config['scrollback_infinite'])
        # Scroll on output
widget = guiget('scroll_on_output_checkbutton')
widget.set_active(self.config['scroll_on_output'])
# Scroll on keystroke
widget = guiget('scroll_on_keystroke_checkbutton')
widget.set_active(self.config['scroll_on_keystroke'])
# Scroll in alternate mode
widget = guiget('alternate_screen_scroll_checkbutton')
widget.set_active(self.config['alternate_screen_scroll'])
## Compatibility tab
# Backspace key
widget = guiget('backspace_binding_combobox')
value = self.config['backspace_binding']
if value == 'control-h':
widget.set_active(1)
elif value == 'ascii-del':
widget.set_active(2)
elif value == 'escape-sequence':
widget.set_active(3)
else:
widget.set_active(0)
# Delete key
widget = guiget('delete_binding_combobox')
value = self.config['delete_binding']
if value == 'control-h':
widget.set_active(1)
elif value == 'ascii-del':
widget.set_active(2)
elif value == 'escape-sequence':
widget.set_active(3)
else:
widget.set_active(0)
# Encoding
rowiter = None
widget = guiget('encoding_combobox')
encodingstore = guiget('EncodingListStore')
value = self.config['encoding']
encodings = TerminatorEncoding().get_list()
encodings.sort(lambda x, y: cmp(x[2].lower(), y[2].lower()))
for encoding in encodings:
if encoding[1] is None:
continue
label = "%s %s" % (encoding[2], encoding[1])
rowiter = encodingstore.append([label, encoding[1]])
if encoding[1] == value:
widget.set_active_iter(rowiter)
def set_layout(self, layout_name):
"""Set a layout"""
self.layouteditor.set_layout(layout_name)
def on_wingeomcheck_toggled(self, widget):
"""Window geometry setting changed"""
self.config['geometry_hinting'] = widget.get_active()
self.config.save()
def on_dbuscheck_toggled(self, widget):
"""DBus server setting changed"""
self.config['dbus'] = widget.get_active()
self.config.save()
def on_winbordercheck_toggled(self, widget):
"""Window border setting changed"""
self.config['borderless'] = not widget.get_active()
self.config.save()
def on_hidefromtaskbcheck_toggled(self, widget):
"""Hide from taskbar setting changed"""
self.config['hide_from_taskbar'] = widget.get_active()
self.config.save()
def on_alwaysontopcheck_toggled(self, widget):
"""Always on top setting changed"""
self.config['always_on_top'] = widget.get_active()
self.config.save()
def on_hideonlosefocuscheck_toggled(self, widget):
"""Hide on lose focus setting changed"""
self.config['hide_on_lose_focus'] = widget.get_active()
self.config.save()
def on_stickycheck_toggled(self, widget):
"""Sticky setting changed"""
self.config['sticky'] = widget.get_active()
self.config.save()
def on_title_hide_sizetextcheck_toggled(self, widget):
"""Window geometry setting changed"""
self.config['title_hide_sizetext'] = widget.get_active()
self.config.save()
def on_always_split_with_profile_toggled(self, widget):
"""Always split with profile setting changed"""
self.config['always_split_with_profile'] = widget.get_active()
self.config.save()
def on_allow_bold_checkbutton_toggled(self, widget):
"""Allow bold setting changed"""
self.config['allow_bold'] = widget.get_active()
self.config.save()
def on_antialias_checkbutton_toggled(self, widget):
"""Anti-alias setting changed"""
self.config['antialias'] = widget.get_active()
self.config.save()
def on_show_titlebar_toggled(self, widget):
"""Show titlebar setting changed"""
self.config['show_titlebar'] = widget.get_active()
self.config.save()
def on_copy_on_selection_toggled(self, widget):
"""Copy on selection setting changed"""
self.config['copy_on_selection'] = widget.get_active()
self.config.save()
def on_cursor_blink_toggled(self, widget):
"""Cursor blink setting changed"""
self.config['cursor_blink'] = widget.get_active()
self.config.save()
def on_icon_bell_checkbutton_toggled(self, widget):
"""Icon bell setting changed"""
self.config['icon_bell'] = widget.get_active()
self.config.save()
def on_visual_bell_checkbutton_toggled(self, widget):
"""Visual bell setting changed"""
self.config['visible_bell'] = widget.get_active()
self.config.save()
def on_audible_bell_checkbutton_toggled(self, widget):
"""Audible bell setting changed"""
self.config['audible_bell'] = widget.get_active()
self.config.save()
def on_urgent_bell_checkbutton_toggled(self, widget):
"""Window manager bell setting changed"""
self.config['urgent_bell'] = widget.get_active()
self.config.save()
def on_login_shell_checkbutton_toggled(self, widget):
"""Login shell setting changed"""
self.config['login_shell'] = widget.get_active()
self.config.save()
def on_update_records_checkbutton_toggled(self, widget):
"""Update records setting changed"""
self.config['update_records'] = widget.get_active()
self.config.save()
def on_scroll_background_checkbutton_toggled(self, widget):
"""Scroll background setting changed"""
self.config['scroll_background'] = widget.get_active()
self.config.save()
def on_alternate_screen_scroll_checkbutton_toggled(self, widget):
"""Scroll in alt-mode setting changed"""
self.config['alternate_screen_scroll'] = widget.get_active()
self.config.save()
def on_scroll_on_keystroke_checkbutton_toggled(self, widget):
"""Scroll on keystrong setting changed"""
self.config['scroll_on_keystroke'] = widget.get_active()
self.config.save()
def on_scroll_on_output_checkbutton_toggled(self, widget):
"""Scroll on output setting changed"""
self.config['scroll_on_output'] = widget.get_active()
self.config.save()
def on_delete_binding_combobox_changed(self, widget):
"""Delete binding setting changed"""
selected = widget.get_active()
if selected == 1:
value = 'control-h'
elif selected == 2:
value = 'ascii-del'
elif selected == 3:
value = 'escape-sequence'
else:
value = 'automatic'
self.config['delete_binding'] = value
self.config.save()
def on_backspace_binding_combobox_changed(self, widget):
"""Backspace binding setting changed"""
selected = widget.get_active()
if selected == 1:
value = 'control-h'
elif selected == 2:
value = 'ascii-del'
elif selected == 3:
            value = 'escape-sequence'
else:
value = 'automatic'
self.config['backspace_binding'] = value
self.config.save()
def on_encoding_combobox_changed(self, widget):
"""Encoding setting changed"""
selected = widget.get_active_iter()
liststore = widget.get_model()
value = liststore.get_value(selected, 1)
self.config['encoding'] = value
self.config.save()
def on_scrollback_lines_spinbutton_value_changed(self, widget):
"""Scrollback lines setting changed"""
value = widget.get_value_as_int()
self.config['scrollback_lines'] = value
self.config.save()
def on_scrollback_infinite_toggled(self, widget):
"""Scrollback infiniteness changed"""
spinbutton = self.builder.get_object('scrollback_lines_spinbutton')
value = widget.get_active()
if value == True:
spinbutton.set_sensitive(False)
else:
spinbutton.set_sensitive(True)
self.config['scrollback_infinite'] = value
self.config.save()
def on_scrollbar_position_combobox_changed(self, widget):
"""Scrollbar position setting changed"""
selected = widget.get_active()
if selected == 1:
value = 'right'
elif selected == 2:
value = 'hidden'
else:
value = 'left'
self.config['scrollbar_position'] = value
self.config.save()
def on_darken_background_scale_change_value(self, widget, scroll, value):
"""Background darkness setting changed"""
self.config['background_darkness'] = round(value, 2)
self.config.save()
def on_background_image_filechooser_file_set(self, widget):
"""Background image setting changed"""
self.config['background_image'] = widget.get_filename()
self.config.save()
def on_palette_combobox_changed(self, widget):
"""Palette selector changed"""
value = None
guiget = self.builder.get_object
active = widget.get_active()
for key in self.palettevalues.keys():
if self.palettevalues[key] == active:
value = key
if value == 'custom':
sensitive = True
else:
sensitive = False
for num in xrange(1, 17):
picker = guiget('palette_colorpicker_%d' % num)
picker.set_sensitive(sensitive)
if value in self.palettes:
palette = self.palettes[value]
palettebits = palette.split(':')
for num in xrange(1, 17):
# Update the visible elements
picker = guiget('palette_colorpicker_%d' % num)
picker.set_color(gtk.gdk.Color(palettebits[num - 1]))
elif value == 'custom':
palettebits = []
for num in xrange(1, 17):
picker = guiget('palette_colorpicker_%d' % num)
palettebits.append(color2hex(picker))
palette = ':'.join(palettebits)
else:
err('Unknown palette value: %s' % value)
return
self.config['palette'] = palette
self.config.save()
def on_background_colorpicker_color_set(self, widget):
"""Background color changed"""
self.config['background_color'] = color2hex(widget)
self.config.save()
def on_foreground_colorpicker_color_set(self, widget):
"""Foreground color changed"""
self.config['foreground_color'] = color2hex(widget)
self.config.save()
def on_palette_colorpicker_color_set(self, widget):
"""A palette colour changed"""
palette = None
palettebits = []
guiget = self.builder.get_object
# FIXME: We do this at least once elsewhere. refactor!
for num in xrange(1, 17):
picker = guiget('palette_colorpicker_%d' % num)
value = color2hex(picker)
palettebits.append(value)
palette = ':'.join(palettebits)
self.config['palette'] = palette
self.config.save()
def on_exit_action_combobox_changed(self, widget):
"""Exit action changed"""
selected = widget.get_active()
if selected == 1:
value = 'restart'
elif selected == 2:
value = 'hold'
else:
value = 'close'
self.config['exit_action'] = value
self.config.save()
def on_custom_url_handler_entry_changed(self, widget):
"""Custom URL handler value changed"""
self.config['custom_url_handler'] = widget.get_text()
self.config.save()
def on_custom_command_entry_changed(self, widget):
"""Custom command value changed"""
self.config['custom_command'] = widget.get_text()
self.config.save()
def on_cursor_color_color_set(self, widget):
"""Cursor colour changed"""
self.config['cursor_color'] = color2hex(widget)
self.config.save()
def on_cursor_shape_combobox_changed(self, widget):
"""Cursor shape changed"""
selected = widget.get_active()
if selected == 1:
value = 'underline'
elif selected == 2:
value = 'ibeam'
else:
value = 'block'
self.config['cursor_shape'] = value
self.config.save()
def on_word_chars_entry_changed(self, widget):
"""Word characters changed"""
self.config['word_chars'] = widget.get_text()
self.config.save()
def on_font_selector_font_set(self, widget):
"""Font changed"""
self.config['font'] = widget.get_font_name()
self.config.save()
def on_title_receive_bg_color_color_set(self, widget):
"""Title receive background colour changed"""
self.config['title_receive_bg_color'] = color2hex(widget)
self.config.save()
def on_title_receive_fg_color_color_set(self, widget):
"""Title receive foreground colour changed"""
self.config['title_receive_fg_color'] = color2hex(widget)
self.config.save()
def on_title_inactive_bg_color_color_set(self, widget):
"""Title inactive background colour changed"""
self.config['title_inactive_bg_color'] = color2hex(widget)
self.config.save()
def on_title_transmit_bg_color_color_set(self, widget):
"""Title transmit backgruond colour changed"""
self.config['title_transmit_bg_color'] = color2hex(widget)
self.config.save()
def on_title_inactive_fg_color_color_set(self, widget):
"""Title inactive foreground colour changed"""
self.config['title_inactive_fg_color'] = color2hex(widget)
self.config.save()
def on_title_transmit_fg_color_color_set(self, widget):
"""Title transmit foreground colour changed"""
self.config['title_transmit_fg_color'] = color2hex(widget)
self.config.save()
def on_inactive_color_offset_change_value(self, widget, scroll, value):
"""Inactive color offset setting changed"""
self.config['inactive_color_offset'] = round(value, 2)
self.config.save()
def on_handlesize_change_value(self, widget, scroll, value):
"""Handle size changed"""
value = int(value)
if value > 5:
value = 5
self.config['handle_size'] = value
self.config.save()
def on_focuscombo_changed(self, widget):
"""Focus type changed"""
selected = widget.get_active()
if selected == 1:
value = 'click'
elif selected == 2:
value = 'mouse'
else:
value = 'system'
self.config['focus'] = value
self.config.save()
def on_tabposcombo_changed(self, widget):
"""Tab position changed"""
selected = widget.get_active()
if selected == 1:
value = 'bottom'
elif selected == 2:
value = 'left'
elif selected == 3:
value = 'right'
elif selected == 4:
value = 'hidden'
else:
value = 'top'
self.config['tab_position'] = value
self.config.save()
def on_winstatecombo_changed(self, widget):
"""Window state changed"""
selected = widget.get_active()
if selected == 1:
value = 'hidden'
elif selected == 2:
value = 'maximise'
elif selected == 3:
value = 'fullscreen'
else:
value = 'normal'
self.config['window_state'] = value
self.config.save()
def on_profileaddbutton_clicked(self, _button):
"""Add a new profile to the list"""
guiget = self.builder.get_object
treeview = guiget('profilelist')
model = treeview.get_model()
values = [ r[0] for r in model ]
newprofile = _('New Profile')
if newprofile in values:
i = 1
while newprofile in values:
i = i + 1
newprofile = '%s %d' % (_('New Profile'), i)
if self.config.add_profile(newprofile):
res = model.append([newprofile, True])
if res:
path = model.get_path(res)
treeview.set_cursor(path, focus_column=treeview.get_column(0),
start_editing=True)
self.layouteditor.update_profiles()
def on_profileremovebutton_clicked(self, _button):
"""Remove a profile from the list"""
guiget = self.builder.get_object
treeview = guiget('profilelist')
selection = treeview.get_selection()
(model, rowiter) = selection.get_selected()
profile = model.get_value(rowiter, 0)
if profile == 'default':
# We shouldn't let people delete this profile
return
self.previous_profile_selection = None
self.config.del_profile(profile)
model.remove(rowiter)
selection.select_iter(model.get_iter_first())
self.layouteditor.update_profiles()
def on_layoutaddbutton_clicked(self, _button):
"""Add a new layout to the list"""
terminator = Terminator()
current_layout = terminator.describe_layout()
guiget = self.builder.get_object
treeview = guiget('layoutlist')
model = treeview.get_model()
values = [ r[0] for r in model ]
name = _('New Layout')
if name in values:
i = 1
while name in values:
i = i + 1
name = '%s %d' % (_('New Layout'), i)
if self.config.add_layout(name, current_layout):
res = model.append([name, True])
if res:
path = model.get_path(res)
treeview.set_cursor(path, focus_column=treeview.get_column(0),
start_editing=True)
self.config.save()
def on_layoutremovebutton_clicked(self, _button):
"""Remove a layout from the list"""
guiget = self.builder.get_object
treeview = guiget('layoutlist')
selection = treeview.get_selection()
(model, rowiter) = selection.get_selected()
layout = model.get_value(rowiter, 0)
if layout == 'default':
# We shouldn't let people delete this layout
return
self.previous_selection = None
self.config.del_layout(layout)
model.remove(rowiter)
selection.select_iter(model.get_iter_first())
self.config.save()
def on_use_custom_url_handler_checkbutton_toggled(self, checkbox):
"""Toggling the use_custom_url_handler checkbox needs to alter the
sensitivity of the custom_url_handler entrybox"""
guiget = self.builder.get_object
widget = guiget('custom_url_handler_entry')
value = checkbox.get_active()
widget.set_sensitive(value)
self.config['use_custom_url_handler'] = value
self.config.save()
def on_use_custom_command_checkbutton_toggled(self, checkbox):
"""Toggling the use_custom_command checkbox needs to alter the
sensitivity of the custom_command entrybox"""
guiget = self.builder.get_object
widget = guiget('custom_command_entry')
value = checkbox.get_active()
widget.set_sensitive(value)
self.config['use_custom_command'] = value
self.config.save()
def on_system_font_checkbutton_toggled(self, checkbox):
"""Toggling the use_system_font checkbox needs to alter the
sensitivity of the font selector"""
guiget = self.builder.get_object
widget = guiget('font_selector')
value = checkbox.get_active()
widget.set_sensitive(not value)
self.config['use_system_font'] = value
self.config.save()
def on_reset_compatibility_clicked(self, widget):
"""Reset the confusing and annoying backspace/delete options to the
safest values"""
guiget = self.builder.get_object
widget = guiget('backspace_binding_combobox')
widget.set_active(2)
widget = guiget('delete_binding_combobox')
widget.set_active(3)
def on_background_type_toggled(self, _widget):
"""The background type was toggled"""
self.update_background_tab()
def update_background_tab(self):
"""Update the background tab"""
guiget = self.builder.get_object
# Background type
backtype = None
imagewidget = guiget('image_radiobutton')
transwidget = guiget('transparent_radiobutton')
if transwidget.get_active() == True:
backtype = 'transparent'
elif imagewidget.get_active() == True:
backtype = 'image'
else:
backtype = 'solid'
self.config['background_type'] = backtype
self.config.save()
if backtype == 'image':
guiget('background_image_filechooser').set_sensitive(True)
guiget('scroll_background_checkbutton').set_sensitive(True)
else:
guiget('background_image_filechooser').set_sensitive(False)
guiget('scroll_background_checkbutton').set_sensitive(False)
if backtype == 'transparent':
guiget('darken_background_scale').set_sensitive(True)
else:
guiget('darken_background_scale').set_sensitive(False)
def on_profile_selection_changed(self, selection):
"""A different profile was selected"""
(listmodel, rowiter) = selection.get_selected()
if not rowiter:
# Something is wrong, just jump to the first item in the list
treeview = selection.get_tree_view()
liststore = treeview.get_model()
selection.select_iter(liststore.get_iter_first())
return
profile = listmodel.get_value(rowiter, 0)
self.set_profile_values(profile)
self.previous_profile_selection = profile
widget = self.builder.get_object('profileremovebutton')
if profile == 'default':
widget.set_sensitive(False)
else:
widget.set_sensitive(True)
def on_plugin_selection_changed(self, selection):
"""A different plugin was selected"""
(listmodel, rowiter) = selection.get_selected()
if not rowiter:
# Something is wrong, just jump to the first item in the list
treeview = selection.get_tree_view()
liststore = treeview.get_model()
selection.select_iter(liststore.get_iter_first())
return
plugin = listmodel.get_value(rowiter, 0)
self.set_plugin(plugin)
self.previous_plugin_selection = plugin
widget = self.builder.get_object('plugintogglebutton')
def on_plugin_toggled(self, cell, path):
"""A plugin has been enabled or disabled"""
treeview = self.builder.get_object('pluginlist')
model = treeview.get_model()
plugin = model[path][0]
if not self.plugins[plugin]:
# Plugin is currently disabled, load it
self.registry.enable(plugin)
else:
# Plugin is currently enabled, unload it
self.registry.disable(plugin)
self.plugins[plugin] = not self.plugins[plugin]
# Update the treeview
model[path][1] = self.plugins[plugin]
enabled_plugins = [x for x in self.plugins if self.plugins[x] == True]
self.config['enabled_plugins'] = enabled_plugins
self.config.save()
def set_plugin(self, plugin):
"""Show the preferences for the selected plugin, if any"""
pluginpanelabel = self.builder.get_object('pluginpanelabel')
pluginconfig = self.config.plugin_get_config(plugin)
# FIXME: Implement this, we need to auto-construct a UI for the plugin
def on_profile_name_edited(self, cell, path, newtext):
"""Update a profile name"""
oldname = cell.get_property('text')
if oldname == newtext or oldname == 'default':
return
dbg('PrefsEditor::on_profile_name_edited: Changing %s to %s' %
(oldname, newtext))
self.config.rename_profile(oldname, newtext)
self.config.save()
widget = self.builder.get_object('profilelist')
model = widget.get_model()
itera = model.get_iter(path)
model.set_value(itera, 0, newtext)
if oldname == self.previous_profile_selection:
self.previous_profile_selection = newtext
def on_layout_selection_changed(self, selection):
"""A different layout was selected"""
self.layouteditor.on_layout_selection_changed(selection)
def on_layout_item_selection_changed(self, selection):
"""A different item in the layout was selected"""
self.layouteditor.on_layout_item_selection_changed(selection)
def on_layout_profile_chooser_changed(self, widget):
"""A different profile has been selected for this item"""
self.layouteditor.on_layout_profile_chooser_changed(widget)
def on_layout_profile_command_changed(self, widget):
"""A different command has been entered for this item"""
self.layouteditor.on_layout_profile_command_activate(widget)
def on_layout_name_edited(self, cell, path, newtext):
"""Update a layout name"""
oldname = cell.get_property('text')
if oldname == newtext or oldname == 'default':
return
dbg('Changing %s to %s' % (oldname, newtext))
self.config.rename_layout(oldname, newtext)
self.config.save()
widget = self.builder.get_object('layoutlist')
model = widget.get_model()
itera = model.get_iter(path)
model.set_value(itera, 0, newtext)
if oldname == self.previous_layout_selection:
self.previous_layout_selection = newtext
if oldname == self.layouteditor.layout_name:
self.layouteditor.layout_name = newtext
def on_color_scheme_combobox_changed(self, widget):
"""Update the fore/background colour pickers"""
value = None
guiget = self.builder.get_object
active = widget.get_active()
for key in self.colorschemevalues.keys():
if self.colorschemevalues[key] == active:
value = key
fore = guiget('foreground_colorpicker')
back = guiget('background_colorpicker')
if value == 'custom':
fore.set_sensitive(True)
back.set_sensitive(True)
else:
fore.set_sensitive(False)
back.set_sensitive(False)
forecol = None
backcol = None
if value in self.colourschemes:
forecol = self.colourschemes[value][0]
backcol = self.colourschemes[value][1]
elif value == 'custom':
forecol = color2hex(fore)
backcol = color2hex(back)
else:
err('Unknown colourscheme value: %s' % value)
return
fore.set_color(gtk.gdk.Color(forecol))
back.set_color(gtk.gdk.Color(backcol))
self.config['foreground_color'] = forecol
self.config['background_color'] = backcol
self.config.save()
def on_use_theme_colors_checkbutton_toggled(self, widget):
"""Update colour pickers"""
guiget = self.builder.get_object
active = widget.get_active()
scheme = guiget('color_scheme_combobox')
fore = guiget('foreground_colorpicker')
back = guiget('background_colorpicker')
if active:
for widget in [scheme, fore, back]:
widget.set_sensitive(False)
else:
scheme.set_sensitive(True)
self.on_color_scheme_combobox_changed(scheme)
self.config['use_theme_colors'] = active
self.config.save()
def on_cellrenderer_accel_edited(self, liststore, path, key, mods, _code):
"""Handle an edited keybinding"""
celliter = liststore.get_iter_from_string(path)
liststore.set(celliter, 2, key, 3, mods)
binding = liststore.get_value(liststore.get_iter(path), 0)
accel = gtk.accelerator_name(key, mods)
self.config['keybindings'][binding] = accel
self.config.save()
def on_cellrenderer_accel_cleared(self, liststore, path):
"""Handle the clearing of a keybinding accelerator"""
celliter = liststore.get_iter_from_string(path)
liststore.set(celliter, 2, 0, 3, 0)
binding = liststore.get_value(liststore.get_iter(path), 0)
self.config['keybindings'][binding] = None
self.config.save()
class LayoutEditor:
profile_ids_to_profile = None
profile_profile_to_ids = None
layout_name = None
layout_item = None
builder = None
treeview = None
treestore = None
config = None
def __init__(self, builder):
"""Initialise ourself"""
self.config = config.Config()
self.builder = builder
def prepare(self, layout=None):
"""Do the things we can't do in __init__"""
self.treeview = self.builder.get_object('LayoutTreeView')
self.treestore = self.builder.get_object('LayoutTreeStore')
self.update_profiles()
if layout:
self.set_layout(layout)
def set_layout(self, layout_name):
"""Load a particular layout"""
self.layout_name = layout_name
store = self.treestore
layout = self.config.layout_get_config(layout_name)
listitems = {}
store.clear()
children = layout.keys()
i = 0
while children != []:
child = children.pop()
child_type = layout[child]['type']
parent = layout[child]['parent']
if child_type != 'Window' and parent not in layout:
# We have an orphan!
err('%s is an orphan in this layout. Discarding' % child)
continue
try:
parentiter = listitems[parent]
except KeyError:
if child_type == 'Window':
parentiter = None
else:
# We're not ready for this widget yet
children.insert(0, child)
continue
if child_type == 'VPaned':
child_type = 'Vertical split'
elif child_type == 'HPaned':
child_type = 'Horizontal split'
listitems[child] = store.append(parentiter, [child, child_type])
treeview = self.builder.get_object('LayoutTreeView')
treeview.expand_all()
def update_profiles(self):
"""Update the list of profiles"""
self.profile_ids_to_profile = {}
        self.profile_profile_to_ids = {}
chooser = self.builder.get_object('layout_profile_chooser')
model = chooser.get_model()
model.clear()
profiles = self.config.list_profiles()
profiles.sort()
i = 0
for profile in profiles:
self.profile_ids_to_profile[i] = profile
self.profile_profile_to_ids[profile] = i
model.append([profile])
i = i + 1
def on_layout_selection_changed(self, selection):
"""A different layout was selected"""
(listmodel, rowiter) = selection.get_selected()
if not rowiter:
# Something is wrong, just jump to the first item in the list
selection.select_iter(self.treestore.get_iter_first())
return
layout = listmodel.get_value(rowiter, 0)
self.set_layout(layout)
self.previous_layout_selection = layout
widget = self.builder.get_object('layoutremovebutton')
if layout == 'default':
widget.set_sensitive(False)
else:
widget.set_sensitive(True)
def on_layout_item_selection_changed(self, selection):
"""A different item in the layout was selected"""
(treemodel, rowiter) = selection.get_selected()
if not rowiter:
return
item = treemodel.get_value(rowiter, 0)
self.layout_item = item
self.set_layout_item(item)
def set_layout_item(self, item_name):
"""Set a layout item"""
layout = self.config.layout_get_config(self.layout_name)
layout_item = layout[self.layout_item]
command = self.builder.get_object('layout_profile_command')
chooser = self.builder.get_object('layout_profile_chooser')
if layout_item['type'] != 'Terminal':
command.set_sensitive(False)
chooser.set_sensitive(False)
return
command.set_sensitive(True)
chooser.set_sensitive(True)
if layout_item.has_key('command') and layout_item['command'] != '':
command.set_text(layout_item['command'])
else:
command.set_text('')
if layout_item.has_key('profile') and layout_item['profile'] != '':
chooser.set_active(self.profile_profile_to_ids[layout_item['profile']])
else:
chooser.set_active(0)
def on_layout_profile_chooser_changed(self, widget):
"""A new profile has been selected for this item"""
if not self.layout_item:
return
profile = widget.get_active_text()
layout = self.config.layout_get_config(self.layout_name)
layout[self.layout_item]['profile'] = profile
self.config.save()
def on_layout_profile_command_activate(self, widget):
"""A new command has been entered for this item"""
command = widget.get_text()
layout = self.config.layout_get_config(self.layout_name)
layout[self.layout_item]['command'] = command
self.config.save()
if __name__ == '__main__':
import util
util.DEBUG = True
import terminal
TERM = terminal.Terminal()
PREFEDIT = PrefsEditor(TERM)
gtk.main()
| FreedomBen/terminator | terminatorlib/prefseditor.py | Python | gpl-2.0 | 57,238 | 0.002009 |
from struct import unpack, pack, calcsize
from mobi_languages import LANGUAGES
from lz77 import uncompress
def LOG(*args):
pass
MOBI_HDR_FIELDS = (
("id", 16, "4s"),
("header_len", 20, "I"),
("mobi_type", 24, "I"),
("encoding", 28, "I"),
("UID", 32, "I"),
("generator_version", 36, "I"),
("reserved", 40, "40s"),
("first_nonbook_idx", 80, "I"),
("full_name_offs", 84, "I"),
("full_name_len", 88, "I"),
("locale_highbytes", 92, "H"),
("locale_country", 94, "B"),
("locale_language", 95, "B"),
("input_lang", 96, "I"),
("output_lang", 100, "I"),
("format_version", 104, "I"),
("first_image_idx", 108, "I"),
("huff/cdic_record", 112, "I"),
("huff/cdic_count", 116, "I"),
("datp_record", 120, "I"),
("datp_count", 124, "I"),
("exth_flags", 128, "I"),
("unknowni@132", 132, "32s"),
("unknown@164", 164, "I"),
("drm_offs", 168, "I"),
("drm_count", 172, "I"),
("drm_size", 176, "I"),
("drm_flags", 180, "I"),
("unknown@184", 184, "I"),
("unknown@188", 188, "I"),
("unknown@192", 192, "H"),
("last_image_record", 194, "H"),
("unknown@196", 196, "I"),
("fcis_record", 200, "I"),
("unknown@204", 204, "I"),
("flis_record", 208, "I"),
("unknown@212", 212, "I"),
("extra_data_flags", 242, "H")
)
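# Added illustrative helper (not part of the original module): shows how one
# entry of MOBI_HDR_FIELDS is read out of record 0 of the PalmDB, mirroring
# the loop in Book.__init__ below. The field index chosen here is arbitrary.
def _example_read_header_field(rec0, field_index=1):
    name, offs, fmt = MOBI_HDR_FIELDS[field_index]  # e.g. ("header_len", 20, "I")
    end = offs + calcsize(fmt)
    (value, ) = unpack(">%s" % fmt, rec0[offs:end])
    return name, value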
EXTH_FMT = ">4x2I"
'''4x = "EXTH", I = hlen, I = record count'''
EXTH_RECORD_TYPES = {
1: 'drm server id',
2: 'drm commerce id',
3: 'drm ebookbase book id',
100: 'author', # list
101: 'publisher', # list
102: 'imprint',
103: 'description',
104: 'isbn', # list
105: 'subject', # list
106: 'publication date',
107: 'review',
108: 'contributor', # list
109: 'rights',
110: 'subjectcode', # list
111: 'type',
112: 'source',
113: 'asin',
114: 'version number', # int
115: 'sample', # int (or bool)?
116: 'start reading',
117: 'adult',
118: 'retail price',
119: 'retail price currency',
201: 'cover offset', # int
202: 'thumbnail offset', # int
203: 'has fake cover', # bool?
208: 'watermark',
209: 'tamper proof keys',
401: 'clipping limit', # int
402: 'publisher limit',
404: 'ttsflag',
501: 'cde type',
502: 'last update time',
503: 'updated title'
}
PRC_HDRFMT = '>H2xIHHI' # Compression,unused,Len,Count,Size,Pos
def parse_palmdb(filename):
import palm
db = palm.Database(filename)
return db
class Book:
def __init__(self, fn):
self.filename = fn
# Set some fields to defaults
self.title = fn
self.author = "??"
self.language = "??"
# Rob Addition: Description
self.description = ""
self.is_a_book = False
        f = open(fn, 'rb')
d = f.read(68)
f.close()
encodings = {
1252: 'cp1252',
65001: 'utf-8'
}
supported_types = ('BOOKMOBI', 'TEXtREAd')
self.type = d[60:68]
if self.type not in supported_types:
LOG(1, "Unsupported file type %s" % (self.type))
return None
try:
db = parse_palmdb(fn)
except:
return None
self.is_a_book = True
# now we have a better guess at the title, use it for now
self.title = db.name
self.records = db.records
rec0 = self.records[0].data
#LOG(5,repr(rec0))
if self.type == 'BOOKMOBI':
LOG(3, "This is a MOBI book")
self.mobi = {}
for field, pos, fmt in MOBI_HDR_FIELDS:
end = pos + calcsize(fmt)
if (end > len(rec0) or
("header_len" in self.mobi
and end > self.mobi["header_len"])):
continue
LOG(4, "field: %s, fmt: %s, @ [%d:%d], data: %s" % (
field, fmt, pos, end, repr(rec0[pos:end])))
(self.mobi[field], ) = unpack(">%s" % fmt, rec0[pos:end])
LOG(3, "self.mobi: %s" % repr(self.mobi))
# Get and decode the book name
if self.mobi['locale_language'] in LANGUAGES:
lang = LANGUAGES[self.mobi['locale_language']]
if self.mobi['locale_country'] == 0:
LOG(2, "Book language: %s" % lang[0][1])
self.language = "%s (%s)" % (lang[0][1], lang[0][0])
elif self.mobi['locale_country'] in lang:
country = lang[self.mobi['locale_country']]
LOG(2, "Book language is %s (%s)" % (
lang[0][1], country[1]))
self.language = "%s (%s-%s)" % (
lang[0][1],
lang[0][0],
country[0]
)
pos = self.mobi['full_name_offs']
end = pos + self.mobi['full_name_len']
self.title = rec0[pos:end].decode(encodings[self.mobi['encoding']])
LOG(2, "Book name: %s" % self.title)
if self.mobi['id'] != 'MOBI':
LOG(0, "Mobi header missing!")
return None
if (0x40 & self.mobi['exth_flags']): # check for EXTH
self.exth = parse_exth(rec0, self.mobi['header_len'] + 16)
LOG(3, "EXTH header: %s" % repr(self.exth))
if 'author' in self.exth:
self.author = ' & '.join(self.exth['author'])
else:
self.author = "n/a"
self.rawdata = d
            if (('updated title' in self.exth) and
                    all(type(t) is str for t in self.exth['updated title'])):
                self.title = ' '.join(self.exth['updated title'])
if 'description' in self.exth:
self.description = ' <P> '.join(self.exth['description'])
elif self.type == 'TEXtREAd':
LOG(2, "This is an older MOBI book")
self.rawdata = d
compression, data_len, rec_count, rec_size, pos = unpack(
PRC_HDRFMT, rec0[:calcsize(PRC_HDRFMT)])
LOG(3, "compression %d, data_len %d, rec_count %d, rec_size %d" %
(compression, data_len, rec_count, rec_size))
if compression == 2:
data = uncompress(self.records[1].data)
else:
data = self.records[1].data
from BeautifulSoup import BeautifulSoup
soup = BeautifulSoup(data)
self.metadata = soup.fetch("dc-metadata")
try:
self.title = soup.fetch("dc:title")[0].getText()
self.author = soup.fetch("dc:creator")[0].getText()
self.language = soup.fetch("dc:language")[0].getText()
except:
self.title, self.author, self.language = ("Unknown", "Unknown",
"en-us")
try:
self.description = soup.fetch("dc:description")[0].getText()
except:
pass
def to_html(self):
last_idx = (
self.mobi['first_image_idx'] if 'mobi' in self.__dict__ else -1)
return ''.join([uncompress(x.data) for x in self.records[1:last_idx]])
def parse_exth(data, pos):
ret = {}
n = 0
if (pos != data.find('EXTH')):
LOG(0, "EXTH header not found where it should be @%d" % pos)
return None
else:
end = pos + calcsize(EXTH_FMT)
(hlen, count) = unpack(EXTH_FMT, data[pos:end])
LOG(4, "pos: %d, EXTH header len: %d, record count: %d" % (
pos, hlen, count))
pos = end
while n < count:
end = pos + calcsize(">2I")
t, l = unpack(">2I", data[pos:end])
v = data[end:pos + l]
if l - 8 == 4:
v = unpack(">I", v)[0]
if t in EXTH_RECORD_TYPES:
rec = EXTH_RECORD_TYPES[t]
LOG(4, "EXTH record '%s' @%d+%d: '%s'" % (
rec, pos, l - 8, v))
if rec not in ret:
ret[rec] = [v]
else:
ret[rec].append(v)
else:
LOG(4, "Found an unknown EXTH record type %d @%d+%d: '%s'" %
(t, pos, l - 8, repr(v)))
pos += l
n += 1
return ret
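# Added illustrative usage sketch (not part of the original module). The path
# below is only a placeholder; any readable MOBI/PRC file should do.
def _example_dump_book(path='/tmp/example.mobi'):
    book = Book(path)
    if not book.is_a_book:
        return None
    # title/author/language are filled in by Book.__init__ from the headers
    LOG(1, "%s by %s (%s)" % (book.title, book.author, book.language))
    return book.to_html()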
| robwebset/script.ebooks | resources/lib/kiehinen/ebook.py | Python | gpl-2.0 | 8,604 | 0.002092 |
#!/usr/bin/python
"""
Small web application to retrieve information from uniprot and itag for
a given compound.
The idea is that for one compound we are able to find out in which
reactions it is involved and what are the proteins involved in these
reactions. For each of these proteins we can find if there are genes and
genes from tomato associated with them.
"""
from flask import Flask, Response, render_template, request, redirect, url_for
from flaskext.wtf import Form, TextField
import ConfigParser
import datetime
import json
import os
import rdflib
import urllib
CONFIG = ConfigParser.ConfigParser()
CONFIG.readfp(open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'chebi2gene.cfg')))
# Address of the sparql server to query.
SERVER = CONFIG.get('chebi2gene', 'sparql_server')
# Create the application.
APP = Flask(__name__)
APP.secret_key = CONFIG.get('chebi2gene', 'secret_key')
# Stores in which graphs are the different source of information.
GRAPHS = {option: CONFIG.get('graph', option) for option in CONFIG.options('graph')}
print GRAPHS
class ChebiIDForm(Form):
""" Simple text field form to input the chebi identifier or the
    name of the molecule.
"""
chebi_id = TextField('Chebi ID or molecule name')
def convert_to_uniprot_id(data):
""" Converts from RHEA Uniprot URI to Uniprot ID.
@param data, a dictionary of String: [String] where the keys are
reaction ID and the values are protein URI.
@return, a dictionary of String: [String] where the keys are
reaction ID and the values are protein ID.
"""
for key in data:
proteins = data[key]
proteins2 = []
for protein in proteins:
prot_id = protein.rsplit(':', 1)[1]
proteins2.append(prot_id.strip())
data[key] = proteins2
return data
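# Added illustrative example (not original code). Assuming RHEA xrefs of the
# form '...UNIPROT:P12345', the conversion above keeps only the trailing
# accession:
#
#     convert_to_uniprot_id({'10280': ['http://www.ebi.ac.uk/rhea#UNIPROT:P12345']})
#     # -> {'10280': ['P12345']}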
def get_exact_chebi_from_search(name):
""" Search the chebi database for molecule having the given string
in their name. The data returned contains the chebi identifier, the
name and synonyms of the molecule in chebi.
@param name, a string, name of the molecule to search in chebi.
@return, a dictionary containing all the molecule found for having
the input string in their name. The data structure returned is like:
{string: {'name': string, 'syn': [String]}}, where the keys are the
chebi identifier and the values are dictionaries containing the
name of the molecules and a list of its synonym.
"""
query = '''
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX obo:<http://purl.obolibrary.org/obo#>
SELECT DISTINCT ?id ?name ?syn
FROM <%(chebi)s>
WHERE {
{
?id rdfs:label ?name .
?id obo:Synonym ?syn .
FILTER (
regex(?name, "%(search)s", "i")
)
}
} ORDER BY ?id
''' % {'search': name, 'chebi': GRAPHS['chebi']}
data_js = sparql_query(query, SERVER)
if not data_js:
return
molecules = {}
for entry in data_js['results']['bindings']:
chebi_id = entry['id']['value'].rsplit('/', 1)[1].split('_')[1]
if chebi_id in molecules:
molecules[chebi_id]['syn'].append(entry['syn']['value'])
else:
molecules[chebi_id] = {
'name': [entry['name']['value']],
'syn': [entry['syn']['value']]
}
return molecules
def get_extended_chebi_from_search(name):
""" Search the chebi database for molecule having the given string
in their name or in their synonyms. The data returned contains the
chebi identifier, the name and synonyms of the molecule in chebi.
@param name, a string, name of the molecule to search in chebi.
@return, a dictionary containing all the molecule found for having
the input string in their name or in their synonyms.
The data structure returned is like:
{string: {'name': string, 'syn': [String]}}, where the keys are the
chebi identifier and the values are dictionaries containing the
name of the molecules and a list of its synonym.
"""
query = '''
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX obo:<http://purl.obolibrary.org/obo#>
SELECT DISTINCT ?id ?name ?syn
FROM <%(chebi)s>
WHERE {
{
?id rdfs:label ?name .
?id obo:Synonym ?syn .
FILTER (
regex(?name, "%(search)s", "i")
|| regex(?syn, "%(search)s", "i")
)
}
} ORDER BY ?id
''' % {'search': name, 'chebi': GRAPHS['chebi']}
data_js = sparql_query(query, SERVER)
if not data_js:
return
molecules = {}
for entry in data_js['results']['bindings']:
chebi_id = entry['id']['value'].rsplit('/', 1)[1].split('_')[1]
if chebi_id in molecules:
molecules[chebi_id]['syn'].append(entry['syn']['value'])
else:
molecules[chebi_id] = {
'name': [entry['name']['value']],
'syn': [entry['syn']['value']]
}
return molecules
def get_genes_of_proteins(data):
""" Returns the genes associated with proteins.
@param name, a dictionary where the keys are reactions identifier
and the values lists of proteins identifier.
@return, a dictionary containing all the genes related with the
proteins specified.
The data structure returned is like:
{string: [String]}, where the keys are the uniprot identifier and
the values are list of gene identifier associated with the protein.
"""
genes = {}
for key in data:
proteins = data[key]
# Let's make sure the identifiers are unique
proteins = list(set(proteins))
query = '''
PREFIX gene:<http://pbr.wur.nl/GENE#>
PREFIX pos:<http://pbr.wur.nl/POSITION#>
SELECT DISTINCT ?prot ?name ?sca ?start ?stop ?desc
FROM <%(itag)s>
WHERE{
?gene gene:Protein ?prot .
FILTER (
?prot IN (
<http://purl.uniprot.org/uniprot/%(prot)s>
)
)
?gene gene:Position ?pos .
?pos pos:Scaffold ?sca .
?gene gene:Description ?desc .
?gene gene:FeatureName ?name .
?pos pos:Start ?start .
?pos pos:Stop ?stop .
} ORDER BY ?name
''' % {'prot': '>,\n<http://purl.uniprot.org/uniprot/'.join(
proteins), 'itag': GRAPHS['itag']}
data_js = sparql_query(query, SERVER)
for entry in data_js['results']['bindings']:
prot_id = entry['prot']['value'].rsplit('/', 1)[1]
gene = {}
for var in ['name', 'sca', 'start', 'stop', 'desc']:
gene[var] = entry[var]['value']
gene['sca'] = gene['sca'].rsplit('#', 1)[1]
if prot_id in genes:
genes[prot_id].append(gene)
else:
genes[prot_id] = [gene]
return genes
def get_pathways_of_proteins(data):
""" Returns the pathways associated with proteins.
@param name, a dictionary where the keys are reactions identifier
and the values lists of proteins.
@return, a dictionary containing all the pathways related with the
proteins specified.
The data structure returned is like:
{string: [String]}, where the keys are the uniprot identifier and
the values are list of pathways associated with the protein.
"""
pathways = {}
for key in data:
proteins = data[key]
# Let's make sure the identifiers are unique
proteins = list(set(proteins))
query = '''
PREFIX gene:<http://pbr.wur.nl/GENE#>
PREFIX uniprot:<http://purl.uniprot.org/core/>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
SELECT DISTINCT ?prot ?desc
FROM <%(uniprot)s>
WHERE {
?prot uniprot:annotation ?annot .
?annot rdfs:seeAlso ?url .
?annot rdfs:comment ?desc .
FILTER (
?prot IN (
<http://purl.uniprot.org/uniprot/%(prot)s>
)
)
}
''' % {'prot':
'>,\n<http://purl.uniprot.org/uniprot/'.join(proteins),
'uniprot': GRAPHS['uniprot']}
data_js = sparql_query(query, SERVER)
for entry in data_js['results']['bindings']:
prot_id = entry['prot']['value'].rsplit('/', 1)[1]
path = entry['desc']['value']
            if prot_id in pathways:
                if path not in pathways[prot_id]:
                    pathways[prot_id].append(path)
            else:
                pathways[prot_id] = [path]
return pathways
def get_organism_of_proteins(data):
""" Returns the all organism associated with the proteins.
@param name, a dictionary where the keys are reactions identifier
and the values lists of proteins.
@return, a dictionary containing all the organism related with the
proteins specified.
The data structure returned is like:
{string: [String]}, where the keys are the uniprot identifier and
the values are list of organisms associated with the protein.
"""
organism = {}
for key in data:
proteins = data[key]
# Let's make sure the identifiers are unique
proteins = list(set(proteins))
query = '''
PREFIX uniprot:<http://purl.uniprot.org/core/>
SELECT DISTINCT ?prot ?name
FROM <%(uniprot)s>
WHERE {
?prot uniprot:organism ?orga .
?orga uniprot:scientificName ?name .
FILTER (
?prot IN (
<http://purl.uniprot.org/uniprot/%(prot)s>
)
)
}
''' % {'prot':
'>,\n<http://purl.uniprot.org/uniprot/'.join(proteins),
'uniprot': GRAPHS['uniprot']}
data_js = sparql_query(query, SERVER)
for entry in data_js['results']['bindings']:
prot_id = entry['prot']['value'].rsplit('/', 1)[1]
orga = entry['name']['value']
            if prot_id in organism:
                if orga not in organism[prot_id]:
                    organism[prot_id].append(orga)
            else:
                organism[prot_id] = [orga]
return organism
def get_protein_of_chebi(chebi_id):
""" Returns the all protein associated with a compound.
@param name, a string, identifier of a compound on chebi.
@return, a dictionary containing all the proteins related with the
compound specified.
The data structure returned is like:
{string: [String]}, where the keys are reaction identifiers and the
values are list of proteins associated with the reaction.
"""
query = '''
prefix bp: <http://www.biopax.org/release/biopax-level2.owl#>
SELECT DISTINCT ?react ?xref
FROM <%(rhea)s>
WHERE {
?cmp bp:XREF <http://www.ebi.ac.uk/rhea#CHEBI:%(chebi_id)s> .
?dir ?p ?cmp .
?react ?p2 ?dir .
?react bp:XREF ?xref .
FILTER (
regex(?xref, 'UNIPROT')
)
}
''' % {'chebi_id': chebi_id, 'rhea': GRAPHS['rhea']}
data = sparql_query(query, SERVER)
if not data:
return
output = {}
for entry in data['results']['bindings']:
key = entry['react']['value'].split('#')[1]
if key in output:
output[key].append(entry['xref']['value'])
else:
output[key] = [entry['xref']['value']]
return output
def sparql_query(query, server, output_format='application/json'):
""" Runs the given SPARQL query against the desired sparql endpoint
and return the output in the format asked (default being rdf/xml).
@param query, the string of the sparql query that should be ran.
@param server, a string, the url of the sparql endpoint that we want
to run query against.
@param format, specifies in which format we want to have the output.
Defaults to `application/json` but can also be `application/rdf+xml`.
    @return, a JSON object representing the output of the provided sparql
    query, or the raw response text when a non-JSON output format is
    requested.
"""
params = {
'default-graph': '',
'should-sponge': 'soft',
'query': query,
'debug': 'off',
'timeout': '',
'format': output_format,
'save': 'display',
'fname': ''
}
querypart = urllib.urlencode(params)
response = urllib.urlopen(server, querypart).read()
    if output_format != 'application/json':
        # Non-JSON formats (e.g. rdf/xml) are returned as raw response text
        return response
    try:
        output = json.loads(response)
    except ValueError:
        output = {}
    return output
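# Added illustrative sketch (not part of the original app): a direct call
# against the configured endpoint. The query is a throwaway one, only meant
# to show the shape of the SPARQL JSON that comes back.
def _example_sparql_ping(server=SERVER):
    query = 'SELECT ?s WHERE { ?s ?p ?o } LIMIT 1'
    data = sparql_query(query, server)
    # 'results' -> 'bindings' holds the list of solution rows
    return data.get('results', {}).get('bindings', [])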
def run_query_via_rdflib(query, server):
""" Runs the given query of the given server, loads the results
rdf/xml into a rdflib.Graph and return a rdf/xml representation of
this graph.
This is a bit of a hack to return a nicer rdf/xml representation of
the knowledge retrieve than older version of virtuoso offers.
From version 6.1.5 at least, this trick should not be needed
anymore.
@param query, the string of the sparql query that should be ran.
@param server, a string, the url of the sparql endpoint that we want
to run query against.
@return, a string, representing the rdf output of the provided query.
"""
    graph = rdflib.Graph()
    graph.parse(data=sparql_query(query, server,
                                  output_format='application/rdf+xml'),
                format='xml')
return graph.serialize(format='xml')
## Web-app
@APP.route('/', methods=['GET', 'POST'])
def index():
""" Shows the front page.
All the content of this page is in the index.html file under the
templates directory. The file is full html and has no templating
logic within.
"""
print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(),
request.remote_addr, request.url)
form = ChebiIDForm(csrf_enabled=False)
if form.validate_on_submit():
try:
int(form.chebi_id.data)
return redirect(url_for('show_chebi',
chebi_id=form.chebi_id.data))
except ValueError:
return redirect(url_for('search_chebi',
name=form.chebi_id.data))
return render_template('index.html', form=form)
@APP.route('/search/<name>')
def search_chebi(name):
""" Search the CHEBI database for the name given.
"""
print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(),
request.remote_addr, request.url)
molecules = get_exact_chebi_from_search(name)
if molecules and len(molecules) == 1:
return redirect(url_for('show_chebi',
chebi_id=molecules.keys()[0]))
return render_template('search.html', data=molecules, search=name,
extended=False)
@APP.route('/fullsearch/<name>')
def search_chebi_extended(name):
""" Search the CHEBI database for the name given including the
synonyms.
"""
print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(),
request.remote_addr, request.url)
molecules = get_extended_chebi_from_search(name)
return render_template('search.html', data=molecules, search=name,
extended=True)
@APP.route('/chebi/<chebi_id>')
def show_chebi(chebi_id):
""" Shows the front page.
All the content of this page is in the index.html file under the
templates directory. The file is full html and has no templating
logic within.
"""
print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(),
request.remote_addr, request.url)
proteins = get_protein_of_chebi(chebi_id)
if not proteins:
return render_template('output.html', proteins=[],
pathways=None, genes=None, organisms=None, chebi=chebi_id)
proteins = convert_to_uniprot_id(proteins)
pathways = get_pathways_of_proteins(proteins)
genes = get_genes_of_proteins(proteins)
organisms = get_organism_of_proteins(proteins)
return render_template('output.html', proteins=proteins,
pathways=pathways, genes=genes, organisms=organisms,
chebi=chebi_id)
@APP.route('/csv/<chebi_id>')
def generate_csv(chebi_id):
""" Generate a comma separated value file containing all the
information.
"""
print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(),
request.remote_addr, request.url)
# Regenerate the informations
proteins = get_protein_of_chebi(chebi_id)
proteins = convert_to_uniprot_id(proteins)
pathways = get_pathways_of_proteins(proteins)
genes = get_genes_of_proteins(proteins)
organisms = get_organism_of_proteins(proteins)
string = 'Chebi ID, Chebi URL, Rhea ID, Rhea URL, UniProt, \
Organism, Type, Name, Scaffold, Start, Stop, Description\n'
chebi_url = 'http://www.ebi.ac.uk/chebi/searchId.do?chebiId=%s' % \
chebi_id
for reaction in proteins:
react_url = 'http://www.ebi.ac.uk/rhea/reaction.xhtml?id=RHEA:%s' % \
reaction
for protein in proteins[reaction]:
if protein in pathways:
for pathway in pathways[protein]:
string = string + '%s,%s,%s,%s,%s,%s,Pathway,%s\n' % (
chebi_id, chebi_url, reaction, react_url, protein,
" - ".join(organisms[protein]),
pathway)
if protein in genes:
for gene in genes[protein]:
string = string + \
'%s,%s,%s,%s,%s,%s,Gene,%s,%s,%s,%s,%s\n' % (
chebi_id, chebi_url, reaction, react_url, protein,
" - ".join(organisms[protein]),
gene['name'], gene['sca'],
gene['start'], gene['stop'], gene['desc'])
return Response(string, mimetype='application/excel')
if __name__ == '__main__':
APP.debug = True
APP.run()
| PBR/chebi2gene | chebi2gene.py | Python | bsd-3-clause | 17,793 | 0.001124 |
import aiohttp
import discord
import random
from config import GoogleAPIKey
from config import GoogleCSECX
async def google(cmd, message, args):
if not args:
await message.channel.send(cmd.help())
return
else:
search = ' '.join(args)
url = 'https://www.googleapis.com/customsearch/v1?q=' + search + '&cx=' + GoogleCSECX + '&key=' + GoogleAPIKey
async with aiohttp.ClientSession() as session:
async with session.get(url) as data:
results = await data.json()
google_colors = [0x4285f4, 0x34a853, 0xfbbc05, 0xea4335, 0x00a1f1, 0x7cbb00, 0xffbb00, 0xf65314]
embed_color = random.choice(google_colors)
try:
title = results['items'][0]['title']
url = results['items'][0]['link']
embed = discord.Embed(color=embed_color)
embed.set_author(name='Google', icon_url='https://avatars2.githubusercontent.com/u/1342004?v=3&s=400',
url='https://www.google.com/search?q=' + search)
embed.add_field(name=title, value='[**Link Here**](' + url + ')')
await message.channel.send(None, embed=embed)
except Exception as e:
cmd.log.error(e)
embed = discord.Embed(color=0xDB0000, title='❗ Daily Limit Reached.')
embed.set_footer(text='Google limits this API feature, and we hit that limit.')
await message.channel.send(None, embed=embed)
| valeth/apex-sigma | sigma/plugins/searches/google/google.py | Python | gpl-3.0 | 1,478 | 0.003392 |
from fsa import *
from nameGenerator import *
class IncrementalAdfa(Dfa):
"""This class is an Acyclic Deterministic Finite State Automaton
    constructed from a list of words.
"""
def __init__(self, words, nameGenerator = None, sorted = False):
if nameGenerator is None:
nameGenerator = IndexNameGenerator()
self.nameGenerator = nameGenerator
if sorted:
self.createFromSortedListOfWords(words)
else:
self.createFromArbitraryListOfWords(words)
def getCommonPrefix(self, word):
stateName = self.startState
index = 0
nextStateName = stateName
while nextStateName is not None:
symbol = word[index]
stateName = nextStateName
if symbol in self.states[stateName].transitions:
nextStateName = self.states[stateName].transitions[symbol]
index += 1
else:
nextStateName = None
return (stateName, word[index:])
def hasChildren(self, stateName):
okay = False
if [s for s in list(self.states[stateName].transitions.values()) if s]:
okay = True
return okay
def addSuffix(self, stateName, currentSuffix):
lastState = stateName
while len(currentSuffix) > 0:
newStateName = self.nameGenerator.generate()
symbol = currentSuffix[0]
currentSuffix = currentSuffix[1:]
self.states[stateName].transitions[symbol] = newStateName
self.states[newStateName] = State(newStateName)
stateName = newStateName
self.finalStates.append(stateName)
def markedAsRegistered(self, stateName):
return stateName in self.register
def markAsRegistered(self, stateName):
self.register[stateName] = True
def equivalentRegisteredState(self, stateName):
equivatentState = None
for state in list(self.register.keys()):
if self.areEquivalents(state, stateName):
equivatentState = state
return equivatentState
def lastChild(self, stateName):
        symbols = list(self.states[stateName].transitions.keys())
        symbols.sort()
        return (self.states[stateName].transitions[symbols[-1]], symbols[-1])
def replaceOrRegister(self, stateName):
#childName = self.finalStates[-1]
childName, lastSymbol = self.lastChild(stateName)
if not self.markedAsRegistered(childName):
if self.hasChildren(childName):
self.replaceOrRegister(childName)
equivalentState = self.equivalentRegisteredState(childName)
if equivalentState is not None:
self.deleteBranch(childName)
self.states[stateName].transitions[lastSymbol] = equivalentState
else:
self.markAsRegistered(childName)
def deleteBranch(self, child):
childs = [child]
while len(childs) > 0:
nextChilds = []
for child in childs:
nextChilds += [s for s in list(self.states[child].transitions.values()) if not self.markedAsRegistered(s)]
self.states.pop(child)
if child in self.finalStates:
self.finalStates.remove(child)
childs = nextChilds
def createFromSortedListOfWords(self, words):
self.register = {}
self.finalStates = []
self.startState = self.nameGenerator.generate()
self.states = {self.startState : State(self.startState)}
lastWord = None
for word in words:
if word.endswith('\n'):
word = word[:-1]
lastStateName, currentSuffix = self.getCommonPrefix(word)
if self.hasChildren(lastStateName):
self.replaceOrRegister(lastStateName)
self.addSuffix(lastStateName, currentSuffix)
self.replaceOrRegister(self.startState)
def createFromArbitraryListOfWords(self, words):
self.register = {}
self.finalStates = []
self.startState = self.nameGenerator.generate()
self.states = {self.startState : State(self.startState)}
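# Added illustrative usage sketch (not part of the original module); assumes
# the Dfa/State classes imported from fsa above behave as used in this file.
def _example_small_adfa():
    words = sorted(["cat", "cats", "dog", "dogs"])
    # sorted=True selects the incremental construction for sorted input
    return IncrementalAdfa(words, sorted=True)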
| jpbarrette/moman | finenight/python/iadfa.py | Python | mit | 4,303 | 0.006739 |
import pytest
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--setupplan",
"--setup-plan",
action="store_true",
help="show what fixtures and tests would be executed but "
"don't execute anything.",
)
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
# Will return a dummy fixture if the setuponly option is provided.
if request.config.option.setupplan:
my_cache_key = fixturedef.cache_key(request)
fixturedef.cached_result = (None, my_cache_key, None)
return fixturedef.cached_result
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
if config.option.setupplan:
config.option.setuponly = True
config.option.setupshow = True
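# Added usage note (illustrative, not part of the original plugin):
#
#     pytest --setup-plan test_something.py
#
# prints the fixture setup/teardown plan without executing fixtures or tests;
# --setupplan is accepted as an alias of --setup-plan.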
| alfredodeza/pytest | src/_pytest/setupplan.py | Python | mit | 818 | 0 |
from draftmim import app
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.restless import APIManager
db = SQLAlchemy(app)
api_manager = APIManager(app, flask_sqlalchemy_db=db)
| markalansmith/draftmim | web/draftmim/core.py | Python | apache-2.0 | 189 | 0 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import click
@click.group(help='Utilities for developers and advanced users.')
def dev():
pass
@dev.command(name='refresh-cache',
short_help='Refresh CLI cache.',
help="Refresh the CLI cache. Use this command if you are "
"developing a plugin, or q2cli itself, and want your "
"changes to take effect in the CLI. A refresh of the cache "
"is necessary because package versions do not typically "
"change each time an update is made to a package's code. "
"Setting the environment variable Q2CLIDEV to any value "
"will always refresh the cache when a command is run.")
def refresh_cache():
import q2cli.cache
q2cli.cache.CACHE.refresh()
| jairideout/q2cli | q2cli/dev.py | Python | bsd-3-clause | 1,137 | 0 |
# Built-in
import os
import warnings
# Common
import numpy as np
# #############################################################################
# Triangular meshes
# #############################################################################
def tri_checkformat_NodesFaces(nodes, indfaces, ids=None):
    """ Check and normalize a triangular / quadrilateral mesh

    Checks indexing, duplicates and orientation, and converts quadrilateral
    faces to triangles.
    Returns (indfaces, mtype, ntri) where indfaces is 0-indexed and
    counter-clockwise, mtype is 'tri' or 'quadtri', and ntri is the number of
    triangles each original face was split into (1 or 2).
    """
# Check mesh type
if indfaces.shape[1] == 3:
mtype = 'tri'
elif indfaces.shape[1] == 4:
mtype = 'quad'
else:
msg = ("Mesh seems to be neither triangular nor quadrilateral\n"
+ " => unrecognized mesh type, not implemented yet")
raise Exception(msg)
# Check indexing !!!
indmax = int(np.nanmax(indfaces))
if indmax == nodes.shape[0]:
indfaces = indfaces - 1
elif indmax > nodes.shape[0]:
msg = ("There seems to be an indexing error\n"
+ "\t- np.max(indfaces) = {}".format(indmax)
+ "\t- nodes.shape[0] = {}".format(nodes.shape[0]))
raise Exception(msg)
# Check for duplicates
nnodes = nodes.shape[0]
nfaces = indfaces.shape[0]
nodesu, indnodesu = np.unique(nodes, axis=0, return_index=True)
facesu, indfacesu = np.unique(indfaces, axis=0, return_index=True)
facesuu = np.unique(facesu)
lc = [nodesu.shape[0] != nnodes,
facesu.shape[0] != nfaces,
facesuu.size != nnodes or np.any(facesuu != np.arange(0, nnodes))]
if any(lc):
msg = "Non-valid mesh in {}:\n".format(ids)
if lc[0]:
noddup = [ii for ii in range(0, nnodes) if ii not in indnodesu]
msg += (" Duplicate nodes: {}\n".format(nnodes - nodesu.shape[0])
+ "\t- nodes.shape: {}\n".format(nodes.shape)
+ "\t- unique nodes.shape: {}\n".format(nodesu.shape)
+ "\t- duplicate nodes indices: {}\n".format(noddup))
if lc[1]:
dupf = [ii for ii in range(0, nfaces) if ii not in indfacesu]
msg += (" Duplicate faces: {}\n".format(nfaces - facesu.shape[0])
+ "\t- faces.shape: {}\n".format(indfaces.shape)
+ "\t- unique faces.shape: {}".format(facesu.shape)
+ "\t- duplicate facess indices: {}\n".format(dupf))
if lc[2]:
nfu = facesuu.size
nodnotf = [ii for ii in range(0, nnodes) if ii not in facesuu]
fnotn = [ii for ii in facesuu if ii < 0 or ii >= nnodes]
msg += (" Non-bijective nodes indices vs faces:\n"
+ "\t- nb. nodes: {}\n".format(nnodes)
+ "\t- nb. unique nodes index in faces: {}\n".format(nfu)
+ "\t- nodes not in faces: {}\n".format(nodnotf)
+ "\t- faces ind not in nodes: {}\n".format(fnotn))
raise Exception(msg)
# Test for unused nodes
facesu = np.unique(indfaces)
c0 = np.all(facesu >= 0) and facesu.size == nnodes
if not c0:
indnot = [ii for ii in range(0, nnodes) if ii not in facesu]
msg = ("Some nodes not used in mesh of ids {}:\n".format(ids)
+ " - unused nodes indices: {}".format(indnot))
warnings.warn(msg)
# Convert to triangular mesh if necessary
if mtype == 'quad':
# Convert to tri mesh (solution for unstructured meshes)
indface = np.empty((indfaces.shape[0]*2, 3), dtype=int)
indface[::2, :] = indfaces[:, :3]
indface[1::2, :-1] = indfaces[:, 2:]
indface[1::2, -1] = indfaces[:, 0]
indfaces = indface
mtype = 'quadtri'
ntri = 2
else:
ntri = 1
# Check orientation
x, y = nodes[indfaces, 0], nodes[indfaces, 1]
orient = ((y[:, 1] - y[:, 0])*(x[:, 2] - x[:, 1])
- (y[:, 2] - y[:, 1])*(x[:, 1] - x[:, 0]))
indclock = orient > 0.
if np.any(indclock):
nclock, ntot = indclock.sum(), indfaces.shape[0]
msg = ("Some triangles not counter-clockwise\n"
+ " (necessary for matplotlib.tri.Triangulation)\n"
+ " => {}/{} triangles reshaped".format(nclock, ntot))
warnings.warn(msg)
(indfaces[indclock, 1],
indfaces[indclock, 2]) = indfaces[indclock, 2], indfaces[indclock, 1]
return indfaces, mtype, ntri
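# Added illustrative sketch (not part of the original module): a unit square
# given as a single 1-indexed quadrilateral face is converted by the routine
# above into two 0-indexed, counter-clockwise triangles.
def _example_unit_square_mesh():
    nodes = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    indfaces = np.array([[1, 2, 3, 4]])
    # Expect mtype == 'quadtri', ntri == 2 and indfaces.shape == (2, 3)
    return tri_checkformat_NodesFaces(nodes, indfaces)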
# #############################################################################
# Rectangular meshes
# #############################################################################
def _rect_checkRZ(aa, name='R', shapeRZ=None):
if aa.ndim == 1 and np.any(np.diff(aa) < 0.):
msg = "{} must be increasing!".format(name)
raise Exception(msg)
elif aa.ndim == 2:
lc = [np.all(np.diff(aa[0, :])) > 0.,
np.all(np.diff(aa[:, 0])) > 0.]
if np.sum(lc) != 1:
msg = "{} must have exactly one dim increasing".format(name)
raise Exception(msg)
if lc[0]:
aa = aa[0, :]
if shapeRZ[1] is None:
shapeRZ[1] = name
if shapeRZ[1] != name:
msg = ("Inconsistent shapeRZ[1]\n"
+ "\t- expected: [{}, ...]\n".format(name)
+ "\t- provided: {}".format(shapeRZ))
raise Exception(msg)
else:
aa = aa[:, 0]
if shapeRZ[0] is None:
shapeRZ[0] = name
assert shapeRZ[0] == name
return aa, shapeRZ
def rect_checkformat(R, Z, datashape=None,
shapeRZ=None, ids=None):
if R.ndim not in [1, 2] or Z.ndim not in [1, 2]:
msg = ""
raise Exception(msg)
shapeu = np.unique(np.r_[R.shape, Z.shape])
if shapeRZ is None:
shapeRZ = [None, None]
# Check R, Z
R, shapeRZ = _rect_checkRZ(R, name='R', shapeRZ=shapeRZ)
Z, shapeRZ = _rect_checkRZ(Z, name='Z', shapeRZ=shapeRZ)
if datashape is not None:
if None in shapeRZ:
pass
shapeRZ = tuple(shapeRZ)
if shapeRZ == ('R', 'Z'):
datashape_exp = (R.size, Z.size)
elif shapeRZ == ('Z', 'R'):
datashape_exp = (Z.size, R.size)
else:
msg = "Inconsistent data shape !"
raise Exception(msg)
if datashape != datashape_exp:
msg = ("Inconsistent data shape\n"
+ "\t- shapeRZ = {}\n".format(shapeRZ)
+ "\t- datashape expected: {}\n".format(datashape_exp)
+ "\t- datashape provided: {}\n".format(datashape))
raise Exception(msg)
if None not in shapeRZ:
shapeRZ = tuple(shapeRZ)
if shapeRZ not in [('R', 'Z'), ('Z', 'R')]:
msg = ("Wrong value for shapeRZ:\n"
+ "\t- expected: ('R', 'Z') or ('Z', 'R')\n"
+ "\t- provided: {}".format(shapeRZ))
raise Exception(msg)
return R, Z, shapeRZ, 0
| Didou09/tofu | tofu/imas2tofu/_comp_mesh.py | Python | mit | 6,909 | 0 |
DESCRIPTION = """\
ACQ4 is a python-based platform for experimental neurophysiology.
It includes support for standard electrophysiology, multiphoton imaging,
scanning laser photostimulation, and many other experimental techniques. ACQ4 is
highly modular and extensible, allowing support to be added for new types of
devices, techniques, user-interface modules, and analyses.
"""
setupOpts = dict(
name='acq4',
description='Neurophysiology acquisition and analysis platform',
long_description=DESCRIPTION,
license='MIT',
url='http://www.acq4.org',
author='Luke Campagnola',
author_email='luke.campagnola@gmail.com',
)
from setuptools import setup
import distutils.dir_util
import distutils.sysconfig
import os, sys, re
from subprocess import check_output
## generate list of all sub-packages
path = os.path.abspath(os.path.dirname(__file__))
n = len(path.split(os.path.sep))
subdirs = [i[0].split(os.path.sep)[n:] for i in os.walk(os.path.join(path, 'acq4')) if '__init__.py' in i[2]]
allPackages = ['.'.join(p) for p in subdirs]
## Make sure build directory is clean before installing
buildPath = os.path.join(path, 'build')
if os.path.isdir(buildPath):
distutils.dir_util.remove_tree(buildPath)
## Determine current version string
initfile = os.path.join(path, 'acq4', '__init__.py')
init = open(initfile).read()
m = re.search(r'__version__ = (\S+)\n', init)
if m is None or len(m.groups()) != 1:
raise Exception("Cannot determine __version__ from init file: '%s'!" % initfile)
version = m.group(1).strip('\'\"')
initVersion = version
# If this is a git checkout, try to generate a more descriptive version string
try:
if os.path.isdir(os.path.join(path, '.git')):
def gitCommit(name):
commit = check_output(['git', 'show', name], universal_newlines=True).split('\n')[0]
assert commit[:7] == 'commit '
return commit[7:]
# Find last tag matching "acq4-.*"
tagNames = check_output(['git', 'tag'], universal_newlines=True).strip().split('\n')
while True:
if len(tagNames) == 0:
raise Exception("Could not determine last tagged version.")
lastTagName = tagNames.pop()
if re.match(r'acq4-.*', lastTagName):
break
# is this commit an unchanged checkout of the last tagged version?
lastTag = gitCommit(lastTagName)
head = gitCommit('HEAD')
if head != lastTag:
branch = re.search(r'\* (.*)', check_output(['git', 'branch'], universal_newlines=True)).group(1)
version = version + "-%s-%s" % (branch, head[:10])
# any uncommitted modifications?
modified = False
status = check_output(['git', 'status', '-s'], universal_newlines=True).strip().split('\n')
for line in status:
if line.strip() != '' and line[:2] != '??':
modified = True
break
if modified:
version = version + '+'
sys.stderr.write("Detected git commit; will use version string: '%s'\n" % version)
except:
version = initVersion
sys.stderr.write("This appears to be a git checkout, but an error occurred "
"while attempting to determine a version string for the "
"current commit.\nUsing the unmodified version string "
"instead: '%s'\n" % version)
sys.excepthook(*sys.exc_info())
print("__init__ version: %s current version: %s" % (initVersion, version))
if 'upload' in sys.argv and version != initVersion:
print("Base version does not match current; stubbornly refusing to upload.")
exit()
import distutils.command.build
class Build(distutils.command.build.build):
def run(self):
ret = distutils.command.build.build.run(self)
# If the version in __init__ is different from the automatically-generated
# version string, then we will update __init__ in the build directory
global path, version, initVersion
if initVersion == version:
return ret
initfile = os.path.join(path, self.build_lib, 'acq4', '__init__.py')
if not os.path.isfile(initfile):
sys.stderr.write("Warning: setup detected a git install and attempted "
"to generate a descriptive version string; however, "
"the expected build file at %s was not found. "
"Installation will use the original version string "
"%s instead.\n" % (initfile, initVersion)
)
else:
data = open(initfile, 'r').read()
open(initfile, 'w').write(re.sub(r"__version__ = .*", "__version__ = '%s'" % version, data))
# If this is windows, we need to update acq4.bat to reference the correct python executable.
if sys.platform == 'win32':
runner = os.path.join(path, self.build_scripts, 'acq4.bat')
runcmd = "%s -m acq4" % sys.executable
data = open(runner, 'r').read()
open(runner, 'w').write(re.sub(r'python -m acq4', runcmd, data))
return ret
# copy config tree to system location
# if sys.platform == 'win32':
# dataRoot = os.path.join(os.environ['ProgramFiles'], 'acq4')
# elif sys.platform == 'darwin':
# dataRoot = 'Library/Application Support/acq4'
# else:
# dataRoot = '/etc/acq4'
# instead, just install config example to same path as package.
if sys.platform == 'win32':
#dataRoot = distutils.sysconfig.get_python_lib().replace(sys.prefix, '')
dataRoot = 'Lib/site-packages/acq4'
else:
#dataRoot = 'python%d.%d/site-packages/acq4' % (sys.version_info.major, sys.version_info.minor)
dataRoot = distutils.sysconfig.get_python_lib().replace(sys.prefix+'/', '') + '/acq4'
dataFiles = []
configRoot = os.path.join(path, 'config')
for subpath, _, files in os.walk(configRoot):
endPath = subpath[len(path):].lstrip(os.path.sep)
files = [os.path.join(endPath, f) for f in files]
dataFiles.append((os.path.join(dataRoot, endPath), files))
# print dataFiles[-1]
packageData = []
pkgRoot = os.path.join(path, 'acq4')
for subpath, _, files in os.walk(pkgRoot):
for f in files:
addTo = None
for ext in ['.png', '.cache', '.h', '.hpp', '.dll']:
if f.endswith(ext):
packageData.append(os.path.join(subpath, f)[len(pkgRoot):].lstrip(os.path.sep))
if sys.platform == 'win32':
scripts = ['bin/acq4.bat']
else:
scripts = ['bin/acq4']
setup(
version=version,
cmdclass={'build': Build},
packages=allPackages,
package_dir={},
package_data={'acq4': packageData},
data_files=dataFiles,
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering",
],
install_requires = [
'numpy',
'scipy',
'h5py',
'pillow',
],
scripts = scripts,
**setupOpts
)
| mgraupe/acq4 | setup.py | Python | mit | 7,496 | 0.007204 |
from root import *
version = 'v0.1'
| Strangemother/python-state-machine | scatter/__init__.py | Python | mit | 36 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Zomboided
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module is a bunch of functions that are called from the settings
# menu to manage various file groups.
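#
# The script expects a single action argument (sys.argv[1]); the actions
# handled below are "ovpn", "generate", "downloads", "log", "user" and "dns".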
import xbmcaddon
import xbmcgui
import xbmcvfs
import datetime
import os
import sys
import xbmc
from libs.vpnproviders import removeGeneratedFiles, cleanPassFiles, providers, usesUserKeys, usesMultipleKeys, getUserKeys
from libs.vpnproviders import getUserCerts, getVPNDisplay, getVPNLocation, removeDownloadedFiles, isAlternative, resetAlternative
from libs.utility import debugTrace, errorTrace, infoTrace, newPrint, getID, getName
from libs.vpnplatform import getLogPath, getUserDataPath, writeVPNLog, copySystemdFiles, addSystemd, removeSystemd, generateVPNs
from libs.common import resetVPNConnections, isVPNConnected, disconnectVPN, suspendConfigUpdate, resumeConfigUpdate, dnsFix, getVPNRequestedProfile
from libs.common import resetVPNProvider, setAPICommand
from libs.ipinfo import resetIPServices
try:
from libs.generation import generateAll
except:
pass
action = sys.argv[1]
debugTrace("-- Entered managefiles.py with parameter " + action + " --")
if not getID() == "":
addon = xbmcaddon.Addon(getID())
addon_name = getName()
# Reset the ovpn files
if action == "ovpn":
if getVPNRequestedProfile() == "":
if xbmcgui.Dialog().yesno(addon_name, "Resetting the VPN provider will disconnect and reset all VPN connections, and then remove any files that have been created. Continue?"):
suspendConfigUpdate()
# Disconnect so that live files are not being modified
resetVPNConnections(addon)
infoTrace("managefiles.py", "Resetting the VPN provider")
# Delete the generated files, and reset the locations so it can be selected again
removeGeneratedFiles()
# Delete any values that have previously been validated
vpn_provider = getVPNLocation(addon.getSetting("vpn_provider"))
if isAlternative(vpn_provider): resetAlternative(vpn_provider)
# Reset the IP service error counts, etc
resetIPServices()
addon = xbmcaddon.Addon(getID())
resetVPNProvider(addon)
addon = xbmcaddon.Addon(getID())
resumeConfigUpdate()
xbmcgui.Dialog().ok(addon_name, "Reset the VPN provider. Validate a connection to start using a VPN again.")
else:
xbmcgui.Dialog().ok(addon_name, "Connection to VPN being attempted and has been aborted. Try again in a few seconds.")
setAPICommand("Disconnect")
# Generate the VPN provider files
if action == "generate":
# Only used during development to create location files
generateAll()
xbmcgui.Dialog().ok(addon_name, "Regenerated some or all of the VPN location files.")
# Delete all of the downloaded VPN files
if action == "downloads":
debugTrace("Deleting all downloaded VPN files")
removeDownloadedFiles()
xbmcgui.Dialog().ok(addon_name, "Deleted all of the downloaded VPN files. They'll be downloaded again if required.")
# Copy the log file
elif action == "log":
log_path = ""
dest_path = ""
try:
log_path = getLogPath()
start_dir = ""
dest_folder = xbmcgui.Dialog().browse(0, "Select folder to copy log file into", "files", "", False, False, start_dir, False)
dest_path = "kodi " + datetime.datetime.now().strftime("%y-%m-%d %H-%M-%S") + ".log"
dest_path = dest_folder + dest_path.replace(" ", "_")
# Write VPN log to log before copying
writeVPNLog()
debugTrace("Copying " + log_path + " to " + dest_path)
addon = xbmcaddon.Addon(getID())
infoTrace("managefiles.py", "Copying log file to " + dest_path + ". Using version " + addon.getSetting("version_number"))
xbmcvfs.copy(log_path, dest_path)
if not xbmcvfs.exists(dest_path): raise IOError('Failed to copy log ' + log_path + " to " + dest_path)
dialog_message = "Copied log file to: " + dest_path
except:
errorTrace("managefiles.py", "Failed to copy log from " + log_path + " to " + dest_path)
if xbmcvfs.exists(log_path):
dialog_message = "Error copying log, try copying it to a different location."
else:
dialog_messsage = "Could not find the kodi.log file."
errorTrace("managefiles.py", dialog_message + " " + log_path + ", " + dest_path)
xbmcgui.Dialog().ok("Log Copy", dialog_message)
# Delete the user key and cert files
elif action == "user":
if addon.getSetting("1_vpn_validated") == "" or xbmcgui.Dialog().yesno(addon_name, "Deleting key and certificate files will disconnect and reset all VPN connections. Connections must be re-validated before use. Continue?"):
# Disconnect so that live files are not being modified
if isVPNConnected(): resetVPNConnections(addon)
# Select the provider
provider_list = []
for provider in providers:
if usesUserKeys(provider):
provider_list.append(getVPNDisplay(provider))
provider_list.sort()
index = xbmcgui.Dialog().select("Select VPN provider", provider_list)
provider_display = provider_list[index]
provider = getVPNLocation(provider_display)
# Get the key/cert pairs for that provider and offer up for deletion
user_keys = getUserKeys(provider)
user_certs = getUserCerts(provider)
if len(user_keys) > 0 or len(user_certs) > 0:
still_deleting = True
while still_deleting:
if len(user_keys) > 0 or len(user_certs) > 0:
# Build a list of things to display. We should always have pairs, but if
# something didn't copy or the user has messed with the dir this will cope
all_user = []
single_pair = "user [I](Same key and certificate used for all connections)[/I]"
for key in user_keys:
list_item = os.path.basename(key)
list_item = list_item.replace(".key", "")
if list_item == "user": list_item = single_pair
all_user.append(list_item)
for cert in user_certs:
list_item = os.path.basename(cert)
list_item = list_item.replace(".crt", "")
if list_item == "user": list_item = single_pair
if not list_item in all_user: all_user.append(list_item)
all_user.sort()
# Offer a delete all option if there are multiple keys
all_item = "[I]Delete all key and certificate files[/I]"
if usesMultipleKeys(provider):
all_user.append(all_item)
# Add in a finished option
finished_item = "[I]Finished[/I]"
all_user.append(finished_item)
# Get the pair to delete
index = xbmcgui.Dialog().select("Select key and certificate to delete, or [I]Finished[/I]", all_user)
if all_user[index] == finished_item:
still_deleting = False
else:
if all_user[index] == single_pair : all_user[index] = "user"
if all_user[index] == all_item:
if xbmcgui.Dialog().yesno(addon_name, "Are you sure you want to delete all key and certificate files for " + provider_display + "?"):
for item in all_user:
if not item == all_item and not item == finished_item:
path = getUserDataPath(provider + "/" + item)
try:
if xbmcvfs.exists(path + ".key"):
xbmcvfs.delete(path + ".key")
if xbmcvfs.exists(path + ".txt"):
xbmcvfs.delete(path + ".txt")
if xbmcvfs.exists(path + ".crt"):
xbmcvfs.delete(path + ".crt")
except:
xbmcgui.Dialog().ok(addon_name, "Couldn't delete one of the key or certificate files: " + path)
else:
path = getUserDataPath(provider + "/" + all_user[index])
try:
if xbmcvfs.exists(path+".key"):
xbmcvfs.delete(path + ".key")
if xbmcvfs.exists(path + ".txt"):
xbmcvfs.delete(path + ".txt")
if xbmcvfs.exists(path + ".crt"):
xbmcvfs.delete(path + ".crt")
except:
xbmcgui.Dialog().ok(addon_name, "Couldn't delete one of the key or certificate files: " + path)
# Fetch the directory list again
user_keys = getUserKeys(provider)
user_certs = getUserCerts(provider)
if len(user_keys) == 0 and len(user_certs) == 0:
xbmcgui.Dialog().ok(addon_name, "All key and certificate files for " + provider_display + " have been deleted.")
else:
still_deleting = False
else:
xbmcgui.Dialog().ok(addon_name, "No key and certificate files exist for " + provider_display + ".")
# Fix the user defined files with DNS goodness
if action == "dns":
dnsFix()
command = "Addon.OpenSettings(" + getID() + ")"
xbmc.executebuiltin(command)
else:
errorTrace("managefiles.py", "VPN service is not ready")
debugTrace("-- Exit managefiles.py --")
| Zomboided/VPN-Manager | managefiles.py | Python | gpl-2.0 | 11,679 | 0.007192 |
#!/usr/bin/python
import os
import sys
import time
import logging
import subprocess
replot_poll_period = 1
plot_script_extension = '.gnuplot'
plot_script = None
for f in os.listdir('.'):
if f.endswith(plot_script_extension):
plot_script = f
break
assert plot_script != None, 'No file ending with "%s" found in current directory.' % plot_script_extension
# Check if the plot script is already being plotted
# by another instance of the script
lock_file = plot_script + '.lock'
def refresh_lock():
with open(lock_file, 'w') as lock:
lock.write( "%.3f" % (time.time()) )
def exit_if_locked():
try:
with open(lock_file, 'r') as f:
# the lockfile contains a timestamp
if float(f.read()) > time.time() - max(3, 2*replot_poll_period):
logging.warn("It seems that the file (%s) is already being plotted. Exiting...",
plot_script)
exit()
except IOError:
return # lock doesn't exist
def print_plot_script(fname):
with open(fname, 'r') as f:
print '---start of %s---' % fname
for i,l in enumerate(f.read().split('\n')): print '%3d %s' % (i,l)
print '---end of %s---\n' % fname
sys.stdout.flush()
exit_if_locked() # technically, this and the lock file creation should be done atomically...
try:
refresh_lock()
file_to_monitor = plot_script
files_that_must_exist = [ plot_script ]
plotted_once = False
# Watch directory for changes and replot when necessary.
# Use simple polling of st_mtime since it works on Linux and Windows
# and the polling period is reasonably slow (~seconds).
gp = None
while not plotted_once or gp.poll() == None: # keep polling as long as gnuplot is alive
if all( os.path.exists(ff) for ff in files_that_must_exist):
if not plotted_once:
# Initial plot
plot_last_changed_time = os.path.getmtime(file_to_monitor)
tex_last_changed_time = 0
print_plot_script(plot_script)
gp = subprocess.Popen(['gnuplot', plot_script, '-'],
stdin=subprocess.PIPE, stdout=sys.stdout, stderr=sys.stderr)
plotted_once = True
print "Replotting every %g seconds (if plot script modification time changes)..." % replot_poll_period
print "Hit <ctrl> + C to exit."
else:
# Update plot if the plot script was modified
try:
plot_changed_time = os.path.getmtime(file_to_monitor)
if plot_changed_time != plot_last_changed_time and gp != None:
#logging.debug('Plot changed. Reloading plot script.')
gp.stdin.write('load "%s"\n' % plot_script)
plot_last_changed_time = plot_changed_time
# compile .tex to PDF
tex_to_watch = 'output.tex'
if os.path.isfile(tex_to_watch):
try:
tex_changed_time = os.path.getmtime(tex_to_watch)
if tex_changed_time != tex_last_changed_time:
tex_last_changed_time = tex_changed_time
with open('pdflatex.out', 'w') as log_file:
subprocess.call(['pdflatex', '-halt-on-error', 'output'], stdin=None, stdout=log_file, stderr=log_file)
except:
print 'Call to pdflatex failed. See pdflatex.out.'
except OSError:
pass # the plot script does not exist which is normal if the plot was overwritten.
time.sleep(replot_poll_period)
refresh_lock()
finally:
try: os.remove(lock_file)
except: pass
print "The plot engine has terminated. Exiting."
| govenius/plotbridge | examples/gnuplot_with_direction/expected_output/gnuplot.interactive.py | Python | gpl-2.0 | 3,554 | 0.019977 |
# Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from os.path import join
from .static_data import MATRIX_DIR
def read_coefficients(
key_type='row',
verbose=True,
filename=join(MATRIX_DIR, 'pmbec.mat')):
"""
Parameters
------------
filename : str
Location of PMBEC coefficient matrix
key_type : str
'row' : every key is a single amino acid,
which maps to a dictionary for that row
'pair' : every key is a tuple of amino acids
'pair_string' : every key is a string of two amino acid characters
verbose : bool
Print rows of matrix as we read them
"""
d = {}
if key_type == 'row':
def add_pair(row_letter, col_letter, value):
if row_letter not in d:
d[row_letter] = {}
d[row_letter][col_letter] = value
elif key_type == 'pair':
def add_pair(row_letter, col_letter, value):
d[(row_letter, col_letter)] = value
else:
assert key_type == 'pair_string', \
"Invalid dictionary key type: %s" % key_type
def add_pair(row_letter, col_letter, value):
d["%s%s" % (row_letter, col_letter)] = value
with open(filename, 'r') as f:
lines = [line for line in f.read().split('\n') if len(line) > 0]
header = lines[0]
if verbose:
print(header)
residues = [x for x in header.split(' ') if len(x) == 1 and x != ' ' and x != '\t']
assert len(residues) == 20
if verbose:
print(residues)
for line in lines[1:]:
cols = [
x
for x in line.split(' ')
if len(x) > 0 and x != ' ' and x != '\t'
]
assert len(cols) == 21, "Expected 20 values + letter, got %s" % cols
row_letter = cols[0]
for i, col in enumerate(cols[1:]):
col_letter = residues[i]
assert col_letter != ' ' and col_letter != '\t'
value = float(col)
add_pair(row_letter, col_letter, value)
return d
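# Illustrative access patterns for the three key types (assuming the default
# PMBEC matrix file is available):
#   read_coefficients(key_type='row')['A']['C']       -> coefficient for (A, C)
#   read_coefficients(key_type='pair')[('A', 'C')]    -> same value
#   read_coefficients(key_type='pair_string')['AC']   -> same value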
if __name__ == '__main__':
d = read_coefficients(key_type='pair_string')
print("PMBEC matrix")
for k in sorted(d):
print(k, d[k])
| dmnfarrell/epitopemap | modules/pepdata/pmbec.py | Python | apache-2.0 | 2,894 | 0.001382 |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import call
from oslo_concurrency import processutils
from designate.backend.agent_backend import impl_knot2
from designate import exceptions
import designate.tests
from designate.tests.unit.agent import backends
class Knot2AgentBackendTestCase(designate.tests.TestCase):
def setUp(self):
super(Knot2AgentBackendTestCase, self).setUp()
self.backend = impl_knot2.Knot2Backend('foo')
self.backend._execute_knotc = mock.Mock()
def test_start_backend(self):
self.backend.start()
def test_stop_backend(self):
self.backend.stop()
def test_create_zone(self):
zone = backends.create_dnspy_zone('example.org')
self.backend.create_zone(zone)
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('conf-set', 'zone[example.org]',
expected_error='duplicate identifier'),
call('conf-commit'),
call('zone-refresh', 'example.org')
])
def test_create_zone_already_there(self):
self.backend._execute_knotc.return_value = 'duplicate identifier'
zone = backends.create_dnspy_zone('example.org')
self.backend.create_zone(zone)
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('conf-set', 'zone[example.org]',
expected_error='duplicate identifier'),
call('conf-commit'),
call('zone-refresh', 'example.org')
])
def test_start_minidns_to_knot_axfr(self):
self.backend._start_minidns_to_knot_axfr('foo')
self.backend._execute_knotc.assert_called_with('zone-refresh', 'foo')
@mock.patch('oslo_concurrency.lockutils.lock')
def test_modify_zone(self, mock_lock):
self.backend._modify_zone('blah', 'bar')
self.assertEqual(3, self.backend._execute_knotc.call_count)
self.backend._execute_knotc.assert_called_with('conf-commit')
@mock.patch('oslo_concurrency.lockutils.lock')
def test_modify_zone_exception(self, mock_lock):
# Raise an exception during the second call to _execute_knotc
self.backend._execute_knotc.side_effect = [None, exceptions.Backend,
None]
self.assertRaises(
exceptions.Backend,
self.backend._modify_zone, 'blah', 'bar'
)
self.assertEqual(3, self.backend._execute_knotc.call_count)
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('blah', 'bar'),
call('conf-abort'),
])
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_find_zone_serial(self, mock_execute):
result = (
'[example.com.] type: slave | serial: 20 | next-event: idle | '
'auto-dnssec: disabled]'
)
mock_execute.return_value = result, ''
serial = self.backend.find_zone_serial('example.com')
self.assertEqual(20, serial)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_find_zone_serial_zone_not_found(self, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError(
'error: [example.com.] (no such zone found)'
)
serial = self.backend.find_zone_serial('example.com')
self.assertIsNone(serial)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_find_zone_serial_unexpected_output(self, mock_execute):
mock_execute.return_value = 'bogus output', ''
self.assertRaises(
exceptions.Backend,
self.backend.find_zone_serial, 'example.com'
)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_find_zone_serial_error(self, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError('blah')
self.assertRaises(
exceptions.Backend,
self.backend.find_zone_serial, 'example.com'
)
def test_update_zone(self):
zone = backends.create_dnspy_zone('example.org')
self.backend.update_zone(zone)
self.backend._execute_knotc.assert_called_once_with(
'zone-refresh', 'example.org'
)
def test_delete_zone(self):
self.backend.delete_zone('example.org')
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('conf-unset', 'zone[example.org]',
expected_error='invalid identifier'),
call('conf-commit'),
])
def test_delete_zone_already_gone(self):
self.backend._execute_knotc.return_value = 'duplicate identifier'
self.backend.delete_zone('example.org')
self.backend._execute_knotc.assert_has_calls([
call('conf-begin'),
call('conf-unset', 'zone[example.org]',
expected_error='invalid identifier'),
call('conf-commit'),
])
class Knot2AgentExecuteTestCase(designate.tests.TestCase):
def setUp(self):
super(Knot2AgentExecuteTestCase, self).setUp()
self.backend = impl_knot2.Knot2Backend('foo')
def test_init(self):
self.assertEqual('knotc', self.backend._knotc_cmd_name)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_ok(self, mock_execute):
mock_execute.return_value = ('OK', '')
self.backend._execute_knotc('a1', 'a2')
mock_execute.assert_called_with('knotc', 'a1', 'a2')
self.assertEqual(1, mock_execute.call_count)
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_expected_error(self, mock_execute):
mock_execute.return_value = ('xyz', '')
self.backend._execute_knotc('a1', 'a2', expected_error='xyz')
mock_execute.assert_called_once_with('knotc', 'a1', 'a2')
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_expected_output(self, mock_execute):
mock_execute.return_value = ('xyz', '')
self.backend._execute_knotc('a1', 'a2', expected_output='xyz')
mock_execute.assert_called_once_with('knotc', 'a1', 'a2')
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_with_error(self, mock_execute):
mock_execute.return_value = ('xyz', '')
self.assertRaises(
exceptions.Backend,
self.backend._execute_knotc, 'a1', 'a2'
)
mock_execute.assert_called_once_with('knotc', 'a1', 'a2')
@mock.patch('designate.backend.agent_backend.impl_knot2.execute')
def test_execute_knotc_raising_exception(self, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError
self.assertRaises(
exceptions.Backend,
self.backend._execute_knotc, 'a1', 'a2'
)
mock_execute.assert_called_once_with('knotc', 'a1', 'a2')
| openstack/designate | designate/tests/unit/agent/backends/test_knot2.py | Python | apache-2.0 | 7,785 | 0 |
#!/usr/bin/env python
## \file config.py
# \brief python package for config
# \author T. Lukaczyk, F. Palacios
# \version 4.0.1 "Cardinal"
#
# SU2 Lead Developers: Dr. Francisco Palacios (Francisco.D.Palacios@boeing.com).
# Dr. Thomas D. Economon (economon@stanford.edu).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
#
# Copyright (C) 2012-2015 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
import numpy as np
from ..util import bunch, ordered_bunch, switch
from .tools import *
from config_options import *
try:
from collections import OrderedDict
except ImportError:
from ..util.ordered_dict import OrderedDict
inf = 1.0e20
# ----------------------------------------------------------------------
# Configuration Class
# ----------------------------------------------------------------------
class Config(ordered_bunch):
""" config = SU2.io.Config(filename="")
Starts a config class, an extension of
ordered_bunch()
use 1: initialize by reading config file
config = SU2.io.Config('filename')
use 2: initialize from dictionary or bunch
config = SU2.io.Config(param_dict)
use 3: initialize empty
config = SU2.io.Config()
Parameters can be accessed by item or attribute
ie: config['MESH_FILENAME'] or config.MESH_FILENAME
Methods:
read() - read from a config file
write() - write to a config file (requires existing file)
dump() - dump a raw config file
unpack_dvs() - unpack a design vector
diff() - returns the difference from another config
dist() - computes the distance from another config
"""
_filename = 'config.cfg'
def __init__(self,*args,**kwarg):
# look for filename in inputs
if args and isinstance(args[0],str):
filename = args[0]
args = args[1:]
elif kwarg.has_key('filename'):
filename = kwarg['filename']
del kwarg['filename']
else:
filename = ''
# initialize ordered bunch
super(Config,self).__init__(*args,**kwarg)
# read config if it exists
if filename:
try:
self.read(filename)
except IOError:
print 'Could not find config file: %s' % filename
except:
print 'Unexpected error: ',sys.exc_info()[0]
raise
self._filename = filename
def read(self,filename):
""" reads from a config file """
konfig = read_config(filename)
self.update(konfig)
def write(self,filename=''):
""" updates an existing config file """
if not filename: filename = self._filename
assert os.path.exists(filename) , 'must write over an existing config file'
write_config(filename,self)
def dump(self,filename=''):
""" dumps all items in the config bunch, without comments """
if not filename: filename = self._filename
dump_config(filename,self)
def __getattr__(self,k):
try:
return super(Config,self).__getattr__(k)
except AttributeError:
raise AttributeError , 'Config parameter not found'
def __getitem__(self,k):
try:
return super(Config,self).__getitem__(k)
except KeyError:
raise KeyError , 'Config parameter not found: %s' % k
def unpack_dvs(self,dv_new,dv_old=None):
""" updates config with design variable vectors
will scale according to each DEFINITION_DV scale parameter
Modifies:
DV_KIND
DV_MARKER
DV_PARAM
DV_VALUE_OLD
DV_VALUE_NEW
Inputs:
dv_new - list or array of new dv values
dv_old - optional, list or array of old dv values, defaults to zeros
"""
dv_new = copy.deepcopy(dv_new)
dv_old = copy.deepcopy(dv_old)
# handle unpacking cases
def_dv = self['DEFINITION_DV']
n_dv = len(def_dv['KIND'])
if not dv_old: dv_old = [0.0]*n_dv
assert len(dv_new) == len(dv_old) , 'unexpected design vector length'
# handle param
param_dv = self['DV_PARAM']
# apply scale
dv_scales = def_dv['SCALE']
dv_new = [ dv_new[i]*dv_scl for i,dv_scl in enumerate(dv_scales) ]
dv_old = [ dv_old[i]*dv_scl for i,dv_scl in enumerate(dv_scales) ]
# Change the parameters of the design variables
self['DV_KIND'] = def_dv['KIND']
param_dv['PARAM'] = def_dv['PARAM']
param_dv['FFDTAG'] = def_dv['FFDTAG']
self.update({ 'DV_MARKER' : def_dv['MARKER'][0] ,
'DV_VALUE_OLD' : dv_old ,
'DV_VALUE_NEW' : dv_new })
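    # Illustrative example: with two design variables defined in DEFINITION_DV,
    # each with SCALE 1.0, config.unpack_dvs([0.1, -0.2]) sets
    # DV_VALUE_NEW = [0.1, -0.2] and DV_VALUE_OLD = [0.0, 0.0].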
def __eq__(self,konfig):
return super(Config,self).__eq__(konfig)
def __ne__(self,konfig):
return super(Config,self).__ne__(konfig)
def local_files(self):
""" removes path prefix from all *_FILENAME params
"""
for key,value in self.iteritems():
if key.split('_')[-1] == 'FILENAME':
self[key] = os.path.basename(value)
def diff(self,konfig):
""" compares self to another config
Inputs:
konfig - a second config
Outputs:
config_diff - a config containing only the differing
keys, each with values of a list of the different
config values.
for example:
config_diff.MATH_PROBLEM = ['DIRECT','CONTINUOUS_ADJOINT']
"""
keys = set([])
keys.update( self.keys() )
keys.update( konfig.keys() )
konfig_diff = Config()
for key in keys:
value1 = self.get(key,None)
value2 = konfig.get(key,None)
if not value1 == value2:
konfig_diff[key] = [value1,value2]
return konfig_diff
def dist(self,konfig,keys_check='ALL'):
""" calculates a distance to another config
Inputs:
konfig - a second config
keys_check - optional, a list of keys to check
Outputs:
distance - a float
Currently only works for DV_VALUE_NEW and DV_VALUE_OLD
Returns a large value otherwise
"""
konfig_diff = self.diff(konfig)
if keys_check == 'ALL':
keys_check = konfig_diff.keys()
distance = 0.0
for key in keys_check:
if konfig_diff.has_key(key):
val1 = konfig_diff[key][0]
val2 = konfig_diff[key][1]
if key in ['DV_VALUE_NEW',
'DV_VALUE_OLD']:
val1 = np.array( val1 )
val2 = np.array( val2 )
this_diff = np.sqrt( np.sum( (val1-val2)**2 ) )
else:
print 'Warning, unexpected config difference'
this_diff = inf
distance += this_diff
#: if key different
#: for each keys_check
return distance
def __repr__(self):
#return '<Config> %s' % self._filename
return self.__str__()
def __str__(self):
output = 'Config: %s' % self._filename
for k,v in self.iteritems():
output += '\n %s= %s' % (k,v)
return output
#: class Config
# -------------------------------------------------------------------
# Get SU2 Configuration Parameters
# -------------------------------------------------------------------
def read_config(filename):
""" reads a config file """
# initialize output dictionary
data_dict = OrderedDict()
input_file = open(filename)
# process each line
while 1:
# read the line
line = input_file.readline()
if not line:
break
# remove line returns
line = line.strip('\r\n')
# make sure it has useful data
if (not "=" in line) or (line[0] == '%'):
continue
# split across equals sign
line = line.split("=",1)
this_param = line[0].strip()
this_value = line[1].strip()
assert not data_dict.has_key(this_param) , ('Config file has multiple specifications of %s' % this_param )
for case in switch(this_param):
# comma delimited lists of strings with or without paren's
if case("MARKER_EULER") : pass
if case("MARKER_FAR") : pass
if case("MARKER_PLOTTING") : pass
if case("MARKER_MONITORING") : pass
if case("MARKER_SYM") : pass
if case("DV_KIND") :
# remove white space
this_value = ''.join(this_value.split())
# remove parens
this_value = this_value.strip('()')
# split by comma
data_dict[this_param] = this_value.split(",")
break
# semicolon delimited lists of comma delimited lists of floats
if case("DV_PARAM"):
# remove white space
info_General = ''.join(this_value.split())
# split by semicolon
info_General = info_General.split(';')
# build list of dv params, convert string to float
dv_Parameters = []
dv_FFDTag = []
for this_dvParam in info_General:
this_dvParam = this_dvParam.strip('()')
this_dvParam = this_dvParam.split(",")
# if FFD change the first element to work with numbers and float(x)
if data_dict["DV_KIND"][0] in ['FFD_SETTING','FFD_CONTROL_POINT','FFD_DIHEDRAL_ANGLE','FFD_TWIST_ANGLE','FFD_ROTATION','FFD_CAMBER','FFD_THICKNESS','FFD_CONTROL_POINT_2D','FFD_CAMBER_2D','FFD_THICKNESS_2D']:
this_dvFFDTag = this_dvParam[0]
this_dvParam[0] = '0'
else:
this_dvFFDTag = []
this_dvParam = [ float(x) for x in this_dvParam ]
dv_FFDTag = dv_FFDTag + [this_dvFFDTag]
dv_Parameters = dv_Parameters + [this_dvParam]
# store in a dictionary
dv_Definitions = { 'FFDTAG' : dv_FFDTag ,
'PARAM' : dv_Parameters }
data_dict[this_param] = dv_Definitions
break
# comma delimited lists of floats
if case("DV_VALUE_OLD") : pass
if case("DV_VALUE_NEW") : pass
if case("DV_VALUE") :
# remove white space
this_value = ''.join(this_value.split())
# split by comma, map to float, store in dictionary
data_dict[this_param] = map(float,this_value.split(","))
break
# float parameters
if case("MACH_NUMBER") : pass
if case("AoA") : pass
if case("FIN_DIFF_STEP") : pass
if case("CFL_NUMBER") : pass
if case("WRT_SOL_FREQ") :
data_dict[this_param] = float(this_value)
break
# int parameters
if case("NUMBER_PART") : pass
if case("AVAILABLE_PROC") : pass
if case("EXT_ITER") : pass
if case("TIME_INSTANCES") : pass
if case("UNST_ADJOINT_ITER") : pass
if case("ITER_AVERAGE_OBJ") : pass
if case("ADAPT_CYCLES") :
data_dict[this_param] = int(this_value)
break
# unitary design variable definition
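            # e.g. (illustrative values):
            #   DEFINITION_DV= ( 1, 1.0 | airfoil | 0, 0.05 ); ( 1, 1.0 | airfoil | 0, 0.10 )
            # where each group is "kind id, scale | marker list | parameter list"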
if case("DEFINITION_DV"):
# remove white space
this_value = ''.join(this_value.split())
# split into unitary definitions
info_Unitary = this_value.split(";")
# process each Design Variable
dv_Kind = []
dv_Scale = []
dv_Markers = []
dv_FFDTag = []
dv_Parameters = []
for this_General in info_Unitary:
if not this_General: continue
# split each unitary definition into one general definition
info_General = this_General.strip("()").split("|") # check for needed strip()?
# split information for dv Kinds
info_Kind = info_General[0].split(",")
# pull processed dv values
this_dvKind = get_dvKind( int( info_Kind[0] ) )
this_dvScale = float( info_Kind[1] )
this_dvMarkers = info_General[1].split(",")
if this_dvKind=='MACH_NUMBER' or this_dvKind=='AOA':
this_dvParameters = []
else:
this_dvParameters = info_General[2].split(",")
# if FFD change the first element to work with numbers and float(x), save also the tag
if this_dvKind in ['FFD_SETTING','FFD_CONTROL_POINT','FFD_DIHEDRAL_ANGLE','FFD_TWIST_ANGLE','FFD_ROTATION','FFD_CAMBER','FFD_THICKNESS','FFD_CONTROL_POINT_2D','FFD_CAMBER_2D','FFD_THICKNESS_2D']:
this_dvFFDTag = this_dvParameters[0]
this_dvParameters[0] = '0'
else:
this_dvFFDTag = []
this_dvParameters = [ float(x) for x in this_dvParameters ]
# add to lists
dv_Kind = dv_Kind + [this_dvKind]
dv_Scale = dv_Scale + [this_dvScale]
dv_Markers = dv_Markers + [this_dvMarkers]
dv_FFDTag = dv_FFDTag + [this_dvFFDTag]
dv_Parameters = dv_Parameters + [this_dvParameters]
# store in a dictionary
dv_Definitions = { 'KIND' : dv_Kind ,
'SCALE' : dv_Scale ,
'MARKER' : dv_Markers ,
'FFDTAG' : dv_FFDTag ,
'PARAM' : dv_Parameters }
# save to output dictionary
data_dict[this_param] = dv_Definitions
break
# unitary objective definition
if case('OPT_OBJECTIVE'):
# remove white space
this_value = ''.join(this_value.split())
# split by scale
this_value = this_value.split("*")
this_name = this_value[0]
this_scale = 1.0
if len(this_value) > 1:
this_scale = float( this_value[1] )
this_def = { this_name : {'SCALE':this_scale} }
# save to output dictionary
data_dict[this_param] = this_def
break
# unitary constraint definition
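            # e.g. (illustrative values):
            #   OPT_CONSTRAINT= ( LIFT > 0.328 ) * 0.001; ( MOMENT_Z = 0.0 ) * 0.01
            # where each term is "( name sign value ) * scale"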
if case('OPT_CONSTRAINT'):
# remove white space
this_value = ''.join(this_value.split())
# check for none case
if this_value == 'NONE':
data_dict[this_param] = {'EQUALITY':OrderedDict(), 'INEQUALITY':OrderedDict()}
break
# split definitions
this_value = this_value.split(';')
this_def = OrderedDict()
for this_con in this_value:
if not this_con: continue # if no definition
# defaults
this_obj = 'NONE'
this_sgn = '='
this_scl = 1.0
this_val = 0.0
# split scale if present
this_con = this_con.split('*')
if len(this_con) > 1:
this_scl = float( this_con[1] )
this_con = this_con[0]
# find sign
for this_sgn in ['<','>','=']:
if this_sgn in this_con: break
# split sign, store objective and value
this_con = this_con.strip('()').split(this_sgn)
assert len(this_con) == 2 , 'incorrect constraint definition'
this_obj = this_con[0]
this_val = float( this_con[1] )
# store in dictionary
this_def[this_obj] = { 'SIGN' : this_sgn ,
'VALUE' : this_val ,
'SCALE' : this_scl }
#: for each constraint definition
# sort constraints by type
this_sort = { 'EQUALITY' : OrderedDict() ,
'INEQUALITY' : OrderedDict() }
for key,value in this_def.iteritems():
if value['SIGN'] == '=':
this_sort['EQUALITY'][key] = value
else:
this_sort['INEQUALITY'][key] = value
#: for each definition
# save to output dictionary
data_dict[this_param] = this_sort
break
# otherwise
# string parameters
if case():
data_dict[this_param] = this_value
break
#: if case DEFINITION_DV
#: for case
#: for line
#hack - twl
if not data_dict.has_key('DV_VALUE_NEW'):
data_dict['DV_VALUE_NEW'] = [0]
if not data_dict.has_key('DV_VALUE_OLD'):
data_dict['DV_VALUE_OLD'] = [0]
if not data_dict.has_key('OPT_ITERATIONS'):
data_dict['OPT_ITERATIONS'] = 100
if not data_dict.has_key('OPT_ACCURACY'):
data_dict['OPT_ACCURACY'] = 1e-10
if not data_dict.has_key('BOUND_DV'):
data_dict['BOUND_DV'] = 1e10
return data_dict
#: def read_config()
# -------------------------------------------------------------------
# Set SU2 Configuration Parameters
# -------------------------------------------------------------------
def write_config(filename,param_dict):
""" updates an existing config file """
temp_filename = "temp.cfg"
shutil.copy(filename,temp_filename)
output_file = open(filename,"w")
# break pointers
param_dict = copy.deepcopy(param_dict)
for raw_line in open(temp_filename):
# remove line returns
line = raw_line.strip('\r\n')
# make sure it has useful data
if not "=" in line:
output_file.write(raw_line)
continue
# split across equals sign
line = line.split("=")
this_param = line[0].strip()
old_value = line[1].strip()
# skip if parameter unwanted
if not param_dict.has_key(this_param):
output_file.write(raw_line)
continue
# start writing parameter
new_value = param_dict[this_param]
output_file.write(this_param + "= ")
# handle parameter types
for case in switch(this_param):
# comma delimited list of floats
if case("DV_VALUE_NEW") : pass
if case("DV_VALUE_OLD") : pass
if case("DV_VALUE") :
n_lists = len(new_value)
for i_value in range(n_lists):
output_file.write("%s" % new_value[i_value])
if i_value+1 < n_lists:
output_file.write(", ")
break
# comma delimited list of strings no paren's
if case("DV_KIND") : pass
if case("TASKS") : pass
if case("GRADIENTS") :
if not isinstance(new_value,list):
new_value = [ new_value ]
n_lists = len(new_value)
for i_value in range(n_lists):
output_file.write(new_value[i_value])
if i_value+1 < n_lists:
output_file.write(", ")
break
# comma delimited list of strings inside paren's
if case("MARKER_EULER") : pass
if case("MARKER_FAR") : pass
if case("MARKER_PLOTTING") : pass
if case("MARKER_MONITORING") : pass
if case("MARKER_SYM") : pass
if case("DV_MARKER") :
if not isinstance(new_value,list):
new_value = [ new_value ]
output_file.write("( ")
n_lists = len(new_value)
for i_value in range(n_lists):
output_file.write(new_value[i_value])
if i_value+1 < n_lists:
output_file.write(", ")
output_file.write(" )")
break
# semicolon delimited lists of comma delimited lists
if case("DV_PARAM") :
assert isinstance(new_value['PARAM'],list) , 'incorrect specification of DV_PARAM'
if not isinstance(new_value['PARAM'][0],list): new_value = [ new_value ]
for i_value in range(len(new_value['PARAM'])):
output_file.write("( ")
this_param_list = new_value['PARAM'][i_value]
this_ffd_list = new_value['FFDTAG'][i_value]
n_lists = len(this_param_list)
if this_ffd_list != []:
output_file.write("%s, " % this_ffd_list)
for j_value in range(1,n_lists):
output_file.write("%s" % this_param_list[j_value])
if j_value+1 < n_lists:
output_file.write(", ")
else:
for j_value in range(n_lists):
output_file.write("%s" % this_param_list[j_value])
if j_value+1 < n_lists:
output_file.write(", ")
output_file.write(") ")
if i_value+1 < len(new_value['PARAM']):
output_file.write("; ")
break
# int parameters
if case("NUMBER_PART") : pass
if case("ADAPT_CYCLES") : pass
if case("TIME_INSTANCES") : pass
if case("AVAILABLE_PROC") : pass
if case("UNST_ADJOINT_ITER") : pass
if case("EXT_ITER") :
output_file.write("%i" % new_value)
break
if case("DEFINITION_DV") :
n_dv = len(new_value['KIND'])
if not n_dv:
output_file.write("NONE")
for i_dv in range(n_dv):
this_kind = new_value['KIND'][i_dv]
output_file.write("( ")
output_file.write("%i , " % get_dvID(this_kind) )
output_file.write("%s " % new_value['SCALE'][i_dv])
output_file.write("| ")
# markers
n_mark = len(new_value['MARKER'][i_dv])
for i_mark in range(n_mark):
output_file.write("%s " % new_value['MARKER'][i_dv][i_mark])
if i_mark+1 < n_mark:
output_file.write(", ")
#: for each marker
if not this_kind in ['AOA','MACH_NUMBER']:
output_file.write(" | ")
# params
if this_kind in ['FFD_SETTING','FFD_CONTROL_POINT','FFD_DIHEDRAL_ANGLE','FFD_TWIST_ANGLE','FFD_ROTATION','FFD_CAMBER','FFD_THICKNESS','FFD_CONTROL_POINT_2D','FFD_CAMBER_2D','FFD_THICKNESS_2D']:
n_param = len(new_value['PARAM'][i_dv])
output_file.write("%s , " % new_value['FFDTAG'][i_dv])
for i_param in range(1,n_param):
output_file.write("%s " % new_value['PARAM'][i_dv][i_param])
if i_param+1 < n_param:
output_file.write(", ")
else:
n_param = len(new_value['PARAM'][i_dv])
for i_param in range(n_param):
output_file.write("%s " % new_value['PARAM'][i_dv][i_param])
if i_param+1 < n_param:
output_file.write(", ")
#: for each param
output_file.write(" )")
if i_dv+1 < n_dv:
output_file.write("; ")
#: for each dv
break
if case("OPT_OBJECTIVE"):
assert len(new_value.keys())==1 , 'only one OPT_OBJECTIVE is currently supported'
i_name = 0
for name,value in new_value.iteritems():
if i_name>0: output_file.write("; ")
output_file.write( "%s * %s" % (name,value['SCALE']) )
i_name += 1
break
if case("OPT_CONSTRAINT"):
i_con = 0
for con_type in ['EQUALITY','INEQUALITY']:
this_con = new_value[con_type]
for name,value in this_con.iteritems():
if i_con>0: output_file.write("; ")
output_file.write( "( %s %s %s ) * %s"
% (name, value['SIGN'], value['VALUE'], value['SCALE']) )
i_con += 1
#: for each constraint
#: for each constraint type
if not i_con: output_file.write("NONE")
break
# default, assume string, integer or unformatted float
if case():
output_file.write('%s' % new_value)
break
#: for case
# remove from param dictionary
del param_dict[this_param]
# next line
output_file.write("\n")
#: for each line
# check that all params were used
for this_param in param_dict.keys():
if not this_param in ['JOB_NUMBER']:
print ( 'Warning: Parameter %s not found in config file and was not written' % (this_param) )
output_file.close()
os.remove( temp_filename )
#: def write_config()
def dump_config(filename,config):
''' dumps a raw config file with all options in config
and no comments
'''
# HACK - twl
if config.has_key('DV_VALUE_NEW'):
config.DV_VALUE = config.DV_VALUE_NEW
config_file = open(filename,'w')
# write dummy file
for key in config.keys():
config_file.write( '%s= 0 \n' % key )
config_file.close()
# dump data
write_config(filename,config)
| Heathckliff/SU2 | SU2_PY/SU2/io/config.py | Python | lgpl-2.1 | 30,190 | 0.016827 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pkg_resources import WorkingSet
from pants.base.exceptions import BuildConfigurationError
from pants.bin.goal_runner import OptionsInitializer
from pants.option.options_bootstrapper import OptionsBootstrapper
def test_invalid_version():
options_bootstrapper = OptionsBootstrapper(args=['--pants-version=99.99.9999'])
with pytest.raises(BuildConfigurationError):
OptionsInitializer(options_bootstrapper, WorkingSet()).setup()
| jtrobec/pants | tests/python/pants_test/bin/test_goal_runner.py | Python | apache-2.0 | 756 | 0.005291 |
def extractTokyoESPScans(item):
"""
Tokyo ESP Scans
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractTokyoESPScans.py | Python | bsd-3-clause | 230 | 0.030435 |
"""
42. Storing files according to a custom storage system
``FileField`` and its variations can take a ``storage`` argument to specify how
and where files should be stored.
"""
import random
import tempfile
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
temp_storage_location = tempfile.mkdtemp()
temp_storage = FileSystemStorage(location=temp_storage_location)
class Storage(models.Model):
def custom_upload_to(self, filename):
return 'foo'
def random_upload_to(self, filename):
# This returns a different result each time,
# to make sure it only gets called once.
return '%s/%s' % (random.randint(100, 999), filename)
normal = models.FileField(storage=temp_storage, upload_to='tests')
custom = models.FileField(storage=temp_storage, upload_to=custom_upload_to)
random = models.FileField(storage=temp_storage, upload_to=random_upload_to)
default = models.FileField(storage=temp_storage, upload_to='tests', default='tests/default.txt')
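# Minimal usage sketch (illustrative; assumes a configured Django test settings
# module so the model and temp_storage can be used):
#
#   obj = Storage()
#   obj.normal.save("django_test.txt", ContentFile("content"))
#   obj.normal.name   # -> "tests/django_test.txt", written under temp_storage_location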
| skevy/django | tests/modeltests/files/models.py | Python | bsd-3-clause | 1,085 | 0.001843 |
# -*- coding: utf-8 -*-
__title__ = 'pywebtask'
__version__ = '0.1.8'
__build__ = 0x000108
__author__ = 'Sebastián José Seba'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Sebastián José Seba'
from .webtasks import run, run_file
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| ssebastianj/pywebtasks | pywebtasks/__init__.py | Python | mit | 546 | 0 |
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
| jb-old/chii | quoth/uwsgi_app.py | Python | unlicense | 148 | 0.006757 |
"""Graphical user interface."""
import collections
import ctypes
import sdl2
import hienoi.renderer
from hienoi._common import GLProfile, GraphicsAPI, ParticleDisplay, UserData
from hienoi._vectors import Vector2i, Vector2f, Vector4f
class NavigationAction(object):
"""Enumerator for the current nagivation action.
Attributes
----------
NONE
MOVE
ZOOM
"""
NONE = 0
MOVE = 1
ZOOM = 2
_Handles = collections.namedtuple(
'_Handles', (
'window',
'renderer',
))
_GLHandles = collections.namedtuple(
'_GLHandles', (
'context',
))
_RGBMasks = collections.namedtuple(
'_RGBMasks', (
'red',
'green',
'blue',
))
_FIT_VIEW_REL_PADDING = 2.0
if sdl2.SDL_BYTEORDER == sdl2.SDL_LIL_ENDIAN:
_RGB_MASKS = _RGBMasks(red=0x000000FF, green=0x0000FF00, blue=0x00FF0000)
else:
_RGB_MASKS = _RGBMasks(red=0x00FF0000, green=0x0000FF00, blue=0x000000FF)
class GUI(object):
"""GUI.
Parameters
----------
window_title : str
Title for the window.
window_position : hienoi.Vector2i
Initial window position.
window_size : hienoi.Vector2i
Initial window size.
window_flags : int
SDL2 window flags.
view_aperture_x : float
Initial length in world units to be shown on the X axis.
view_zoom_range : hienoi.Vector2f
Zoom value range for the view.
mouse_wheel_step : float
Coefficient value for each mouse wheel step.
grid_density : float
See :attr:`GUI.grid_density`.
grid_adaptive_threshold : float
See :attr:`GUI.grid_adaptive_threshold`.
show_grid : bool
See :attr:`GUI.show_grid`.
background_color : hienoi.Vector4f
See :attr:`GUI.background_color`.
grid_color : hienoi.Vector4f
See :attr:`GUI.grid_color`.
grid_origin_color : hienoi.Vector4f
See :attr:`GUI.grid_origin_color`.
particle_display : int
See :attr:`GUI.particle_display`.
point_size : int
See :attr:`GUI.point_size`.
edge_feather : float
See :attr:`GUI.edge_feather`.
stroke_width : float
See :attr:`GUI.stroke_width`.
initialize_callback : function
Callback function to initialize any GUI state.
It takes a single argument ``gui``, an instance of this class.
on_event_callback : function
Callback function ran during the event polling.
It takes 3 arguments: ``gui``, an instance of this class,
``data``, some data to pass back and forth between the caller and this
callback function, and ``event``, the event fired.
renderer : dict
Keyword arguments for the configuration of the renderer. See the
parameters for the class :class:`hienoi.renderer.Renderer`.
Attributes
----------
view_position : hienoi.Vector2f
Position of the view (camera).
view_zoom : float
Current zoom value for the view.
grid_density : float
Density of the grid.
A density of 10.0 means that there are around 10 grid divisions
displayed on the X axis. A grid division unit represents a fixed length
in world units, meaning that the actual grid density changes depending
on the view's zoom.
show_grid : bool
True to show the grid.
background_color : hienoi.Vector4f
Color for the background.
grid_color : hienoi.Vector4f
Color for the grid.
grid_origin_color : hienoi.Vector4f
Color for the origin axis of the grid.
particle_display : int
Display mode for the particles. Available values are enumerated in the
:class:`~hienoi.ParticleDisplay` class.
point_size : int
Size of the particles in pixels when the display mode is set to
:attr:`~hienoi.ParticleDisplay.POINT`.
edge_feather : float
Feather fall-off in pixels to apply to objects drawn with displays such
as :attr:`~hienoi.ParticleDisplay.CIRCLE` or
:attr:`~hienoi.ParticleDisplay.DISC`.
stroke_width : float
Width of the stroke in pixels to apply to objects drawn with displays
such as :attr:`~hienoi.ParticleDisplay.CIRCLE`.
quit : bool
``True`` to signal to the application that it should quit.
has_view_changed : bool
``True`` if the view state has just been changed following an event. It
is reset to ``False`` whenever :meth:`poll_events` is called.
user_data : object
Attribute reserved for any user data.
"""
def __init__(self,
window_title='hienoi',
window_position=Vector2i(sdl2.SDL_WINDOWPOS_CENTERED,
sdl2.SDL_WINDOWPOS_CENTERED),
window_size=Vector2i(800, 600),
window_flags=sdl2.SDL_WINDOW_RESIZABLE,
view_aperture_x=100.0,
view_zoom_range=Vector2f(1e-6, 1e+6),
mouse_wheel_step=0.01,
grid_density=10.0,
grid_adaptive_threshold=3.0,
show_grid=True,
background_color=Vector4f(0.15, 0.15, 0.15, 1.0),
grid_color=Vector4f(0.85, 0.85, 0.85, 0.05),
grid_origin_color=Vector4f(0.85, 0.25, 0.25, 0.25),
particle_display=ParticleDisplay.DISC,
point_size=4,
edge_feather=2.0,
stroke_width=0.0,
initialize_callback=None,
on_event_callback=None,
renderer=None):
renderer = {} if renderer is None else renderer
if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != 0:
raise RuntimeError(sdl2.SDL_GetError().decode())
renderer_info = hienoi.renderer.get_info()
if renderer_info.api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MAJOR_VERSION,
renderer_info.major_version)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MINOR_VERSION,
renderer_info.minor_version)
if renderer_info.profile == GLProfile.CORE:
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK,
sdl2.SDL_GL_CONTEXT_PROFILE_CORE)
self._handles = _create_handles(window_title, window_position,
window_size, window_flags,
renderer_info)
self._renderer = hienoi.renderer.Renderer(**renderer)
self._initial_view_aperture_x = view_aperture_x
self._view_zoom_range = view_zoom_range
self._mouse_wheel_step = mouse_wheel_step
self._grid_adaptive_threshold = grid_adaptive_threshold
self._on_event_callback = on_event_callback
self._listen_for_navigation = False
self._is_view_manipulated = False
self.view_position = Vector2f(0.0, 0.0)
self._view_zoom = 1.0
self.grid_density = grid_density
self.show_grid = show_grid
self.background_color = background_color
self.grid_color = grid_color
self.grid_origin_color = grid_origin_color
self.particle_display = particle_display
self.point_size = point_size
self.edge_feather = edge_feather
self.stroke_width = stroke_width
self._navigation_action = NavigationAction.NONE
self.quit = False
self.user_data = UserData()
if initialize_callback:
initialize_callback(self)
@property
def view_zoom(self):
return self._view_zoom
@view_zoom.setter
def view_zoom(self, value):
self._view_zoom = max(self._view_zoom_range[0],
min(self._view_zoom_range[1], value))
@property
def navigation_action(self):
return self._navigation_action
@property
def has_view_changed(self):
return self._has_view_changed
def poll_events(self, scene_state, data=None):
"""Process each event in the queue.
Parameters
----------
scene_state : hienoi.renderer.SceneState
Scene state.
data : object
Data to pass back and forth between the caller and the function set
for the 'on event' callback.
"""
self._has_view_changed = False
event = sdl2.SDL_Event()
while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
event_type = event.type
if event_type == sdl2.SDL_QUIT:
self._on_quit_event(event.quit)
elif event_type == sdl2.SDL_WINDOWEVENT:
self._on_window_event(event.window)
elif event_type == sdl2.SDL_KEYDOWN:
self._on_key_down_event(event.key, scene_state)
elif event_type == sdl2.SDL_KEYUP:
self._on_key_up_event(event.key)
elif event_type == sdl2.SDL_MOUSEBUTTONDOWN:
self._on_mouse_button_down_event(event.button)
elif event_type == sdl2.SDL_MOUSEBUTTONUP:
self._on_mouse_button_up_event(event.button)
elif event_type == sdl2.SDL_MOUSEWHEEL:
self._on_mouse_wheel_event(event.wheel)
elif event_type == sdl2.SDL_MOUSEMOTION:
self._on_mouse_motion_event(event.motion)
if self._on_event_callback:
self._on_event_callback(self, data, event)
if self.quit:
break
def render(self, scene_state):
"""Render a new frame.
Parameters
----------
scene_state : hienoi.renderer.SceneState
Scene state.
"""
renderer_state = hienoi.renderer.State(
window_size=self.get_window_size(),
view_position=self.view_position,
view_zoom=self._view_zoom,
origin=self.world_to_screen(Vector2f(0.0, 0.0)),
initial_view_aperture_x=self._initial_view_aperture_x,
view_aperture=self.get_view_aperture(),
grid_density=self.grid_density,
grid_adaptive_threshold=self._grid_adaptive_threshold,
background_color=self.background_color,
grid_color=self.grid_color,
grid_origin_color=self.grid_origin_color,
show_grid=self.show_grid,
particle_display=self.particle_display,
point_size=self.point_size,
edge_feather=self.edge_feather,
stroke_width=self.stroke_width,
)
self._renderer.render(renderer_state, scene_state)
if hienoi.renderer.get_info().api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_SwapWindow(self._handles.window)
def terminate(self):
"""Cleanup the GUI resources."""
self._renderer.cleanup()
if hienoi.renderer.get_info().api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_DeleteContext(self._handles.renderer.context)
sdl2.SDL_DestroyWindow(self._handles.window)
sdl2.SDL_Quit()
def get_window_size(self):
"""Retrieve the window size.
Returns
-------
hienoi.Vector2i
The window size.
"""
window_size_x = ctypes.c_int()
window_size_y = ctypes.c_int()
sdl2.SDL_GetWindowSize(self._handles.window,
ctypes.byref(window_size_x),
ctypes.byref(window_size_y))
return Vector2i(window_size_x.value, window_size_y.value)
def get_view_aperture(self):
"""Retrieve the view aperture.
It represents the area in world units covered by the view.
Returns
-------
hienoi.Vector2f
The view aperture.
"""
window_size = self.get_window_size()
aperture_x = self._initial_view_aperture_x / self._view_zoom
return Vector2f(aperture_x, aperture_x * window_size.y / window_size.x)
def get_mouse_position(self):
"""Retrieve the mouse position in screen space.
Returns
-------
hienoi.Vector2i
The mouse position.
"""
position_x = ctypes.c_int()
position_y = ctypes.c_int()
sdl2.SDL_GetMouseState(ctypes.byref(position_x),
ctypes.byref(position_y))
return Vector2i(position_x.value, position_y.value)
def get_screen_to_world_ratio(self):
"""Retrieve the ratio to convert a sreen unit into a world unit.
Returns
-------
float
The screen to world ratio.
"""
window_size = self.get_window_size()
aperture_x = self._initial_view_aperture_x / self._view_zoom
return aperture_x / window_size.x
def screen_to_world(self, point):
"""Convert a point from screen space to world space coordinates.
Parameters
----------
point : hienoi.Vector2i
Point in screen space coordinates.
Returns
-------
hienoi.Vector2f
The point in world space coordinates.
"""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
return Vector2f(
(self.view_position.x
+ (point.x - window_size.x / 2.0)
* view_aperture.x / window_size.x),
(self.view_position.y
- (point.y - window_size.y / 2.0)
* view_aperture.y / window_size.y))
def world_to_screen(self, point):
"""Convert a point from world space to screen space coordinates.
Parameters
----------
point : hienoi.Vector2f
Point in world space coordinates.
Returns
-------
hienoi.Vector2i
The point in screen space coordinates.
"""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
return Vector2i(
int(round(
(window_size.x / view_aperture.x)
* (-self.view_position.x + point.x + view_aperture.x / 2.0))),
int(round(
(window_size.y / view_aperture.y)
* (self.view_position.y - point.y + view_aperture.y / 2.0))))
def write_snapshot(self, filename):
"""Take a snapshot of the view and write it as a BMP image.
Parameters
----------
filename : str
Destination filename.
"""
pixel_size = 4
pixels = self._renderer.read_pixels()
surface = sdl2.SDL_CreateRGBSurfaceFrom(
pixels.data, pixels.width, pixels.height,
8 * pixel_size, pixels.width * pixel_size,
_RGB_MASKS.red, _RGB_MASKS.green, _RGB_MASKS.blue, 0)
sdl2.SDL_SaveBMP(surface, filename)
sdl2.SDL_FreeSurface(surface)
def _reset_view(self):
"""Reset the view position and zoom."""
self.view_position = Vector2f(0.0, 0.0)
self.view_zoom = 1.0
self._has_view_changed = True
def _fit_view(self, scene_state):
"""Fit the view to the scene."""
if len(scene_state.particles) > 1:
window_size = self.get_window_size()
initial_size = Vector2f(
self._initial_view_aperture_x,
self._initial_view_aperture_x * window_size.y / window_size.x)
lower_bounds = scene_state.lower_bounds
upper_bounds = scene_state.upper_bounds
required_size = (upper_bounds - lower_bounds).iscale(
_FIT_VIEW_REL_PADDING)
required_size = Vector2f(
max(required_size.x,
initial_size.x * self._view_zoom_range[0]),
max(required_size.y,
initial_size.y * self._view_zoom_range[0]))
self.view_position = (lower_bounds + upper_bounds).iscale(0.5)
self.view_zoom = min(initial_size.x / required_size.x,
initial_size.y / required_size.y)
elif len(scene_state.particles) == 1:
self.view_position = Vector2f(
*scene_state.particles['position'][0])
self.view_zoom = 1.0
else:
self._reset_view()
self._has_view_changed = True
def _on_quit_event(self, event):
"""Event 'on quit'."""
self.quit = True
def _on_window_event(self, event):
"""Event 'on window'."""
if event.event == sdl2.SDL_WINDOWEVENT_SIZE_CHANGED:
self._renderer.resize(event.data1, event.data2)
def _on_key_down_event(self, event, scene_state):
"""Event 'on key down'."""
code = event.keysym.sym
modifier = event.keysym.mod
if modifier == sdl2.KMOD_NONE:
if code == sdl2.SDLK_SPACE:
self._listen_for_navigation = True
elif code == sdl2.SDLK_d:
self.particle_display = (
(self.particle_display + 1) % (ParticleDisplay._LAST + 1))
elif code == sdl2.SDLK_f:
self._fit_view(scene_state)
elif code == sdl2.SDLK_g:
self.show_grid = not self.show_grid
elif code == sdl2.SDLK_r:
self._reset_view()
def _on_key_up_event(self, event):
"""Event 'on key up'."""
code = event.keysym.sym
if code == sdl2.SDLK_SPACE:
self._listen_for_navigation = False
def _on_mouse_button_down_event(self, event):
"""Event 'on mouse button down'."""
if self._listen_for_navigation:
if event.button == sdl2.SDL_BUTTON_LEFT:
self._navigation_action = NavigationAction.MOVE
elif event.button == sdl2.SDL_BUTTON_RIGHT:
self._navigation_action = NavigationAction.ZOOM
def _on_mouse_button_up_event(self, event):
"""Event 'on mouse button up'."""
if (event.button == sdl2.SDL_BUTTON_LEFT
or event.button == sdl2.SDL_BUTTON_RIGHT):
self._navigation_action = NavigationAction.NONE
def _on_mouse_wheel_event(self, event):
"""Event 'on mouse wheel'."""
scale = 1.0 + self._mouse_wheel_step * event.y
self.view_zoom *= scale
self._has_view_changed = True
def _on_mouse_motion_event(self, event):
"""Event 'on mouse motion'."""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
if self._navigation_action == NavigationAction.MOVE:
self.view_position.set(
(self.view_position.x
- event.xrel * view_aperture.x / window_size.x),
(self.view_position.y
+ event.yrel * view_aperture.y / window_size.y))
self._has_view_changed = True
elif self._navigation_action == NavigationAction.ZOOM:
scale = (1.0
+ float(event.xrel) / window_size.x
- float(event.yrel) / window_size.y)
self.view_zoom *= scale
self._has_view_changed = True
def _create_handles(window_title, window_position, window_size, window_flags,
renderer_info):
"""Create the SDL2 handles."""
window_flags = sdl2.SDL_WINDOW_SHOWN | window_flags
if renderer_info.api == GraphicsAPI.OPENGL:
window_flags |= sdl2.SDL_WINDOW_OPENGL
window = sdl2.SDL_CreateWindow(
window_title.encode(),
window_position.x, window_position.y,
window_size.x, window_size.y,
window_flags)
if not window:
raise RuntimeError(sdl2.SDL_GetError().decode())
context = sdl2.SDL_GL_CreateContext(window)
if not context:
raise RuntimeError(sdl2.SDL_GetError().decode())
# Try to disable the vertical synchronization. It applies to the active
# context and thus needs to be called after `SDL_GL_CreateContext`.
sdl2.SDL_GL_SetSwapInterval(0)
return _Handles(
window=window,
renderer=_GLHandles(context=context))
| christophercrouzet/hienoi | hienoi/gui.py | Python | mit | 20,220 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-20 18:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit', '0004_auto_20170520_1931'),
]
operations = [
migrations.AddField(
model_name='question',
name='announce',
field=models.BooleanField(default=False, verbose_name='Anuncio'),
),
migrations.AddField(
model_name='question',
name='label',
field=models.IntegerField(choices=[(0, ''), (1, 'Ayuda!'), (2, 'Resuelta'), (3, 'Discusión'), (4, 'Tutorial'), (5, 'Ejemplo'), (6, 'Recurso'), (7, 'Juego')], default=0, verbose_name='Etiqueta'),
),
]
| hcosta/escueladevideojuegos.net-backend-django | edv/reddit/migrations/0005_auto_20170520_2005.py | Python | gpl-3.0 | 788 | 0.001271 |
"""Module Description
Copyright (c) 2008 H. Gene Shin <shin@jimmy.harvard.edu>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: H. Gene Shin
@contact: shin@jimmy.harvard.edu
"""
# ------------------------------------
# Python modules
# ------------------------------------
import sys,time,re,operator,copy,sqlite3,warnings
import itertools
from array import *
import bisect
# ------------------------------------
# My own Python modules
# ------------------------------------
from CEAS.inout import *
from CEAS.tables import *
from CEAS.sampler import *
from CEAS.corelib import *
import CEAS.Prob as Prob
#-------------------------------------
# classes
#-------------------------------------
class Annotator:
"""Annotator Class
This class annotates a list of genome coordinates and gives a summary of annotation.
    1. Annotator.annotate() annotates a list of genome coordinates (locations) based on a gene annotation table (e.g., refSeq provided by UCSC).
    2. Annotator.summarize() summarizes the annotation in a table (see tables.py).
"""
def __init__(self):
"""Constructor"""
pass
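    # A minimal usage sketch (hypothetical variable names; `coords` maps chromosome
    # names to sorted coordinate arrays and `gene_table` is a gene annotation table
    # such as a GeneTable object from inout.py):
    #
    #     annotator = Annotator()
    #     annot_table = annotator.annotate(genome_coordinates=coords,
    #                                      gene_table=gene_table,
    #                                      prom=(1000, 2000, 3000),
    #                                      biprom=(2500, 5000),
    #                                      down=(1000, 2000, 3000))
    #     summary, p = annotator.summarize(annot_table)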
def annotate(self,genome_coordinates=None,gene_table=None,roi=None,prom=(1000, 2000, 3000),biprom=(2500, 5000), down=(1000, 2000, 3000), gene_div=(3,5),quantize=True):
"""Annotate given coordinates based on the given gene table."""
        # get the chromosomes of the gene table and genome coordinates
try:
chroms_gc=genome_coordinates.keys()
chroms_gt=gene_table.get_chroms()
chroms_gt,chroms_gc=set(chroms_gt),set(chroms_gc)
chroms=chroms_gt.intersection(chroms_gc)
chroms=list(chroms)
chroms.sort()
num_coordinates={}
num_genes={}
for chrom in chroms:
num_coordinates[chrom]=len(genome_coordinates[chrom])
num_genes[chrom]=len(gene_table[chrom][gene_table[chrom].keys()[0]])
except AttributeError:
raise Exception('Genome coordinates and gene table must be given for genome annotation')
#initialize with an empty dictionary
table=AnnotTable()
#iterate over the chromosomes
for chrom in chroms:
genes=gene_table[chrom]
num_genes_this_chr=num_genes[chrom]
coordinates=genome_coordinates[chrom]
num_coordinates_this_chr=num_coordinates[chrom]
table.init_table(chrom)
#initialize the promoter distances. This will be used in obtaining bidirectional promoter too.
prom_dists=[[0,0] for i in xrange(num_coordinates_this_chr)]
# get the nearest genes to set the searching range for promoter and downstream
nearest_genes=self.find_nearest_genes(genes)
# point begin
pointerBeg=0
maxprom = max(prom[-1], biprom[-1])
maxdown = down[-1]
for i in xrange(0,num_genes_this_chr):
# get the strand of the gene
try:
strand=genes['strand'][i]
except KeyError:
raise Exception("'strand' must be included in the gene annotation table for running CEAS")
# get the beginning and end point of search
# the beginning and end points are the end of the previous gene and the beginning of the next gene.
beg,end=0,0
try:
if strand=='+':
beg=max(genes['txStart'][i]-maxprom, nearest_genes['before'][i])
end=min(genes['txEnd'][i]+maxdown, nearest_genes['after'][i])
else:
beg=max(genes['txStart'][i]-maxdown, nearest_genes['before'][i])
end=min(genes['txEnd'][i]+maxprom, nearest_genes['after'][i])
except KeyError: # check the gene annotation table has necessary columns
raise Exception("'txStart' and 'txEnd' must be included in the gene annotation table for running CEAS")
### test block-out ###
                # set search index j to the beginning point of the last gene. This makes sure that we include isoforms
j=pointerBeg
###
### test block-in ###
#j = bisect.bisect_left(coordinates, beg)
###
if coordinates[j]>end: continue
### test block-out ###
# two while loops to detect the annotation start coordinate for the current gene.
while j>0 and coordinates[j]>=beg:
j-=1
while j<num_coordinates_this_chr and coordinates[j]<beg:
if j>=table.size(chrom)[0]:
table.add_row(chrom,[coordinates[j]]+[0]*table.get_column_num())
j+=1
###
# if get to the end of chromosome, then break
if j==num_coordinates_this_chr: break
### test block-out
# save the current start point for the next gene
pointerBeg=j
###
# otherwise, get the annotations of the probes related with the current gene
while j<num_coordinates_this_chr and (coordinates[j]>=beg and coordinates[j]<=end):
# get the annotation and update the entire annotation table
single_annot=self.annotate_single(coordinates[j],strand,genes['txStart'][i],genes['txEnd'][i],\
genes['cdsStart'][i],genes['cdsEnd'][i],genes['exonStarts'][i],genes['exonEnds'][i],prom,down,gene_div,biprom)
self.update_annot_table(table,single_annot,coordinates,prom_dists,chrom,j,biprom)
j+=1
# quantize promoter, bipromoter and downstream
if quantize:
table[chrom]['promoter']=ordinate2(table[chrom]['promoter'],prom)
table[chrom]['bipromoter']=ordinate2(table[chrom]['bipromoter'],biprom)
table[chrom]['downstream']=ordinate2(table[chrom]['downstream'],down)
if roi:
roichroms = roi.get_chroms()
for chrom in chroms:
table[chrom]['roi']=[0]*len(genome_coordinates[chrom])
if chrom in roichroms:
self.do_annotation_roi(genome_coordinates[chrom], table[chrom], roi[chrom])
return table
def annotate_single(self,coordinate,strand,txStart,txEnd,cdsStart,cdsEnd,exonStarts,exonEnds,prom,down,gene_div,biprom):
"""Annotate a single genome coordinate
Parameters:
        1. coordinate: a single genome location (coordinate) to annotate
2. strand: the strand (+/-) of the gene
3. txStart: transcription start
4. txEnd: transcription end
5. cdsStart: translation start
6. cdsEnd: translation end
7. exonStarts: exon start locations
8. exonEnds: exon end locations
9. prom: promoter lengths (e.g., (1000, 2000, 3000))
10. down: downstream lengths (e.g., (1000, 2000, 3000))
        11. gene_div: the numbers of divisions of a gene (e.g., (3, 5))
        12. biprom: bidirectional promoter lengths (e.g., (2500, 5000))
"""
# container of the annotation for a single location.
single_annot=None
maxprom = max(prom[-1], biprom[-1])
maxdown = down[-1]
# get the annotation for the location
# + strand
if strand=='+':
# promoter
if coordinate<txStart and coordinate>=txStart-maxprom: # txStart-promo <= prob < txStart
single_annot=['promoter',txStart-coordinate]
# downstream
elif coordinate>=txEnd and coordinate<txEnd+maxdown:# txEnd <= prob < txEnd+down
single_annot=['downstream',coordinate-txEnd]
# gene
elif coordinate>=txStart and coordinate<txEnd:
isExon=self.exonify(coordinate,exonStarts,exonEnds)
# exon
if isExon != None:
# coding exon
if coordinate>=cdsStart and coordinate<cdsEnd: single_annot=['gene',3,self.get_rel_loc_cds(coordinate,strand,exonStarts,exonEnds,isExon,gene_div)]
# 5'UTR
elif coordinate<cdsStart: single_annot=['gene',1]
# 3'UTR
else: single_annot=['gene',2]
# intron
else:
single_annot=['gene',4]
# relative location within the gene
single_annot.insert(2,self.get_rel_loc(coordinate,strand,txStart,txEnd,gene_div))
# - strand
else:
# promoter
if coordinate>=txEnd and coordinate<txEnd+maxprom:
single_annot=['promoter',txEnd-coordinate]
# downstream
elif coordinate<txStart and coordinate>=txStart-maxdown:
single_annot=['downstream',txStart-coordinate]
# gene
elif coordinate>=txStart and coordinate<txEnd:
isExon=self.exonify(coordinate,exonStarts,exonEnds) #then in an exon
# exon
if isExon != None:
# coding exon
if coordinate>=cdsStart and coordinate<cdsEnd: single_annot=['gene',3,self.get_rel_loc_cds(coordinate,strand,exonStarts,exonEnds,isExon,gene_div)]
# 5'UTR
elif coordinate>=cdsEnd: single_annot=['gene',1]
# 3'UTR
else: single_annot=['gene',2]
# intron
else:
single_annot=['gene',4]
# relative location within the gene
single_annot.insert(2,self.get_rel_loc(coordinate,strand,txStart,txEnd,gene_div))
return single_annot
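    # Worked example (hypothetical numbers): for a '+' strand gene with
    # txStart=10000 and txEnd=20000, and prom=down=(1000, 2000, 3000), a coordinate
    # at 9500 yields ['promoter', 500] (500 bp upstream of the TSS), while a
    # coordinate at 20500 yields ['downstream', 500].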
def exonify(self,coordinate,exonStarts,exonEnds):
"""Return which exon a coordinate falls in"""
hit=None
for i in xrange(0, len(exonStarts)):
try:
if coordinate>=exonStarts[i] and coordinate<exonEnds[i]: hit=i
except IndexError:
                raise Exception("'exonStarts' and 'exonEnds' must have the same number of elements")
return hit
def get_rel_loc(self,coordinate,strand,txStart,txEnd,gene_div):
"""Return the relative location within the gene"""
lengths=[(i,int(round(1.0*(txEnd-txStart+1)/i))) for i in gene_div]
if strand=='+':
return [min((coordinate-txStart+1)/length+1,i) for i,length in lengths]
else:
return [min((txEnd-coordinate+1)/length+1,i) for i,length in lengths]
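    # Worked example (hypothetical numbers): for a '+' strand gene with
    # txStart=1000, txEnd=4000 and gene_div=(3, 5), a coordinate at 2500 lies
    # roughly half-way through the gene, so get_rel_loc returns [2, 3]
    # (2nd third and 3rd fifth of the gene body).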
    def get_rel_loc_cds(self,coordinate,strand,exonStarts,exonEnds,isExon,gene_div):
        """Return the relative location within the CDS"""
array_subtractor=lambda array1,array2: array('l',[array1[i]-array2[i] for i in range(0,len(array1))])
cdsLength=sum(array_subtractor(exonEnds,exonStarts))
lengths=[(i,int(round(1.0*cdsLength/i))) for i in gene_div]
if strand=='+':
return [min((coordinate-exonStarts[isExon]+sum(array_subtractor(exonEnds[:isExon],exonStarts[:isExon])))/length+1,i) for i,length in lengths]
else:
return [min((exonEnds[isExon]-coordinate+sum(array_subtractor(exonEnds[isExon+1:],exonStarts[isExon+1:])))/length+1,i) for i,length in lengths]
def update_annot_table(self,table,single_annot,coordinates,prom_dists,chrom,index_coord,biprom):
"""Update the annotation table by comparing the current annotation (single_annot) and the existing annotation in the table"""
categories=['promoter','bipromoter','downstream','gene']
        # If the current annotation is NoneType, which means that the current genome coordinate does not belong to any of
        # promoters, downstreams, or genes, just add an empty annotation to the end of the annotation table
maxbiprom = biprom[-1]
try:
category=single_annot[0]
except TypeError: # when single_annot=None
if index_coord>=table.size(chrom)[0]:
table.add_row(chrom,[coordinates[index_coord]]+[0]*table.get_column_num())
return
# When an annotation does not exist at the current genome coordinate, append the new annotation
# and update
try:
original=table[chrom][category][index_coord]
except IndexError:
table.add_row(chrom,[coordinates[index_coord]]+[0]*table.get_column_num())
original=table[chrom][category][index_coord]
# +/- promoter distances
prom_dist=prom_dists[index_coord]
# when an annotation already exists at the current genome coordinate
# scaler=lambda dist,maxrange,scale:int(1.0*max(1,dist-1)*scale/maxrange)+1
new=single_annot[1]
# promoter and bipromoter
if category=='promoter':
# promoter
# update + and - distances
if new < 0:
if prom_dist[0]==0:
prom_dist[0]=new
else:
prom_dist[0]=max(new,prom_dist[0])
elif new > 0:
if prom_dist[1]==0:
prom_dist[1]=new
else:
prom_dist[1]=min(new,prom_dist[1])
            # update the promoter annotation of the current probe
if original==0:
if prom_dist[0]==0 or prom_dist[1]==0:
fin_prom=abs(sum(prom_dist))
else:
fin_prom=min(abs(prom_dist[0]),prom_dist[1])
else:
if prom_dist[0]==0 and prom_dist[1]==0:
fin_prom=original
elif prom_dist[0]==0 and prom_dist[1]!=0:
fin_prom=min(prom_dist[1],original)
elif prom_dist[0]!=0 and prom_dist[1]==0:
fin_prom=min(abs(prom_dist[0]),original)
else:
fin_prom=min([abs(prom_dist[0]),prom_dist[1],original])
#update the table with the shortest distance from TSS
table[chrom][category][index_coord]=fin_prom
# bidirectional promoter
# if + and - distances have non-zero values, that is bidirectional promoter
if prom_dist[0]*prom_dist[1] != 0:
new_biprom=abs(prom_dist[0])+prom_dist[1]
#if new_biprom<=cutoff:
if new_biprom<=maxbiprom:
# original bipromoter value
ori_biprom=table[chrom]['bipromoter'][index_coord]
# update the bipromoter
if ori_biprom==0: fin_biprom=new_biprom
else: fin_biprom=min(ori_biprom,new_biprom)
table[chrom]['bipromoter'][index_coord]=fin_biprom
elif category=='downstream':
if original==0: fin_down=new
else: fin_down=min(new,original)
table[chrom][category][index_coord]=fin_down
elif category=='gene':
if original==0: fin_gene=new
else: fin_gene=min(new,original)
table[chrom][category][index_coord]=fin_gene
table[chrom]['rel_loc'][index_coord]=copy.deepcopy(single_annot[2])
try:
table[chrom]['rel_loc_cds'][index_coord]=copy.deepcopy(single_annot[3])
except IndexError:
pass
    def find_nearest_genes(self,genes):
        """Given a list of genes, find the nearest genes before and after each of them.
        Parameters:
        1. genes: a dictionary of a gene annotation table. If a GeneTable object (see inout.py) is used, simply pass GeneTable[chrom].
        Return:
        nearest_genes: {'before':[n1,n2,...],'after':[m1,m2,...]}. The ith elements under 'before' and 'after' represent
        the end of the nearest gene before and the start of the nearest gene after the ith gene.
"""
nearest_genes={'before':[],'after':[]}
num_genes=len(genes[genes.keys()[0]])
for i in xrange(num_genes):
j=i+1
while j<num_genes and genes['txEnd'][i]>=genes['txStart'][j]:
j+=1
j=min(num_genes-1,j)
if genes['txEnd'][i]>=genes['txStart'][j]:
nearest_genes['after'].append(genes['txEnd'][num_genes-1]+1000000)
else:
nearest_genes['after'].append(genes['txStart'][j])
# find the nearest gene before the current gene
j=i-1
while j>=0 and genes['txStart'][i]<=genes['txEnd'][j]:
j-=1
j=max(0,j)
if genes['txStart'][i]<=genes['txEnd'][j]:
nearest_genes['before'].append(0)
else:
nearest_genes['before'].append(genes['txEnd'][j])
return nearest_genes
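    # Worked example (hypothetical numbers): for three non-overlapping genes with
    # (txStart, txEnd) = (100, 200), (500, 700), (900, 1000), find_nearest_genes
    # returns {'before': [0, 200, 700], 'after': [500, 900, 1001000]} -- the first
    # gene is padded with 0 and the last one with its txEnd + 1000000.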
    def do_annotation_roi(self,coordinates,table_of_chrom,roi_of_chrom):
        """Perform annotation for the regions of interest (ROI)"""
init = 0
union = union_intervals([roi_of_chrom['start'],roi_of_chrom['end']])
for ncstart,ncend in itertools.izip(union[0], union[1]):
#for ncstart,ncend in itertools.izip(roi_of_chrom['start'],roi_of_chrom['end']):
start,end=where(ncstart,ncend,coordinates[init:])
table_of_chrom['roi'][init+start:init+end]=[1]*(end-start)
init+=end
def summarize(self,table):
"""Provides a summary of the annotation.
Parameters:
        table: an AnnotTable object produced by Annotator.annotate()
        Output:
        summary, p: a Summary object and a P object (see tables.py), which contain the summary of the annotation and the corresponding proportions
"""
# if empty annotation table, just return without doing any.
# empty summary table
summary=Summary()
p=P()
try:
chroms=table.get_chroms()
if not chroms: raise ValueError
except (AttributeError,ValueError):
return summary
# obtain a summary statistics
summary.init_table('whole')
for chrom in chroms:
# get the summary of a single chromosome
self.get_summary(table,summary,chrom)
# integrate it into the 'whole'
self.integrate_summaries(summary,chrom)
        # get p (probabilities)
p.init_table('whole')
for chrom in chroms:
self.get_p(summary,p,chrom)
# get the probabilities of the whole genome
self.get_p(summary,p,'whole')
return summary,p
def get_summary(self,table,summary,chrom):
"""Get the summary of a single chromosome"""
length=table.size(chrom)[0]
summary.init_table(chrom)
summary[chrom]['Ns']=length
for i in xrange(0,length):
# get some sort of histograms
if table[chrom]['promoter'][i]!=0: summary[chrom]['promoter'][abs(table[chrom]['promoter'][i])-1]+=1
if table[chrom]['bipromoter'][i]!=0: summary[chrom]['bipromoter'][table[chrom]['bipromoter'][i]-1]+=1
if table[chrom]['downstream'][i]!=0: summary[chrom]['downstream'][table[chrom]['downstream'][i]-1]+=1
if table[chrom]['gene'][i]!=0: summary[chrom]['gene'][table[chrom]['gene'][i]-1]+=1
if table[chrom]['rel_loc'][i]!=[0,0]:
summary[chrom]['rel_loc'][0][table[chrom]['rel_loc'][i][0]-1]+=1
summary[chrom]['rel_loc'][1][table[chrom]['rel_loc'][i][1]-1]+=1
if table[chrom]['rel_loc_cds'][i]!=[0,0]:
summary[chrom]['rel_loc_cds'][0][table[chrom]['rel_loc_cds'][i][0]-1]+=1
summary[chrom]['rel_loc_cds'][1][table[chrom]['rel_loc_cds'][i][1]-1]+=1
try:
if table[chrom]['roi'][i]!=0: summary[chrom]['roi']+=1
except KeyError:
pass
# the last gene element is all
summary[chrom]['gene'][-1]=sum(summary[chrom]['gene'][:-1])
# get the cumulative sums
summary[chrom]['promoter']=[sum(summary[chrom]['promoter'][:i]) for i in range(1,len(summary[chrom]['promoter'])+1)]
summary[chrom]['bipromoter']=[sum(summary[chrom]['bipromoter'][:i]) for i in range(1,len(summary[chrom]['bipromoter'])+1)]
summary[chrom]['downstream']=[sum(summary[chrom]['downstream'][:i]) for i in range(1,len(summary[chrom]['downstream'])+1)]
def integrate_summaries(self,summary,chrom):
"""Add the summary of a single chromosome to self.summary['whole']"""
array_adder=lambda x,y: [x[i]+y[i] for i in range(0,len(x))]
# when self.summary['whole']is empty, just copy the summary of the first chromosome.
# Then, add array by array
summary['whole']['promoter']=array_adder(summary['whole']['promoter'],summary[chrom]['promoter'])
summary['whole']['bipromoter']=array_adder(summary['whole']['bipromoter'],summary[chrom]['bipromoter'])
summary['whole']['downstream']=array_adder(summary['whole']['downstream'],summary[chrom]['downstream'])
summary['whole']['gene']=array_adder(summary['whole']['gene'],summary[chrom]['gene'])
summary['whole']['rel_loc'][0]=array_adder(summary['whole']['rel_loc'][0],summary[chrom]['rel_loc'][0])
summary['whole']['rel_loc'][1]=array_adder(summary['whole']['rel_loc'][1],summary[chrom]['rel_loc'][1])
summary['whole']['rel_loc_cds'][0]=array_adder(summary['whole']['rel_loc_cds'][0],summary[chrom]['rel_loc_cds'][0])
summary['whole']['rel_loc_cds'][1]=array_adder(summary['whole']['rel_loc_cds'][1],summary[chrom]['rel_loc_cds'][1])
summary['whole']['roi']+=summary[chrom]['roi']
summary['whole']['Ns']+=summary[chrom]['Ns']
def get_p(self,summary,p,chrom):
"""Get the p of a single chromosome"""
total=summary[chrom]['Ns']
p.init_table(chrom)
# check if the denominator is zero
try:
p[chrom]['promoter']=map(lambda x: 1.0*x/total,summary[chrom]['promoter'])
except ZeroDivisionError:
total=1
p[chrom]['promoter']=map(lambda x: 1.0*x/total,summary[chrom]['promoter'])
p[chrom]['bipromoter']=map(lambda x: 1.0*x/total,summary[chrom]['bipromoter'])
p[chrom]['downstream']=map(lambda x: 1.0*x/total,summary[chrom]['downstream'])
p[chrom]['gene']=map(lambda x: 1.0*x/total,summary[chrom]['gene'])
#relative locations
total_rel_loc=sum(summary[chrom]['rel_loc'][0])
total_rel_loc_cds=sum(summary[chrom]['rel_loc_cds'][0])
# check if the denominator is zero
try:
p_rel_loc0=map(lambda x: 1.0*x/total_rel_loc,summary[chrom]['rel_loc'][0])
except ZeroDivisionError:
total_rel_loc=1
p_rel_loc0=map(lambda x: 1.0*x/total_rel_loc,summary[chrom]['rel_loc'][0])
p_rel_loc1=map(lambda x: 1.0*x/total_rel_loc,summary[chrom]['rel_loc'][1])
try:
p_rel_loc_cds0=map(lambda x: 1.0*x/total_rel_loc_cds,summary[chrom]['rel_loc_cds'][0])
except ZeroDivisionError:
total_rel_loc_cds=1
p_rel_loc_cds0=map(lambda x: 1.0*x/total_rel_loc_cds,summary[chrom]['rel_loc_cds'][0])
p_rel_loc_cds1=map(lambda x: 1.0*x/total_rel_loc_cds,summary[chrom]['rel_loc_cds'][1])
p[chrom]['rel_loc']=[p_rel_loc0,p_rel_loc1]
p[chrom]['rel_loc_cds']=[p_rel_loc_cds0,p_rel_loc_cds1]
try:
p[chrom]['roi']=1.0*summary[chrom]['roi']/total
except ZeroDivisionError:
p[chrom]['roi']=1.0*summary[chrom]['roi']
except KeyError:
pass
try:
p[chrom]['chroms']=1.0*summary[chrom]['Ns']/summary['whole']['Ns']
except ZeroDivisionError:
p[chrom]['chroms']=total
def obtain_distribution_of_sites(self, AnnotT):
"""Obtain the distribution of sites (eg ChIP regions) over the elements
Parameters:
1. AnnotT: AnnotTable object (see in tables.py)
Return:
1. summary: summary of non-overlapping counts of promoter, downstream, gene, and enhancer (and total)
2. p: proportions of promoter, downstream, gene, and enhancer
"""
# get chromosomes
chroms = AnnotT.get_chroms()
if not chroms: return None
summary = {}
summary['whole'] = {'promoter':[0, 0, 0], 'downstream':[0, 0, 0], 'gene':[0, 0, 0, 0], 'enhancer':0, 'total':0}
# iterate through chromosomes
for chrom in chroms:
summary[chrom]={'promoter':[0, 0, 0], 'downstream':[0, 0, 0], 'gene':[0, 0, 0, 0], 'enhancer':0, 'total':0}
length = AnnotT.size(chrom)[0]
for promoter, downstream, gene in itertools.izip(AnnotT[chrom]['promoter'], AnnotT[chrom]['downstream'], AnnotT[chrom]['gene']):
# get some sort of histograms
if abs(promoter) == 1: # promoter 1
summary[chrom]['promoter'][0] += 1
elif downstream == 1: # downstream 1
summary[chrom]['downstream'][0] += 1
elif abs(promoter) == 2: # promoter 2
summary[chrom]['promoter'][1] += 1
elif downstream == 2: # downstream 2
summary[chrom]['downstream'][1] += 1
elif abs(promoter) == 3: # promoter 3
summary[chrom]['promoter'][2] += 1
elif downstream == 3: # downstream 3
summary[chrom]['downstream'][2] += 1
elif gene != 0: # gene
summary[chrom]['gene'][gene-1] += 1
else: # enhancer
summary[chrom]['enhancer'] += 1
# total
summary[chrom]['total'] = sum(summary[chrom]['promoter'] + summary[chrom]['downstream'] + summary[chrom]['gene'] + [summary[chrom]['enhancer']])
# update the whole
summary['whole']['promoter'] = map(lambda x,y: x+y, summary['whole']['promoter'], summary[chrom]['promoter'])
summary['whole']['downstream'] = map(lambda x,y: x+y, summary['whole']['downstream'], summary[chrom]['downstream'])
summary['whole']['gene'] = map(lambda x,y: x+y, summary['whole']['gene'], summary[chrom]['gene'])
summary['whole']['enhancer'] += summary[chrom]['enhancer']
summary['whole']['total'] += summary[chrom]['total']
p = {}
chroms += ['whole']
for chrom in chroms:
p[chrom] = {'promoter':[0.0, 0.0, 0.0], 'downstream':[0.0, 0.0, 0.0], 'gene':[0.0, 0.0, 0.0, 0.0], 'enhancer':0.0}
# if total = 0, just go to the next chromosome
if summary[chrom]['total'] == 0:
continue
p[chrom]['promoter'] = map(lambda x, y: 1.0*x/y, summary[chrom]['promoter'], [summary[chrom]['total']]*len(summary[chrom]['promoter']))
p[chrom]['downstream'] = map(lambda x, y: 1.0*x/y, summary[chrom]['downstream'], [summary[chrom]['total']]*len(summary[chrom]['downstream']))
p[chrom]['gene'] = map(lambda x, y: 1.0*x/y, summary[chrom]['gene'], [summary[chrom]['total']]*len(summary[chrom]['gene']))
p[chrom]['enhancer'] = 1.0 * summary[chrom]['enhancer'] / summary[chrom]['total']
return summary, p
    def obtain_distribution_of_sites_per_chrom(self, AnnotT):
        """Obtain the distribution of sites over the elements, per chromosome.
        Note that this function works only if the annotation has been quantized. See the annotate() method.
        arguments:
        1. AnnotT: AnnotTable object (see in tables.py)
        Return:
        1. summary: per-chromosome summary of non-overlapping counts of promoter, downstream, gene, and enhancer (and total)
"""
# get chromosomes
chroms = AnnotT.get_chroms()
if not chroms: return None
summary = {}
#summary['whole'] = {'promoter':[0, 0, 0], 'downstream':[0, 0, 0], 'gene':[0, 0, 0, 0], 'enhancer':0, 'total':0}
# iterate through chromosomes
for chrom in chroms:
summary[chrom]={'promoter':[0, 0, 0], 'downstream':[0, 0, 0], 'gene':[0, 0, 0, 0], 'enhancer':0, 'total':0}
length = AnnotT.size(chrom)[0]
for promoter, downstream, gene in itertools.izip(AnnotT[chrom]['promoter'], AnnotT[chrom]['downstream'], AnnotT[chrom]['gene']):
# get some sort of histograms
if abs(promoter) == 1: # promoter 1
summary[chrom]['promoter'][0] += 1
elif downstream == 1: # downstream 1
summary[chrom]['downstream'][0] += 1
elif abs(promoter) == 2: # promoter 2
summary[chrom]['promoter'][1] += 1
elif downstream == 2: # downstream 2
summary[chrom]['downstream'][1] += 1
elif abs(promoter) == 3: # promoter 3
summary[chrom]['promoter'][2] += 1
elif downstream == 3: # downstream 3
summary[chrom]['downstream'][2] += 1
elif gene != 0: # gene
summary[chrom]['gene'][gene-1] += 1
else: # enhancer
summary[chrom]['enhancer'] += 1
# total
summary[chrom]['total'] = sum(summary[chrom]['promoter'] + summary[chrom]['downstream'] + summary[chrom]['gene'] + [summary[chrom]['enhancer']])
return summary
### test version of annotator for genome background
###
class AnnotatorGBG(Annotator):
    """AnnotatorGBG computes the genome background annotation; it inherits Annotator"""
def __init__(self):
"""Constructor"""
Annotator.__init__(self)
def annotate(self,genome_coordinates=None,gene_table=None,roi=None,prom=10000,biprom=20000, down=10000, gene_div=(3,5),quantize=True):
"""Annotate given coordinates based on the given gene table."""
        # get the chromosomes of the gene table and genome coordinates
try:
chroms_gc=genome_coordinates.keys()
chroms_gt=gene_table.get_chroms()
chroms_gt,chroms_gc=set(chroms_gt),set(chroms_gc)
chroms=chroms_gt.intersection(chroms_gc)
chroms=list(chroms)
chroms.sort()
num_coordinates={}
num_genes={}
for chrom in chroms:
num_coordinates[chrom]=len(genome_coordinates[chrom])
num_genes[chrom]=len(gene_table[chrom][gene_table[chrom].keys()[0]])
except AttributeError:
raise Exception('Genome coordinates and gene table must be given for genome annotation')
#initialize with an empty dictionary
table=AnnotTable()
#iterate over the chromosomes
for chrom in chroms:
genes=gene_table[chrom]
num_genes_this_chr=num_genes[chrom]
coordinates=genome_coordinates[chrom]
num_coordinates_this_chr=num_coordinates[chrom]
table.init_table(chrom)
#initialize the promoter distances. This will be used in obtaining bidirectional promoter too.
prom_dists=[[0,0] for i in xrange(num_coordinates_this_chr)]
# get the nearest genes to set the searching range for promoter and downstream
nearest_genes=self.find_nearest_genes(genes)
# point begin
pointerBeg=0
maxprom = prom
maxdown = down
for i in xrange(0,num_genes_this_chr):
# get the strand of the gene
try:
strand=genes['strand'][i]
except KeyError:
raise Exception("'strand' must be included in the gene annotation table for running CEAS")
# get the beginning and end point of search
# the beginning and end points are the end of the previous gene and the beginning of the next gene.
beg,end=0,0
try:
if strand=='+':
beg=max(genes['txStart'][i]-maxprom, nearest_genes['before'][i])
end=min(genes['txEnd'][i]+maxdown, nearest_genes['after'][i])
else:
beg=max(genes['txStart'][i]-maxdown, nearest_genes['before'][i])
end=min(genes['txEnd'][i]+maxprom, nearest_genes['after'][i])
except KeyError: # check the gene annotation table has necessary columns
raise Exception("'txStart' and 'txEnd' must be included in the gene annotation table for running CEAS")
                # set search index j to the beginning point of the last gene. This makes sure that we include isoforms
j=pointerBeg
if coordinates[j]>end: continue
# two while loops to detect the annotation start coordinate for the current gene.
while j>0 and coordinates[j]>=beg:
j-=1
while j<num_coordinates_this_chr and coordinates[j]<beg:
if j>=table.size(chrom)[0]:
table.add_row(chrom,[coordinates[j]]+[0]*table.get_column_num())
j+=1
# if get to the end of chromosome, then break
if j==num_coordinates_this_chr: break
# save the current start point for the next gene
pointerBeg=j
# otherwise, get the annotations of the probes related with the current gene
while j<num_coordinates_this_chr and (coordinates[j]>=beg and coordinates[j]<=end):
# get the annotation and update the entire annotation table
single_annot=self.annotate_single(coordinates[j],strand,genes['txStart'][i],genes['txEnd'][i],\
genes['cdsStart'][i],genes['cdsEnd'][i],genes['exonStarts'][i],genes['exonEnds'][i],prom,down,gene_div)
self.update_annot_table(table,single_annot,coordinates,prom_dists,chrom,j,biprom)
j+=1
# quantize promoter, bipromoter and downstream
if quantize:
table[chrom]['promoter']=ordinate2(table[chrom]['promoter'],prom)
table[chrom]['bipromoter']=ordinate2(table[chrom]['bipromoter'],biprom)
table[chrom]['downstream']=ordinate2(table[chrom]['downstream'],down)
if roi:
roichroms = roi.get_chroms()
for chrom in chroms:
table[chrom]['roi']=[0]*len(genome_coordinates[chrom])
if chrom in roichroms:
self.do_annotation_roi(genome_coordinates[chrom], table[chrom], roi[chrom])
return table
def annotate_single(self,coordinate,strand,txStart,txEnd,cdsStart,cdsEnd,exonStarts,exonEnds,prom,down,gene_div):
"""Annotate a single genome coordinate
Parameters:
        1. coordinate: a single genome location (coordinate) to annotate
2. strand: the strand (+/-) of the gene
3. txStart: transcription start
4. txEnd: transcription end
5. cdsStart: translation start
6. cdsEnd: translation end
7. exonStarts: exon start locations
8. exonEnds: exon end locations
        9. prom: promoter length in bp (a single value, e.g., 10000)
        10. down: downstream length in bp (a single value, e.g., 10000)
        11. gene_div: the numbers of divisions of a gene (e.g., (3, 5))
"""
# container of the annotation for a single location.
single_annot=None
maxprom = prom
maxdown = down
# get the annotation for the location
# + strand
if strand=='+':
# promoter
if coordinate<txStart and coordinate>=txStart-maxprom: # txStart-promo <= prob < txStart
single_annot=['promoter',txStart-coordinate]
# downstream
elif coordinate>=txEnd and coordinate<txEnd+maxdown:# txEnd <= prob < txEnd+down
single_annot=['downstream',coordinate-txEnd]
# gene
elif coordinate>=txStart and coordinate<txEnd:
isExon=self.exonify(coordinate,exonStarts,exonEnds)
# exon
if isExon !=None:
# coding exon
if coordinate>=cdsStart and coordinate<cdsEnd: single_annot=['gene',3,self.get_rel_loc_cds(coordinate,strand,exonStarts,exonEnds,isExon,gene_div)]
# 5'UTR
elif coordinate<cdsStart: single_annot=['gene',1]
# 3'UTR
else: single_annot=['gene',2]
# intron
else:
single_annot=['gene',4]
# relative location within the gene
single_annot.insert(2,self.get_rel_loc(coordinate,strand,txStart,txEnd,gene_div))
# - strand
else:
# promoter
if coordinate>=txEnd and coordinate<txEnd+maxprom:
single_annot=['promoter',txEnd-coordinate]
# downstream
elif coordinate<txStart and coordinate>=txStart-maxdown:
single_annot=['downstream',txStart-coordinate]
# gene
elif coordinate>=txStart and coordinate<txEnd:
isExon=self.exonify(coordinate,exonStarts,exonEnds) #then in an exon
# exon
if isExon != None:
# coding exon
if coordinate>=cdsStart and coordinate<cdsEnd: single_annot=['gene',3,self.get_rel_loc_cds(coordinate,strand,exonStarts,exonEnds,isExon,gene_div)]
# 5'UTR
elif coordinate>=cdsEnd: single_annot=['gene',1]
# 3'UTR
else: single_annot=['gene',2]
# intron
else:
single_annot=['gene',4]
# relative location within the gene
single_annot.insert(2,self.get_rel_loc(coordinate,strand,txStart,txEnd,gene_div))
return single_annot
def update_annot_table(self,table,single_annot,coordinates,prom_dists,chrom,index_coord,biprom):
"""Update the annotation table by comparing the current annotation (single_annot) and the existing annotation in the table"""
categories=['promoter','bipromoter','downstream','gene']
        # If the current annotation is NoneType, which means that the current genome coordinate does not belong to any of
        # promoters, downstreams, or genes, just add an empty annotation to the end of the annotation table
maxbiprom = biprom
try:
category=single_annot[0]
except TypeError: # when single_annot=None
if index_coord>=table.size(chrom)[0]:
table.add_row(chrom,[coordinates[index_coord]]+[0]*table.get_column_num())
return
# When an annotation does not exist at the current genome coordinate, append the new annotation
# and update
try:
original=table[chrom][category][index_coord]
except IndexError:
table.add_row(chrom,[coordinates[index_coord]]+[0]*table.get_column_num())
original=table[chrom][category][index_coord]
# +/- promoter distances
prom_dist=prom_dists[index_coord]
# when an annotation already exists at the current genome coordinate
        # scaler=lambda dist,maxrange,scale:int(1.0*max(1,dist-1)*scale/maxrange)+1
new=single_annot[1]
# promoter and bipromoter
if category=='promoter':
# promoter
# update + and - distances
if new < 0:
if prom_dist[0]==0:
prom_dist[0]=new
else:
prom_dist[0]=max(new,prom_dist[0])
elif new > 0:
if prom_dist[1]==0:
prom_dist[1]=new
else:
prom_dist[1]=min(new,prom_dist[1])
            # update the promoter annotation of the current probe
if original==0:
if prom_dist[0]==0 or prom_dist[1]==0:
fin_prom=abs(sum(prom_dist))
else:
fin_prom=min(abs(prom_dist[0]),prom_dist[1])
else:
if prom_dist[0]==0 and prom_dist[1]==0:
fin_prom=original
elif prom_dist[0]==0 and prom_dist[1]!=0:
fin_prom=min(prom_dist[1],original)
elif prom_dist[0]!=0 and prom_dist[1]==0:
fin_prom=min(abs(prom_dist[0]),original)
else:
fin_prom=min([abs(prom_dist[0]),prom_dist[1],original])
#update the table with the shortest distance from TSS
table[chrom][category][index_coord]=fin_prom
# bidirectional promoter
# if + and - distances have non-zero values, that is bidirectional promoter
if prom_dist[0]*prom_dist[1] != 0:
new_biprom=abs(prom_dist[0])+prom_dist[1]
#if new_biprom<=cutoff:
if new_biprom<=maxbiprom:
# original bipromoter value
ori_biprom=table[chrom]['bipromoter'][index_coord]
# update the bipromoter
if ori_biprom==0: fin_biprom=new_biprom
else: fin_biprom=min(ori_biprom,new_biprom)
table[chrom]['bipromoter'][index_coord]=fin_biprom
elif category=='downstream':
if original==0: fin_down=new
else: fin_down=min(new,original)
table[chrom][category][index_coord]=fin_down
elif category=='gene':
if original==0: fin_gene=new
else: fin_gene=min(new,original)
table[chrom][category][index_coord]=fin_gene
table[chrom]['rel_loc'][index_coord]=copy.deepcopy(single_annot[2])
try:
table[chrom]['rel_loc_cds'][index_coord]=copy.deepcopy(single_annot[3])
except IndexError:
pass
def summarize(self,table,maxprom,maxbiprom,maxdown,by):
"""Provides a summary of the annotation.
Parameters:
        1. table: an AnnotTable object produced by Annotator.annotate()
2. maxprom: the largest promoter size
3. maxbiprom: the largest bipromoter size
4. maxdown: the largest downstream size
5. by: bin size
Output:
        summary, p: a Summary object and a P object (see tables.py), which contain the summary of the annotation and the corresponding proportions
"""
# get bins from the max sizes of promoters, bidirectional promoters, and downstream
bins_prom=[0,500]+ range(1000, maxprom, by) + [maxprom]
bins_biprom=[0,500]+ range(1000, maxbiprom, by) + [maxbiprom]
bins_down=[0,500]+ range(1000, maxdown, by) + [maxdown]
summary=SummaryGBG(numprom=len(bins_prom)-1,numbiprom=len(bins_biprom)-1,numdown=len(bins_down)-1)
p=PGBG(numprom=len(bins_prom)-1,numbiprom=len(bins_biprom)-1,numdown=len(bins_down)-1)
try:
chroms=table.get_chroms()
if not chroms: raise ValueError
except (AttributeError,ValueError):
return summary
# obtain a summary statistics
summary.init_table('whole')
for chrom in chroms:
# get the summary of a single chromosome
self.get_summary(table,summary,chrom,bins_prom,bins_biprom,bins_down)
# integrate it into the 'whole'
self.integrate_summaries(summary,chrom)
        # get p (probabilities)
p.init_table('whole')
for chrom in chroms:
self.get_p(summary,p,chrom)
# get the probabilities of the whole genome
self.get_p(summary,p,'whole')
return summary,p
def get_summary(self,table,summary,chrom,bins_prom,bins_biprom,bins_down):
"""Get the summary of a single chromosome"""
length=table.size(chrom)[0]
summary.init_table(chrom)
summary[chrom]['Ns']=length
# get the binned promoter, bipromoter and downstream distances
binned_prom,binned_biprom,binned_down=self.cumbin(table,chrom,bins_prom,bins_biprom,bins_down)
summary[chrom]['promoter']=binned_prom
summary[chrom]['bipromoter']=binned_biprom
summary[chrom]['downstream']=binned_down
for i in xrange(0,length):
# get some sort of histograms
if table[chrom]['gene'][i]!=0: summary[chrom]['gene'][table[chrom]['gene'][i]-1]+=1
if table[chrom]['rel_loc'][i]!=[0,0]:
summary[chrom]['rel_loc'][0][table[chrom]['rel_loc'][i][0]-1]+=1
summary[chrom]['rel_loc'][1][table[chrom]['rel_loc'][i][1]-1]+=1
if table[chrom]['rel_loc_cds'][i]!=[0,0]:
summary[chrom]['rel_loc_cds'][0][table[chrom]['rel_loc_cds'][i][0]-1]+=1
summary[chrom]['rel_loc_cds'][1][table[chrom]['rel_loc_cds'][i][1]-1]+=1
try:
if table[chrom]['roi'][i]!=0: summary[chrom]['roi']+=1
except KeyError:
pass
# the last gene element is all
summary[chrom]['gene'][-1]=sum(summary[chrom]['gene'][:-1])
    def cumbin(self,table,chrom,bins_prom,bins_biprom,bins_down):
        """Calculate the genome background: cumulative binned counts of the promoter,
        bipromoter, and downstream distances for a single chromosome.
        """
allbp = [0] * len(bins_prom)
allbd = [0] * len(bins_down)
allbbp = [0] * len(bins_biprom)
prom=table[chrom]['promoter']
down=table[chrom]['downstream']
biprom=table[chrom]['bipromoter']
bp=self.bin(prom, bins_prom)
bd=self.bin(down, bins_down)
bbp=self.bin(biprom, bins_biprom)
###
allbp, c = array_adder(allbp, bp)
allbd, c = array_adder(allbd, bd)
allbbp, c = array_adder(allbbp, bbp)
# cumsum from 500 to maxprom (or maxbiprom or maxdown)
cumbp = cumsum(allbp)
cumbd = cumsum(allbd)
cumbbp = cumsum(allbbp)
return cumbp, cumbbp, cumbd
def bin(self, x,bins):
"""Do binning for x.
Parameters:
1. x: an array of data to do binning for
        2. bins: an array of bin edges; element i of the output counts values x with bins[i] < x <= bins[i+1]
Return:
1. binned: an array of binned data. Each array element is the count within each bin
"""
# binlen=len(bins)
# binned=[0]*(binlen+1)
# sx=sorted(x)
# xlen=len(sx)
# j=0
#
# #x < bins[0]
# while j<xlen and sx[j]<bins[0]:
# binned[0]+=1
# j+=1
#
# # bins[i] <= x < bins[i+1]
# for i in xrange(0,binlen-1):
# while j<xlen and sx[j]>=bins[i] and sx[j]<bins[i+1]:
# binned[i+1]+=1
# j+=1
#
# # x>=bins[-1]
# while j < xlen and sx[j]>=bins[-1]:
# binned[-1]+=1
# j+=1
#
# return binned
binlen = len(bins)
binned = [0]* (binlen-1)
sx = sorted(x)
xlen = len(sx)
j=0
# prefilter
while j < xlen and sx[j]<=bins[0]:
j+=1
for i in xrange(0, binlen-1):
while j < xlen and sx[j] > bins[i] and sx[j] <= bins[i+1]:
binned[i] += 1
j+=1
return binned
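    # Worked example (hypothetical numbers): with bins=[0, 500, 1000, 2000],
    # bin([100, 600, 600, 1500, 2500], bins) returns [1, 2, 1] -- one value in
    # (0, 500], two in (500, 1000], one in (1000, 2000]; 2500 lies beyond the
    # last bin edge and is not counted.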
def obtain_distribution_of_sites_for_genome(self, AnnotT, prom=(1000, 2000, 3000), down=(1000, 2000, 3000)):
"""Obtain the distribution of genome over the elements
arguments:
1. AnnotT: AnnotTable object (see in tables.py)
2. prom: promoter bins
3. down: downstream bins
        Return:
        1. summary: summary of non-overlapping counts of promoter, downstream, gene, and enhancer (and total)
"""
# get chromosomes
chroms = AnnotT.get_chroms()
if not chroms: return None
summary = {}
#summary['whole'] = {'promoter':[0, 0, 0], 'downstream':[0, 0, 0], 'gene':[0, 0, 0, 0], 'enhancer':0, 'total':0}
# iterate through chromosomes
for chrom in chroms:
summary[chrom]={'promoter':[0, 0, 0], 'downstream':[0, 0, 0], 'gene':[0, 0, 0, 0], 'enhancer':0, 'total':0}
length = AnnotT.size(chrom)[0]
for promoter, downstream, gene in itertools.izip(AnnotT[chrom]['promoter'], AnnotT[chrom]['downstream'], AnnotT[chrom]['gene']):
# get some sort of histograms
if abs(promoter) > 0 and abs(promoter) <= prom[0]: # promoter 1
summary[chrom]['promoter'][0] += 1
elif downstream > 0 and downstream <= down[0]: # downstream 1
summary[chrom]['downstream'][0] += 1
elif abs(promoter) > prom[0] and abs(promoter) <= prom[1]: # promoter 2
summary[chrom]['promoter'][1] += 1
elif downstream > down[0] and downstream <= down[1]: # downstream 2
summary[chrom]['downstream'][1] += 1
elif abs(promoter) > prom[1] and abs(promoter) <= prom[2]: # promoter 3
summary[chrom]['promoter'][2] += 1
elif downstream > down[1] and downstream <= down[2]: # downstream 3
summary[chrom]['downstream'][2] += 1
elif gene != 0: # gene
summary[chrom]['gene'][gene-1] += 1
else: # enhancer
summary[chrom]['enhancer'] += 1
# total
summary[chrom]['total'] = sum(summary[chrom]['promoter'] + summary[chrom]['downstream'] + summary[chrom]['gene'] + [summary[chrom]['enhancer']])
return summary
###
class GeneAnnotator:
"""Class GeneAnnotator performs gene-centered annotation given a list of ChIP regions
"""
def __init__(self):
self.map = DataFrame()
self.u = 3000
self.d = 3000
    def annotate(self, GeneT, ChIP, u=3000, d=3000, name2=False):
        """Perform gene-centered annotation
        Parameters:
        1. GeneT: gene annotation table (e.g., a GeneTable object, see inout.py)
        2. ChIP: ChIP regions keyed by chromosome, with 'start' and 'end' arrays
        3. u: span in bp upstream and downstream of the TSS to consider (default 3000)
        4. d: span in bp downstream of the TTS to consider (default 3000)
        5. name2: if True, also include the 'name2' column of the gene table in the output
        """
self.u = u
self.d = d
        # initialize the DataFrame
self.map.append_column([], colname = 'name')
self.map.append_column([], colname = 'chr')
self.map.append_column([], colname = 'txStart')
self.map.append_column([], colname = 'txEnd')
self.map.append_column([], colname = 'strand')
self.map.append_column([], colname = 'dist u TSS')
self.map.append_column([], colname = 'dist d TSS')
self.map.append_column([], colname = 'dist u TTS')
self.map.append_column([], colname = 'dist d TTS')
self.map.append_column([], colname = '%dbp u TSS' %u)
self.map.append_column([], colname = '%dbp d TSS' %u)
self.map.append_column([], colname = '1/3 gene')
self.map.append_column([], colname = '2/3 gene')
self.map.append_column([], colname = '3/3 gene')
self.map.append_column([], colname = '%dbp d TTS' %d)
self.map.append_column([], colname = 'exons')
if name2: # if the user wants to save 'name2'
self.map.append_column([], colname = 'name2')
# get the chroms of the gene annotation table
chroms = GeneT.get_chroms()
chroms.sort()
chroms_ChIP = ChIP.get_chroms()
#maxsearch = 10000 # maximum search range for finding the nearest binding site.
# iterate through the chromosomes
for chrom in chroms:
# the number of min distances and the number of annotation fields
n_mindist = 4 # this variable should be manually updated whenever 'mindists' changes.
n_annot_fields = 7 # this variable should be manually updated whenever 'annotations' changes.
# if chrom is also in chroms of ChIP, do annotation. Otherwise, just fill out with 'NA'
n_genes = len(GeneT[chrom]['txStart'])
if chrom in chroms_ChIP:
n_ChIP = len(ChIP[chrom]['start']) # get the number of genes
ChIP_start = ChIP[chrom]['start']
ChIP_end = ChIP[chrom]['end']
ChIP_center = sorted(map(lambda x, y: (x+y)/2, ChIP_start, ChIP_end))
else:
# if this chromosome is not in ChIP, just fill out w/ NA
mindist = ['NA'] * n_mindist
annotation = [0] * n_annot_fields
if name2:
for name, chr, txStart, txEnd, strand, n2 in itertools.izip(GeneT[chrom]['name'], [chrom]*n_genes, GeneT[chrom]['txStart'], GeneT[chrom]['txEnd'], GeneT[chrom]['strand'], GeneT[chrom]['name2']):
self.map.append_row([name, chr, txStart, txEnd, strand] + mindist + annotation + [n2])
else:
for name, chr, txStart, txEnd, strand in itertools.izip(GeneT[chrom]['name'], [chrom]*n_genes, GeneT[chrom]['txStart'], GeneT[chrom]['txEnd'], GeneT[chrom]['strand']):
self.map.append_row([name, chr, txStart, txEnd, strand] + mindist + annotation)
continue # then continue to the next chromosome
# 1. get the minimum distances from TSS and TTS
# this chromosome's txStart, txEnd, and strand
txStart = GeneT[chrom]['txStart']
txEnd = GeneT[chrom]['txEnd']
strand = GeneT[chrom]['strand']
# get the distances from every txStart to its nearest binding sites in both directions.
txStart_up, txStart_do = self.min_dists(txStart, ChIP_center)
            # get the distances from every txEnd to its nearest binding sites in both directions.
txEnd_up, txEnd_do = self.min_dists(txEnd, ChIP_center)
# re-order the distances of each gene according to the transcription direction
mindists = self.reorder(txStart_up, txStart_do, txEnd_up, txEnd_do, strand)
# 2. get the gene-centered annotation
pointerBeg = 0
annotations = []
for txStart, txEnd, strand, exonStarts, exonEnds in itertools.izip(GeneT[chrom]['txStart'], GeneT[chrom]['txEnd'], GeneT[chrom]['strand'], GeneT[chrom]['exonStarts'], GeneT[chrom]['exonEnds']):
# get the gene region to search for binding site
if strand == '+':
lower = txStart - u
upper = txEnd + d
else:
lower = txStart - d
upper = txEnd + u
                # set search index j to the beginning point of the last gene. This makes sure that we include isoforms
j = pointerBeg
#if ChIP_start[j] > upper: continue
# adjust the search start point
while (j > 0 and j < n_ChIP) and ChIP_start[j] >= lower:
j-=1
while j < n_ChIP and ChIP_end[j] < lower:
j+=1
# if get to the end of ChIP, break
#if j==n_ChIP: break
# save the current start point for the next gene
pointerBeg = j
# otherwise, get the annotations of the probes related with the current gene
while j < n_ChIP and (ChIP_end[j] >= lower and ChIP_start[j] <= upper):
j+=1
# any ChIP region(s) in the search range
if pointerBeg < j:
annotation = self.annotate_single_gene(txStart, txEnd, strand, u, d, exonStarts, exonEnds, ChIP_start[pointerBeg:j], ChIP_end[pointerBeg:j], precision=2)
else:
annotation = [0] * n_annot_fields
annotations.append(annotation)
# 3. save as DataFrame
if name2:
                for name, chr, txStart, txEnd, strand, mindist, annotation, n2 in itertools.izip(GeneT[chrom]['name'], [chrom]*n_genes, GeneT[chrom]['txStart'], GeneT[chrom]['txEnd'], GeneT[chrom]['strand'], mindists, annotations, GeneT[chrom]['name2']):
self.map.append_row([name, chr, txStart, txEnd, strand] + mindist + annotation + [n2])
else:
for name, chr, txStart, txEnd, strand, mindist, annotation in itertools.izip(GeneT[chrom]['name'], [chrom]*n_genes, GeneT[chrom]['txStart'], GeneT[chrom]['txEnd'], GeneT[chrom]['strand'], mindists, annotations):
self.map.append_row([name, chr, txStart, txEnd, strand] + mindist + annotation)
def min_dists(self, gref, cref):
"""Return the distances to the nearest binding sites upstream and downstream of gene reference point.
ARGUMENTS:
gref : a series of gene reference points such as txStart or txEnd
cref : a series of ChIP region reference points such as the centers of ChIP regions
"""
l_cref = len(cref)
pointBeg = 0
up = []
do = []
for g in gref:
#j = bisect.bisect_left(cref, g, pointBeg)
j = bisect.bisect_left(cref, g)
if j == 0:
if g == cref[j]:
up.append(0)
do.append(0)
else:
up.append('NA')
do.append(cref[j] - g)
elif j == l_cref:
up.append(g - cref[j-1])
do.append('NA')
else:
if g == cref[j]:
up.append(0)
do.append(0)
else:
up.append(g - cref[j-1])
do.append(cref[j] - g)
pointBeg = j
return up, do
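    # Worked example (hypothetical numbers): with gene reference points
    # gref=[150, 700] and sorted ChIP-region centers cref=[100, 500], min_dists
    # returns up=[50, 200] and do=[350, 'NA'] -- the second reference point has
    # no ChIP center downstream of it.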
def reorder(self, txStart_up, txStart_do, txEnd_up, txEnd_do, strand):
"""Correct the order according to the strand.
        ARGUMENTS:
txStart_up : distances to binding sites upstream of txStart
txStart_do : distances to binding sites downstream of txStart
txEnd_up : distances to binding sites upstream of txEnd
txEnd_do : distances to binding sites downstream of txEnd
strand : strands of genes
"""
mindists = []
for txSu, txSd, txEu, txEd, s in itertools.izip(txStart_up, txStart_do, txEnd_up, txEnd_do, strand):
if s == '+':
mindists.append([txSu, txSd, txEu, txEd])
else:
mindists.append([txEd, txEu, txSd, txSu])
return mindists
    def get_gene_sections(self, txStart, txEnd):
        """Divide each gene equally into 3 sections
Parameters:
1. txStart: start position of a gene
2. txEnd: end position of the gene
"""
onethird = (txEnd - txStart)/3
gene = [txStart, txStart + onethird, txEnd - onethird, txEnd]
return gene
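    # Worked example (hypothetical numbers): get_gene_sections(0, 300) returns
    # [0, 100, 200, 300], i.e. the boundaries of the three equal thirds of the gene.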
    def get_nearTSS_sections(self, txStart, txEnd, strand, u = 3000):
        """Get u bp upstream and downstream of TSS"""
if strand == '+':
nearTSS = [max(0, txStart - u), txStart, txStart + u]
else:
nearTSS = [max(0, txEnd - u), txEnd, txEnd + u]
return nearTSS
def get_nearTTS_sections(self, txStart, txEnd, strand, d = 3000):
"""Get d bp downstream of TTS"""
if strand == '+':
nearTTS = [txEnd, txEnd + d]
else:
nearTTS = [txStart - d, txStart]
return nearTTS
def extract_txStarts(self,txS,txE,strand,name,sort=True):
"""Extract txStarts given 'txStart', 'txEnd' and 'strand' of a gene annotation table.
Parameters:
1. txS: 'txStart'
2. txE: 'txEnd'
        3. strand: 'strand'
        4. name: 'name'
        5. sort: True=sort by value, False=just extract and return
        Return:
        a list, refseqs = [(tss1,tts1,strand1,name1),...,(tssn,ttsn,strandn,namen)], where each tuple holds the strand-aware transcription start and end, the strand, and the gene name
"""
refseqs=[]
for s,e,st,n in itertools.izip(txS,txE,strand,name):
if st=='+':
refseqs.append((s,e,st,n))
else:
refseqs.append((e,s,st,n))
if sort:
refseqs=sorted(refseqs,key=operator.itemgetter(0))
return refseqs
def get_overlap(self, start, end, ChIP_start, ChIP_end):
"""Return the overlap in bp"""
overlap = max(0, min(end, ChIP_end) - max(start, ChIP_start))
return overlap
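    # Worked example (hypothetical numbers): get_overlap(100, 200, 150, 300)
    # returns max(0, min(200, 300) - max(100, 150)) = 50 bp of overlap.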
    def annotate_single_gene(self, txStart, txEnd, strand, u, d, exonStarts, exonEnds, ChIP_starts, ChIP_ends, precision=None):
        """Annotate a single gene with the ChIP regions that overlap its search range"""
gene = self.get_gene_sections(txStart, txEnd) # get the sections of a gene to consider
gene_len = len(gene)
len_in_bp = [gene[i+1] - gene[i] for i in range(gene_len - 1)]
annot_gene = [0] * (gene_len - 1)
nearTSS = self.get_nearTSS_sections(txStart, txEnd, strand, u)
nearTSS_len = len(nearTSS)
len_in_bp_nearTSS = [nearTSS[i+1] - nearTSS[i] for i in range(nearTSS_len - 1)]
annot_nearTSS = [0] * (nearTSS_len - 1)
nearTTS = self.get_nearTTS_sections(txStart, txEnd, strand, d)
nearTTS_len = len(nearTTS)
len_in_bp_nearTTS = [nearTTS[i+1] - nearTTS[i] for i in range(nearTTS_len - 1)]
annot_nearTTS = [0] * (nearTTS_len - 1)
for ChIP_start, ChIP_end in itertools.izip(ChIP_starts, ChIP_ends):
temp = [0] * (gene_len - 1)
for i in range(gene_len - 1):
temp[i]=self.get_overlap(gene[i], gene[i+1], ChIP_start, ChIP_end)
annot_gene = map(lambda x, y: x + y, annot_gene, temp)
temp_nearTSS = [0] * (nearTSS_len - 1)
for i in range(nearTSS_len - 1):
temp_nearTSS[i] = self.get_overlap(nearTSS[i], nearTSS[i+1], ChIP_start, ChIP_end)
annot_nearTSS = map(lambda x, y: x + y, annot_nearTSS, temp_nearTSS)
temp_nearTTS = [0] * (nearTTS_len - 1)
for i in range(nearTTS_len - 1):
temp_nearTTS[i] = self.get_overlap(nearTTS[i], nearTTS[i+1], ChIP_start, ChIP_end)
annot_nearTTS = map(lambda x, y: x + y, annot_nearTTS, temp_nearTTS)
# normalize the annotation wrt length
annot_gene = [1.0*a/l for a, l in itertools.izip(annot_gene, len_in_bp)]
annot_nearTSS = [1.0*a/l for a, l in itertools.izip(annot_nearTSS, len_in_bp_nearTSS)]
annot_nearTTS = [1.0*a/l for a, l in itertools.izip(annot_nearTTS, len_in_bp_nearTTS)]
# if negative strand, reverse the annotation
if strand =='-':
annot_gene.reverse()
annot_nearTSS.reverse()
annot_nearTTS.reverse()
annot_exons = [0] * len(exonStarts)
n_exons = len(exonStarts)
len_exons = sum(map(lambda x, y: y - x, exonStarts, exonEnds))
# get the coverage on the exons
for ChIP_start, ChIP_end in itertools.izip(ChIP_starts, ChIP_ends):
temp_exons = [0] * n_exons
for i in range(n_exons):
temp_exons[i] = self.get_overlap(exonStarts[i], exonEnds[i], ChIP_start, ChIP_end)
annot_exons = map(lambda x, y: x + y, annot_exons, temp_exons)
annot_exons = [1.0 * sum(annot_exons)/ len_exons]
# control the output number precision
if type(precision) == int:
if precision > 0:
annot_nearTSS = map(lambda x: round(x, precision), annot_nearTSS)
annot_gene = map(lambda x: round(x, precision), annot_gene)
annot_nearTTS = map(lambda x: round(x, precision), annot_nearTTS)
annot_exons = map(lambda x: round(x, precision), annot_exons)
return annot_nearTSS + annot_gene + annot_nearTTS + annot_exons
def write(self, fn, description=True):
"""Write the gene-centered annotation result in a TXT file with XLS extension
Parameters:
1. fn: file name. XLS extension will be added automatically.
        2. description: If True, add a header describing the columns
"""
# get the span sizes for upstream and downstream
u = self.u
d = self.d
# if description is True, put comment (header)
if description == True:
            comment = "\n".join(("# RefSeq: RefSeq ID", \
                "# chr: chromosome of a RefSeq gene",\
                "# txStart: 5' end of a RefSeq gene", \
                "# txEnd: 3' end site of a RefSeq gene", \
                "# strand: strand of a RefSeq gene", \
                "# dist u TSS: Distance to the nearest ChIP region's center upstream of transcription start site (bp)", \
                "# dist d TSS: Distance to the nearest ChIP region's center downstream of transcription start site (bp)",\
                "# dist u TTS: Distance to the nearest ChIP region's center upstream of transcription end site (bp)", \
                "# dist d TTS: Distance to the nearest ChIP region's center downstream of transcription end (bp)", \
                "# %dbp u TSS: Occupancy rate of ChIP region in %dbp upstream of transcription start site (0.0 - 1.0)" %(u, u),\
                "# %dbp d TSS: Occupancy rate of ChIP region in %dbp downstream of transcription start site (0.0 - 1.0)" %(u, u), \
                "# 1/3 gene: Occupancy rate of ChIP region in 1/3 gene (0.0 - 1.0)", \
                "# 2/3 gene: Occupancy rate of ChIP region in 2/3 gene (0.0 - 1.0)", \
                "# 3/3 gene: Occupancy rate of ChIP region in 3/3 gene (0.0 - 1.0)", \
                "# %dbp d TTS: Occupancy rate of ChIP region in %dbp downstream of transcription end (0.0 - 1.0)" %(d, d), \
"# exons: Occupancy rate of ChIP regions in exons (0.0-1.0)", \
"# Note that txStart and txEnd indicate 5' and 3' ends of genes whereas TSS and TTS transcription start and end sites in consideration of strand."))
comment += "\n"
else:
comment = ""
self.map.write(fn = fn + '.xls', comment = comment)
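# A minimal usage sketch (hypothetical variable names; `gene_table` is a gene
# annotation table and `chip_regions` holds ChIP regions keyed by chromosome):
#
#     gannotator = GeneAnnotator()
#     gannotator.annotate(gene_table, chip_regions, u=3000, d=3000, name2=False)
#     gannotator.write('gene_centered_annotation')  # writes gene_centered_annotation.xls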
#-------------------------------------
# function
#-------------------------------------
def make_table_complete(table,chroms):
"""Make the given table complete by adding rows of missing chromosomes.
    Some chromosomes might not have any ChIP regions, which causes those chromosomes to be empty in the ChIP annotation Summary and P tables.
    In such a case, this function fills up those chromosomes with 0 (or 0.0 for P) to prevent errors in advance.
Parameters:
1. table: a table object (Summary or P, see in tables.py) that will be completed.
2. chroms: a list of reference chromosomes. Usually from a Summary or P object
"""
newchroms = [c for c in chroms if c!= 'whole']
for chrom in newchroms:
if not table.has_chrom(chrom): table.init_table(chrom)
def estimate_pvals(genome_p,ChIP_summary,ChIP_p):
    """Estimate p-values using a binomial model
Parameters:
1. genome_p: a P object (see tables.py) of genome background statistics.
2. ChIP_summary: a Summary object (see tables.py), which contains the summary of ChIP annotation.
3. ChIP_p: a P object (see tables.py), which contains the probabilities of ChIP annotation.
Output:
pvalue: a P object of p-values.
"""
pvalue=P()
chroms=set(genome_p.get_chroms()).intersection(set(ChIP_summary.get_chroms()))
chroms = list(chroms)
for chrom in chroms:
_get_pvals(genome_p,ChIP_summary,ChIP_p,pvalue,chrom)
N=ChIP_summary['whole']['Ns']
for chrom in chroms:
q=ChIP_summary[chrom]['Ns']
p=genome_p[chrom]['chroms']
pa=ChIP_p[chrom]['chroms']
pvalue[chrom]['chroms']=_calc_pval(q,N,p,pa)
return pvalue
def _get_pvals(genome_p,ChIP_summary,ChIP_p,pvalue,chrom):
"""Get pvalues for the given chromosome"""
pvalue.init_table(chrom)
length=len(ChIP_summary[chrom]['promoter'])
N=ChIP_summary[chrom]['Ns']
for i in range(0,length):
q=ChIP_summary[chrom]['promoter'][i]
p=genome_p[chrom]['promoter'][i]
pa=ChIP_p[chrom]['promoter'][i]
pvalue[chrom]['promoter'][i]=_calc_pval(q,N,p,pa)
length=len(ChIP_summary[chrom]['bipromoter'])
N=ChIP_summary[chrom]['Ns']
for i in range(0,length):
q=ChIP_summary[chrom]['bipromoter'][i]
p=genome_p[chrom]['bipromoter'][i]
pa=ChIP_p[chrom]['bipromoter'][i]
pvalue[chrom]['bipromoter'][i]=_calc_pval(q,N,p,pa)
length=len(ChIP_summary[chrom]['downstream'])
N=ChIP_summary[chrom]['Ns']
for i in range(0,length):
q=ChIP_summary[chrom]['downstream'][i]
p=genome_p[chrom]['downstream'][i]
pa=ChIP_p[chrom]['downstream'][i]
pvalue[chrom]['downstream'][i]=_calc_pval(q,N,p,pa)
length=len(ChIP_summary[chrom]['gene'])
N=ChIP_summary[chrom]['Ns']
for i in range(0,length):
q=ChIP_summary[chrom]['gene'][i]
p=genome_p[chrom]['gene'][i]
pa=ChIP_p[chrom]['gene'][i]
pvalue[chrom]['gene'][i]=_calc_pval(q,N,p,pa)
length=len(ChIP_summary[chrom]['rel_loc'][0])
N=sum(ChIP_summary[chrom]['rel_loc'][0])
for i in range(0,length):
q=ChIP_summary[chrom]['rel_loc'][0][i]
p=genome_p[chrom]['rel_loc'][0][i]
pa=ChIP_p[chrom]['rel_loc'][0][i]
pvalue[chrom]['rel_loc'][0][i]=_calc_pval(q,N,p,pa)
length=len(ChIP_summary[chrom]['rel_loc'][1])
for i in range(0,length):
q=ChIP_summary[chrom]['rel_loc'][1][i]
p=genome_p[chrom]['rel_loc'][1][i]
pa=ChIP_p[chrom]['rel_loc'][1][i]
pvalue[chrom]['rel_loc'][1][i]=_calc_pval(q,N,p,pa)
length=len(ChIP_summary[chrom]['rel_loc_cds'][0])
N=sum(ChIP_summary[chrom]['rel_loc_cds'][0])
for i in range(0,length):
q=ChIP_summary[chrom]['rel_loc_cds'][0][i]
p=genome_p[chrom]['rel_loc_cds'][0][i]
pa=ChIP_p[chrom]['rel_loc_cds'][0][i]
pvalue[chrom]['rel_loc_cds'][0][i]=_calc_pval(q,N,p,pa)
length=len(ChIP_summary[chrom]['rel_loc_cds'][1])
for i in range(0,length):
q=ChIP_summary[chrom]['rel_loc_cds'][1][i]
p=genome_p[chrom]['rel_loc_cds'][1][i]
pa=ChIP_p[chrom]['rel_loc_cds'][1][i]
pvalue[chrom]['rel_loc_cds'][1][i]=_calc_pval(q,N,p,pa)
N=ChIP_summary[chrom]['Ns']
q=ChIP_summary[chrom]['roi']
p=genome_p[chrom]['roi']
pa=ChIP_p[chrom]['roi']
pvalue[chrom]['roi']=_calc_pval(q,N,p,pa)
N=ChIP_summary['whole']['Ns']
q=ChIP_summary[chrom]['Ns']
p=genome_p[chrom]['chroms']
pa=ChIP_p[chrom]['chroms']
pvalue[chrom]['chroms']=_calc_pval(q,N,p,pa)
def _calc_pval(q,N,p,pa):
"""Calculate a pvalue given N,q,p,pa"""
if p>=pa:
pval=Prob.binomial_cdf(q,N,p)
else:
pval=Prob.binomial_cdf(q,N,p,lower=False)
if pval==0.0: pval=4.92e-324
return pval
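# Illustrative cross-check (not part of CEAS): assuming SciPy is available and that
# Prob.binomial_cdf(q, N, p) is the lower-tail binomial CDF with lower=False giving
# the upper tail, the same p-value choice can be reproduced with scipy.stats. The
# helper name below is hypothetical and is not used anywhere in this module.
def _calc_pval_scipy(q, N, p, pa):
    """Sketch of _calc_pval using scipy.stats, for verification only."""
    from scipy.stats import binom
    if p >= pa:
        pval = binom.cdf(q, N, p)        # depletion: probability of seeing <= q hits
    else:
        pval = binom.sf(q - 1, N, p)     # enrichment: probability of seeing >= q hits
    return max(pval, 4.92e-324)          # mirror the guard against returning exactly 0.0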
def estimate_enhancer_p(summary):
"""Estimate the proportion of intergenic enhancer
    Intergenic enhancers are defined as the regions that remain after removing promoter, downstream, and genic regions
"""
| ChengchenZhao/DrSeq2 | ceas_lib/annotator.py | Python | gpl-3.0 | 75,107 | 0.022807 |
#!/usr/bin/env python
import argparse
from collections import OrderedDict
import itertools
import sys
from phylotoast import util, graph_util as gu
errors = []
try:
from palettable.colorbrewer.qualitative import Set3_12
except ImportError as ie:
errors.append("No module named palettable")
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
except ImportError as ie:
errors.append(ie)
if len(errors) != 0:
for item in errors:
print("Import Error:", item)
sys.exit()
def handle_program_options():
    parser = argparse.ArgumentParser(description="Create a 2D or 3D PCoA plot. By "
                                     "default, this script opens a window displaying "
                                     "the plot so that certain aspects (such as the "
                                     "view angle in 3D mode) can be adjusted before "
                                     "saving. If the -o option is specified, the plot "
                                     "is saved directly to an image without the "
                                     "initial display window.")
parser.add_argument("-i", "--coord_fp", required=True,
help="Input principal coordinates filepath (i.e. resulting file "
"from principal_coordinates.py) [REQUIRED].")
parser.add_argument("-m", "--map_fp", required=True,
help="Input metadata mapping filepath [REQUIRED].")
parser.add_argument("-g", "--group_by", required=True,
help="Any mapping categories, such as treatment type, that will "
"be used to group the data in the output iTol table. For example,"
" one category with three types will result in three data columns"
" in the final output. Two categories with three types each will "
"result in six data columns. Default is no categories and all the"
" data will be treated as a single group.")
parser.add_argument("-d", "--dimensions", default=2, type=int, choices=[2, 3],
help="Choose whether to plot 2D or 3D.")
parser.add_argument("-c", "--colors", default=None,
help="A column name in the mapping file containing hexadecimal "
"(#FF0000) color values that will be used to color the groups. "
"Each sample ID must have a color entry.")
parser.add_argument("-s", "--point_size", default=100, type=int,
help="Specify the size of the circles representing each of the "
"samples in the plot")
parser.add_argument("--pc_order", default=[1, 2], type=int, nargs=2,
help="Choose which Principle Coordinates are displayed and in "
"which order, for example: 1 2. This option is only used when a "
"2D plot is specified.")
parser.add_argument("--x_limits", type=float, nargs=2,
help="Specify limits for the x-axis instead of automatic setting "
"based on the data range. Should take the form: --x_limits -0.5 "
"0.5")
parser.add_argument("--y_limits", type=float, nargs=2,
help="Specify limits for the y-axis instead of automatic setting "
"based on the data range. Should take the form: --y_limits -0.5 "
"0.5")
parser.add_argument("--z_limits", type=float, nargs=2,
help="Specify limits for the z-axis instead of automatic setting "
"based on the data range. Should take the form: --z_limits -0.5 "
"0.5")
parser.add_argument("--z_angles", type=float, nargs=2, default=[-134.5, 23.],
help="Specify the azimuth and elevation angles for a 3D plot.")
parser.add_argument("-t", "--title", default="", help="Title of the plot.")
parser.add_argument("--figsize", default=[14, 8], type=int, nargs=2,
help="Specify the 'width height' in inches for PCoA plots. "
"Default figure size is 14x8 inches")
parser.add_argument("--font_size", default=12, type=int,
help="Sets the font size for text elements in the plot.")
parser.add_argument("--label_padding", default=15, type=int,
help="Sets the spacing in points between the each axis and its "
"label.")
parser.add_argument("--legend_loc", default="best", choices=['best','upper right','upper left',
'lower left', 'lower right', 'right', 'center left', 'center right',
'lower center', 'upper center', 'center', 'outside', 'none'],
help="Sets the location of the Legend. Default: best.")
parser.add_argument("--annotate_points", action="store_true",
help="If specified, each graphed point will be labeled with its "
"sample ID.")
parser.add_argument("--ggplot2_style", action="store_true",
help="Apply ggplot2 styling to the figure.")
parser.add_argument("-o", "--out_fp", default=None,
help="The path and file name to save the plot under. If specified"
", the figure will be saved directly instead of opening a window "
"in which the plot can be viewed before saving.")
return parser.parse_args()
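# Illustrative invocation (the file names below are hypothetical):
#   python PCoA.py -i unweighted_unifrac_pc.txt -m mapping.txt -g Treatment \
#       -d 3 -c ColorColumn -o pcoa_3d.png
# Note that -g also accepts a comma-separated list of mapping categories, which is
# split on "," in main().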
def main():
args = handle_program_options()
try:
with open(args.coord_fp):
pass
except IOError as ioe:
err_msg = "\nError in input principal coordinates filepath (-i): {}\n"
sys.exit(err_msg.format(ioe))
try:
with open(args.map_fp):
pass
except IOError as ioe:
err_msg = "\nError in input metadata mapping filepath (-m): {}\n"
sys.exit(err_msg.format(ioe))
with open(args.coord_fp) as F:
pcd = F.readlines()
pcd = [line.split("\t") for line in pcd]
map_header, imap = util.parse_map_file(args.map_fp)
data_gather = util.gather_categories(imap, map_header,
args.group_by.split(","))
categories = OrderedDict([(condition, {"pc1": [], "pc2": [], "pc3": []})
for condition in data_gather.keys()])
bcolors = itertools.cycle(Set3_12.hex_colors)
if not args.colors:
colors = [bcolors.next() for _ in categories]
else:
colors = util.color_mapping(imap, map_header,
args.group_by, args.colors)
colors = colors.values()
parsed_unifrac = util.parse_unifrac(args.coord_fp)
pco = args.pc_order
if args.dimensions == 3:
pco.append(3)
pc1v = parsed_unifrac["varexp"][pco[0] - 1]
pc2v = parsed_unifrac["varexp"][pco[1] - 1]
if args.dimensions == 3:
pc3v = parsed_unifrac["varexp"][pco[2] - 1]
for sid, points in parsed_unifrac["pcd"].items():
for condition, dc in data_gather.items():
if sid in dc.sids:
cat = condition
break
categories[cat]["pc1"].append((sid, points[pco[0] - 1]))
categories[cat]["pc2"].append((sid, points[pco[1] - 1]))
if args.dimensions == 3:
categories[cat]["pc3"].append((sid, points[pco[2] - 1]))
axis_str = "PC{} (Percent Explained Variance {:.3f}%)"
# initialize plot
fig = plt.figure(figsize=args.figsize)
if args.dimensions == 3:
ax = fig.add_subplot(111, projection="3d")
ax.view_init(elev=args.z_angles[1], azim=args.z_angles[0])
ax.set_zlabel(axis_str.format(3, pc3v), labelpad=args.label_padding)
if args.z_limits:
ax.set_zlim(args.z_limits)
else:
ax = fig.add_subplot(111)
# plot data
for i, cat in enumerate(categories):
if args.dimensions == 3:
ax.scatter(xs=[e[1] for e in categories[cat]["pc1"]],
ys=[e[1] for e in categories[cat]["pc2"]],
zs=[e[1] for e in categories[cat]["pc3"]],
zdir="z", c=colors[i], s=args.point_size, label=cat,
edgecolors="k")
else:
ax.scatter([e[1] for e in categories[cat]["pc1"]],
[e[1] for e in categories[cat]["pc2"]],
c=colors[i], s=args.point_size, label=cat, edgecolors="k")
        # Optionally label each plotted point with its sample ID (--annotate_points).
if args.annotate_points:
for x, y in zip(categories[cat]["pc1"], categories[cat]["pc2"]):
ax.annotate(
x[0], xy=(x[1], y[1]), xytext=(-10, -15),
textcoords="offset points", ha="center", va="center",
)
# customize plot options
if args.x_limits:
ax.set_xlim(args.x_limits)
if args.y_limits:
ax.set_ylim(args.y_limits)
ax.set_xlabel(axis_str.format(pco[0], float(pc1v)), labelpad=args.label_padding)
ax.set_ylabel(axis_str.format(pco[1], float(pc2v)), labelpad=args.label_padding)
if args.legend_loc != "none":
if args.legend_loc == "outside":
leg = plt.legend(bbox_to_anchor=(1.05, 0.94), loc=2, borderaxespad=-2.0,
scatterpoints=3, frameon=True, framealpha=1)
# https://stackoverflow.com/a/45846024
plt.gcf().canvas.draw()
invFigure = plt.gcf().transFigure.inverted()
lgd_pos = leg.get_window_extent()
lgd_coord = invFigure.transform(lgd_pos)
lgd_xmax = lgd_coord[1, 0]
ax_pos = plt.gca().get_window_extent()
ax_coord = invFigure.transform(ax_pos)
ax_xmax = ax_coord[1, 0]
shift = 1 - (lgd_xmax - ax_xmax)
plt.gcf().tight_layout(rect=(0, 0, shift, 1))
else:
leg = plt.legend(loc=args.legend_loc, scatterpoints=3, frameon=True, framealpha=1)
leg.get_frame().set_edgecolor('k')
else:
plt.legend().remove()
# Set the font characteristics
font = {"family": "normal", "weight": "bold", "size": args.font_size}
mpl.rc("font", **font)
if args.title:
ax.set_title(args.title)
if args.ggplot2_style and not args.dimensions == 3:
gu.ggplot2_style(ax)
# save or display result
if args.out_fp:
fig.savefig(args.out_fp, facecolor="white", edgecolor="none", bbox_inches="tight",
pad_inches=0.2)
else:
plt.show()
if __name__ == "__main__":
sys.exit(main())
| smdabdoub/phylotoast | bin/PCoA.py | Python | mit | 10,870 | 0.004416 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from blinker import signal, Namespace, NamedSignal
from yosaipy2.core.event.abcs import EventBus
from typing import Dict
from functools import wraps
class BlinkerEventBus(EventBus):
def __init__(self):
        # type: () -> None
self.AUTO_TOPIC = "blinker_eventbus_auto_topic"
        self._signals = {}  # type: Dict[str, NamedSignal]
def send_message(self, topic_name, **kwargs):
if topic_name not in self._signals:
sig = signal(topic_name)
self._signals[topic_name] = sig
else:
sig = self._signals[topic_name]
sig.send(None, **kwargs)
def subscribe(self, func, topic_name):
if topic_name not in self._signals:
sig = signal(topic_name)
self._signals[topic_name] = sig
else:
sig = self._signals[topic_name]
callback = self._adapter(func, topic_name)
        sig.connect(callback, weak=False)  # keep a strong reference; the wrapper is not stored anywhere else
def unsubscribe(self, listener, topic_name):
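        # Currently a no-op: once connected, a listener stays subscribed for the lifetime of the process.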
pass
@staticmethod
def _adapter(func, topic_name):
@wraps(func)
def callback(sender, **kwargs):
func(topic=topic_name, **kwargs)
return callback
def isSubscribed(self, listener, topic_name):
if topic_name not in self._signals:
return False
return True
event_bus = BlinkerEventBus()
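# Minimal usage sketch (illustrative only; the topic name and handler below are
# hypothetical). The adapter forwards the topic name plus any keyword payload to
# each subscriber.
if __name__ == '__main__':
    def _on_session_start(topic=None, **kwargs):
        print('received on %s: %r' % (topic, kwargs))
    event_bus.subscribe(_on_session_start, 'SESSION.START')
    event_bus.send_message('SESSION.START', username='alice')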
| jellybean4/yosaipy2 | yosaipy2/core/event/event_bus.py | Python | apache-2.0 | 1,390 | 0 |
import webapp2
from google.appengine.ext import db
from google.appengine.ext import ndb
from db_class import DerivedClass as OldDerivedClass
from ndb_class import BaseClass as NewBaseClass
from ndb_class import DerivedClass as NewDerivedClass
class Repro(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
# Create a derived object using google.appengine.ext.db
obj = OldDerivedClass(name='foo', version='bar')
db_key = obj.put()
self.response.write('%s, %d\n' % (db_key.kind(), db_key.id()))
# Attempt to load using the converted key
ndb_key = ndb.Key.from_old_key(db_key)
try:
ndb_key.get()
except ndb.KindError:
self.response.write('failed (KindError): %s\n' % str(ndb_key))
# Attempt to create a new key using the ndb derived class
derived_key = ndb.Key(NewDerivedClass, ndb_key.id())
obj = derived_key.get()
if not obj:
self.response.write('failed (None): %s\n' % str(derived_key))
# Attempt to create a new key using the ndb base class
base_key = ndb.Key(NewBaseClass, ndb_key.id())
        obj = base_key.get()
if not obj:
self.response.write('failed (None): %s\n' % str(base_key))
# Manually create a new key using the ndb derived class name
force_key = ndb.Key('DerivedClass', ndb_key.id())
try:
force_key.get()
except ndb.KindError:
self.response.write('failed (KindError): %s\n' % str(force_key))
application = webapp2.WSGIApplication([('/', Repro)], debug=True)
| lantius/ndb-key | repro.py | Python | mit | 1,545 | 0.005825 |
# Copyright 2011 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
fixed_ips = Table(
"fixed_ips",
meta,
Column(
"id",
Integer(),
primary_key=True,
nullable=False))
#
# New Tables
#
# None
#
# Tables to alter
#
# None
#
# Columns to add to existing tables
#
fixed_ips_addressV6 = Column(
"addressV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_netmaskV6 = Column(
"netmaskV6",
String(
length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
fixed_ips_gatewayV6 = Column(
"gatewayV6",
String(
length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
# Add columns to existing tables
fixed_ips.create_column(fixed_ips_addressV6)
fixed_ips.create_column(fixed_ips_netmaskV6)
fixed_ips.create_column(fixed_ips_gatewayV6)
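# Illustrative sketch only (not part of the original migration): migrations of this
# era usually pair upgrade() with a downgrade() that reverses it. Assuming
# sqlalchemy-migrate's Table.drop_column() accepts a column name, the counterpart
# could look like this.
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    # Drop the IPv6 columns added by upgrade()
    fixed_ips.drop_column('addressV6')
    fixed_ips.drop_column('netmaskV6')
    fixed_ips.drop_column('gatewayV6')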
| nii-cloud/dodai-compute | nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py | Python | apache-2.0 | 2,067 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# skyscanner documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import skyscanner
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Skyscanner Python SDK'
copyright = u'2015, Ardy Dedase'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = skyscanner.__version__
# The full version, including alpha/beta/rc tags.
release = skyscanner.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skyscannerdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'skyscanner.tex',
u'Skyscanner Python SDK Documentation',
u'Ardy Dedase', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'skyscanner',
u'Skyscanner Python SDK Documentation',
[u'Ardy Dedase'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'skyscanner',
u'Skyscanner Python SDK Documentation',
u'Ardy Dedase',
'skyscanner',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| valery-barysok/skyscanner-python-sdk | docs/conf.py | Python | apache-2.0 | 8,466 | 0.005315 |
""" Initialization code related to Commotion Router UI unit tests"""
| opentechinstitute/commotion-router-test-suite | tests/__init__.py | Python | agpl-3.0 | 69 | 0 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show rack --rack`."""
from aquilon.aqdb.model import Rack
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
class CommandShowRackRack(BrokerCommand):
required_parameters = ["rack"]
def render(self, session, rack, **arguments):
return Rack.get_unique(session, rack, compel=True)
| stdweird/aquilon | lib/python2.6/aquilon/worker/commands/show_rack_rack.py | Python | apache-2.0 | 1,055 | 0 |
'''
qobuz.extension.kooli.script.kooli-xbmc-service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:part_of: kodi-qobuz
:copyright: (c) 2012-2018 by Joachim Basmaison, Cyril Leclerc
:license: GPLv3, see LICENSE for more details.
'''
from os import path as P
import SocketServer
import socket
import sys
import threading
import time
base_path = P.abspath(P.dirname(__file__))
try:
import kooli as _ # pylint:disable=E0401
except ImportError:
sys.path.append(P.abspath(P.join(base_path, P.pardir, P.pardir)))
from kooli import log
from kooli import qobuz_lib_path
from kodi_six import xbmc # pylint:disable=E0401
from kooli.application import application, shutdown_server, qobuzApp
from kooli.monitor import Monitor
from qobuz import config
from qobuz.api import api
from qobuz.api.user import current as user
from qobuz.debug import getLogger
from qobuz.gui.util import notify_warn
import qobuz.gui.util as gui
logger = getLogger(__name__)
def my_finish(self):
if not self.wfile.closed:
try:
self.wfile.flush()
except socket.error:
# A final socket error may have occurred here, such as
# the local error ECONNABORTED.
pass
try:
self.wfile.close()
self.rfile.close()
except socket.error:
pass
SocketServer.StreamRequestHandler.finish = my_finish # Ugly monkey patching
def is_empty(obj):
if obj is None:
return True
if isinstance(obj, basestring):
if obj == '':
return True
return False
def is_authentication_set():
username = config.app.registry.get('username')
password = config.app.registry.get('password')
if not is_empty(username) and not is_empty(password):
return True
return False
def is_service_enable():
return config.app.registry.get('enable_scan_feature', to='bool')
@application.before_request
def shutdown_request():
if monitor.abortRequested:
shutdown_server()
return None
class KooliService(threading.Thread):
name = 'httpd'
def __init__(self, port=33574):
threading.Thread.__init__(self)
self.daemon = True
self.port = port
self.running = False
self.threaded = True
self.processes = 2
self.alive = True
def stop(self):
shutdown_server()
self.alive = False
def run(self):
while self.alive:
if not is_authentication_set():
gui.notify_warn('Authentication not set',
'You need to enter credentials')
elif not user.logged:
if not api.login(
username=qobuzApp.registry.get('username'),
password=qobuzApp.registry.get('password')):
gui.notify_warn('Login failed', 'Invalid credentials')
else:
try:
application.run(port=self.port,
threaded=True,
processes=0,
debug=False,
use_reloader=False,
use_debugger=False,
use_evalex=True,
passthrough_errors=False)
except Exception as e:
logger.error('KooliService port: %s Error: %s',
self.port, e)
raise e
time.sleep(1)
if __name__ == '__main__':
monitor = Monitor()
if is_service_enable():
monitor.add_service(KooliService())
else:
notify_warn('Qobuz service / HTTPD',
'Service is disabled from configuration')
monitor.start_all_service()
alive = True
while alive:
abort = False
try:
abort = monitor.abortRequested
except Exception as e:
logger.error('Error while getting abortRequested %s', e)
if abort:
alive = False
continue
xbmc.sleep(1000)
monitor.stop_all_service()
| tidalf/plugin.audio.qobuz | resources/lib/qobuz/extension/kooli/kooli/script/kooli-xbmc-service.py | Python | gpl-3.0 | 4,243 | 0.002593 |
from django.contrib import admin
from .models import Post, Category, Tag
# Register your models here.
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'created_time', 'modified_time', 'category', 'author']
# Register Post with the custom PostAdmin
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
| wldisaster/Alice | blog/admin.py | Python | gpl-3.0 | 339 | 0.009063 |
# -*- coding: utf-8 -*-
from .common_settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dz(#w(lfve24ck!!yrt3l7$jfdoj+fgf+ru@w)!^gn9aq$s+&y'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| Mustapha90/IV16-17 | tango_with_django_project/dev_settings.py | Python | gpl-3.0 | 375 | 0.002667 |
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
VERSION = '0.4.1'
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='pyintercept',
version=VERSION,
description="Intercept function calls from Python scripts",
author="Caio Ariede",
author_email="caio.ariede@gmail.com",
url="http://github.com/caioariede/pyintercept",
license="MIT",
zip_safe=False,
platforms=["any"],
packages=find_packages(),
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
include_package_data=True,
install_requires=[
'byteplay',
],
tests_require=[
'pytest',
'uncompyle6',
],
test_suite='py.test',
cmdclass={'test': PyTest},
)
| caioariede/pyintercept | setup.py | Python | mit | 1,498 | 0 |
from distutils.core import setup
setup(
name='captcha2upload',
packages=['captcha2upload'],
package_dir={'captcha2upload': 'src/captcha2upload'},
version='0.2',
install_requires=['requests'],
    description='Upload your image and solve captchas using the 2Captcha '
'Service',
author='Alessandro Sbarbati',
author_email='miriodev@gmail.com',
url='https://github.com/Mirio/captcha2upload',
download_url='https://github.com/Mirio/captcha2upload/tarball/0.1',
keywords=['2captcha', 'captcha', 'Image Recognition'],
classifiers=["Topic :: Scientific/Engineering :: Image Recognition"],
)
| Mirio/captcha2upload | setup.py | Python | bsd-2-clause | 647 | 0.001546 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapAndFilterFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _map_and_filter_fusion_test_cases():
"""Generates test cases for the MapAndFilterFusion optimization."""
identity = lambda x: x
increment = lambda x: x + 1
minus_five = lambda x: x - 5
def increment_and_square(x):
y = x + 1
return y * y
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
  is_even = lambda x: math_ops.equal(x % 2, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
functions = [identity, increment, minus_five, increment_and_square]
  filters = [take_all, is_zero, is_even, greater]
tests = []
for x, fun in enumerate(functions):
for y, predicate in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), fun, predicate))
# Multi output
tests.append(("Multi1", lambda x: (x, x),
lambda x, y: constant_op.constant(True)))
tests.append(
("Multi2", lambda x: (x, 2),
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
return tuple(tests)
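# Illustrative reference only (not part of the original test): _testMapAndFilter
# below checks the fused pipeline against plain map-then-filter semantics. The
# hypothetical generator here expresses those semantics with ordinary Python
# callables; the real test evaluates the predicate tensors in a session instead.
def _map_then_filter_reference(elements, function, predicate):
  for x in elements:
    r = function(x)
    keep = predicate(*r) if isinstance(r, tuple) else predicate(r)
    if keep:
      yield r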
class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
def _testMapAndFilter(self, dataset, function, predicate):
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.cached_session() as sess:
for x in range(10):
r = function(x)
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if sess.run(b):
result = sess.run(get_next)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(*_map_and_filter_fusion_test_cases())
def testMapFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["Map", "FilterByLastComponent"])).map(function).filter(predicate)
options = dataset_ops.Options()
options.experimental_map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
def testCapturedInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
function = lambda x: x * x
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
# We are currently not supporting functions with captured inputs.
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["Map",
"Filter"])).map(function).filter(predicate)
options = dataset_ops.Options()
options.experimental_map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
if __name__ == "__main__":
test.main()
| alshedivat/tensorflow | tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py | Python | apache-2.0 | 4,199 | 0.009288 |
# CubETL
# Copyright (c) 2013-2019 Jose Juan Montes
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from cubetl.core import Node, Component
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class TemplateRendererBase(Node):
def __init__(self, template):
self.template = template
def render(self, ctx, data):
raise NotImplementedError()
def process(self, ctx, m):
#template = ctx.interpolate(self.template, m)
result = self.render(ctx, {'m': m})
m['templated'] = result
yield m
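# Illustrative subclass (not part of CubETL): a minimal renderer that fills the
# configured template from the message dict using string.Template. Real renderers
# would normally delegate to a full template engine instead.
class StringTemplateRenderer(TemplateRendererBase):
    def render(self, ctx, data):
        from string import Template
        # 'data' is the {'m': message} dict built by TemplateRendererBase.process()
        return Template(self.template).safe_substitute(data['m'])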
| jjmontesl/cubetl | cubetl/template/__init__.py | Python | mit | 1,587 | 0.00189 |
"""
Tests for exporting OLX content.
"""
from __future__ import absolute_import
import shutil
import tarfile
import unittest
from six import StringIO
from tempfile import mkdtemp
import ddt
import six
from django.core.management import CommandError, call_command
from path import Path as path
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestArgParsingCourseExportOlx(unittest.TestCase):
"""
Tests for parsing arguments for the `export_olx` management command
"""
def test_no_args(self):
"""
Test export command with no arguments
"""
if six.PY2:
errstring = "Error: too few arguments"
else:
errstring = "Error: the following arguments are required: course_id"
with self.assertRaisesRegexp(CommandError, errstring):
call_command('export_olx')
@ddt.ddt
class TestCourseExportOlx(ModuleStoreTestCase):
"""
Test exporting OLX content from a course or library.
"""
def test_invalid_course_key(self):
"""
Test export command with an invalid course key.
"""
errstring = "Unparsable course_id"
with self.assertRaisesRegexp(CommandError, errstring):
call_command('export_olx', 'InvalidCourseID')
def test_course_key_not_found(self):
"""
Test export command with a valid course key that doesn't exist.
"""
errstring = "Invalid course_id"
with self.assertRaisesRegexp(CommandError, errstring):
call_command('export_olx', 'x/y/z')
def create_dummy_course(self, store_type):
"""Create small course."""
course = CourseFactory.create(default_store=store_type)
self.assertTrue(
modulestore().has_course(course.id),
u"Could not find course in {}".format(store_type)
)
return course.id
def check_export_file(self, tar_file, course_key):
"""Check content of export file."""
names = tar_file.getnames()
dirname = "{0.org}-{0.course}-{0.run}".format(course_key)
self.assertIn(dirname, names)
# Check if some of the files are present, without being exhaustive.
self.assertIn("{}/about".format(dirname), names)
self.assertIn("{}/about/overview.html".format(dirname), names)
self.assertIn("{}/assets/assets.xml".format(dirname), names)
self.assertIn("{}/policies".format(dirname), names)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_export_course(self, store_type):
test_course_key = self.create_dummy_course(store_type)
tmp_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, tmp_dir)
filename = tmp_dir / 'test.tar.gz'
call_command('export_olx', '--output', filename, six.text_type(test_course_key))
with tarfile.open(filename) as tar_file:
self.check_export_file(tar_file, test_course_key)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_export_course_stdout(self, store_type):
test_course_key = self.create_dummy_course(store_type)
out = StringIO()
call_command('export_olx', six.text_type(test_course_key), stdout=out)
out.seek(0)
output = out.read()
with tarfile.open(fileobj=StringIO(output)) as tar_file:
self.check_export_file(tar_file, test_course_key)
| ESOedX/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_export_olx.py | Python | agpl-3.0 | 3,608 | 0.000554 |
# subsystemBonusCaldariOffensive3RemoteShieldBoosterHeat
#
# Used by:
# Subsystem: Tengu Offensive - Support Processor
type = "passive"
def handler(fit, src, context):
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Shield Emission Systems"),
"overloadSelfDurationBonus", src.getModifiedItemAttr("subsystemBonusCaldariOffensive3"),
skill="Caldari Offensive Systems")
| bsmr-eve/Pyfa | eos/effects/subsystembonuscaldarioffensive3remoteshieldboosterheat.py | Python | gpl-3.0 | 459 | 0.004357 |
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
params = {
'response': 1049,
'family': 'binomial',
'beta_epsilon': 0.0001,
'alpha': 1.0,
'lambda': 1e-05,
'n_folds': 1,
'max_iter': 20,
}
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_syn_2659x1049(self):
csvFilename = "syn_2659x1049.csv"
csvPathname = 'logreg' + '/' + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
kwargs = params
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=120, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
def test_GLM2_syn_2659x1049x2enum(self):
csvFilename = "syn_2659x1049x2enum.csv"
csvPathname = 'logreg' + '/' + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
kwargs = params
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=240, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
if __name__ == '__main__':
h2o.unit_main()
| 111t8e/h2o-2 | py/testdir_single_jvm/test_GLM2_syn_2659x1049.py | Python | apache-2.0 | 1,417 | 0.012703 |
# This script does the following:
#
# - create a user using bonafide and and invite code given as an environment
# variable.
#
# - create and upload an OpenPGP key manually, as that would be
# a responsibility of bitmask-dev.
#
# - send an email to the user using sendmail, with a secret in the body.
#
# - start a soledad client using the created user.
#
# - download pending blobs. There should be only one.
#
# - look inside the blob, parse the email message.
#
# - compare the secret in the incoming message with the secret in the sent
#   message and succeed if they are the same.
#
# - delete the user (even if the test failed). (TODO)
import pytest
from utils import get_session
from utils import gen_key
from utils import put_key
from utils import send_email
from utils import get_incoming_fd
from utils import get_received_secret
@pytest.inlineCallbacks
def test_incoming_mail_pipeline(soledad_client, tmpdir):
# create a user and login
session = yield get_session(tmpdir)
# create a OpenPGP key and upload it
key = gen_key(session.username)
yield put_key(session.uuid, session.token, str(key.pubkey))
# get a soledad client for that user
client = soledad_client(
uuid=session.uuid,
passphrase='123',
token=session.token)
# send the email
sent_secret = send_email(session.username)
# check the incoming blob and compare sent and received secrets
fd = yield get_incoming_fd(client)
received_secret = get_received_secret(key, fd)
assert sent_secret == received_secret
# TODO: delete user in the end
| leapcode/soledad | tests/e2e/test_incoming_mail_pipeline.py | Python | gpl-3.0 | 1,604 | 0 |
#!/usr/bin/env python
"""compares BSR values between two groups in a BSR matrix
Numpy and BioPython need to be installed. Python version must be at
least 2.7 to use collections"""
from optparse import OptionParser
import subprocess
from ls_bsr.util import prune_matrix
from ls_bsr.util import compare_values
from ls_bsr.util import find_uniques
import sys
import os
def test_file(option, opt_str, value, parser):
try:
with open(value): setattr(parser.values, option.dest, value)
except IOError:
print('%s file cannot be opened' % option)
sys.exit()
def add_headers(infile, outfile, lower, upper):
file_out = open(outfile, "w")
file_out.write("marker"+"\t"+"group1_mean"+"\t"+">="+str(upper)+"\t"+"total_in_group_1"+"\t"+">="+str(lower)+"\t"+"group2_mean"+"\t"+">="+str(upper)+"\t"+"total_in_group2"+"\t"+">="+str(lower)+"\n")
with open(infile) as my_file:
for line in my_file:
file_out.write(line)
file_out.close()
def main(matrix,group1,group2,fasta,upper,lower):
prune_matrix(matrix,group1,group2)
compare_values("group1_pruned.txt","group2_pruned.txt",upper,lower)
subprocess.check_call("paste group1_out.txt group2_out.txt > groups_combined.txt", shell=True)
find_uniques("groups_combined.txt",fasta)
add_headers("groups_combined.txt","groups_combined_header.txt",lower,upper)
os.system("rm group1_out.txt group2_out.txt")
if __name__ == "__main__":
usage="usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-b", "--bsr_matrix", dest="matrix",
help="/path/to/bsr_matrix [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-f", "--fasta", dest="fasta",
help="/path/to/ORF_fasta_file [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-1", "--group_1_ids", dest="group1",
help="new line separated file with group1 ids [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-2", "--group_2_ids", dest="group2",
help="new line separated file with group2 ids [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-u", "--upper_bound", dest="upper",
help="upper bound for BSR comparisons, defaults to 0.8",
default="0.8", type="float")
parser.add_option("-l", "--lower_bound", dest="lower",
help="lower bound for BSR comparisons, defaults to 0.4",
default="0.4", type="float")
options, args = parser.parse_args()
mandatories = ["matrix", "group1", "group2", "fasta"]
for m in mandatories:
if not options.__dict__[m]:
print("\nMust provide %s.\n" %m)
parser.print_help()
exit(-1)
main(options.matrix,options.group1,options.group2,options.fasta,options.upper,options.lower)
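# Illustrative invocation (file names are hypothetical):
#   python compare_BSR.py -b bsr_matrix.txt -f ORFs.fasta \
#       -1 group1_ids.txt -2 group2_ids.txt -u 0.8 -l 0.4
# The result is written to groups_combined_header.txt, with per-group means and
# counts at the chosen upper and lower BSR thresholds.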
| jasonsahl/LS-BSR | tools/compare_BSR.py | Python | gpl-3.0 | 3,059 | 0.012422 |
from os import path, access, W_OK, R_OK
import argparse
import logging
from django.core.management.base import BaseCommand, CommandError
from kinksorter_app.functionality.movie_handling import merge_movie, recognize_movie
from kinksorter_app.models import Movie, PornDirectory
from kinksorter_app.functionality.directory_handling import PornDirectoryHandler, get_target_porn_directory
from kinksorter_app.functionality.sorting import TargetSorter
logger = logging.getLogger(__name__)
def argcheck_dir(string):
if path.isdir(string) and access(string, W_OK) and access(string, R_OK):
return path.abspath(string)
raise argparse.ArgumentTypeError('{} is no directory or isn\'t writeable'.format(string))
class Command(BaseCommand):
logging.basicConfig(level=logging.DEBUG)
help = "Syncs a source directory into a destination directory"
def add_arguments(self, parser):
parser.add_argument('src_directory', type=argcheck_dir)
parser.add_argument('dst_directory', type=argcheck_dir)
def handle(self, *args, **options):
src_dir = options['src_directory']
dst_dir = options['dst_directory']
logger.info("Start")
if PornDirectory.objects.filter(id=0).exists():
dst_handler = PornDirectoryHandler(0)
else:
dst_handler = PornDirectoryHandler(None, init_path=dst_dir, name="dest", id_=0)
dst_handler.scan() # only scan initially, since the merged files get added to the db
if PornDirectory.objects.filter(path=src_dir).exists():
            PornDirectory.objects.filter(path=src_dir).delete()  # don't keep the src directory, to force resyncs
src_handler = PornDirectoryHandler(None, init_path=src_dir, name="src")
else:
src_handler = PornDirectoryHandler(None, init_path=src_dir, name="src")
src_handler.scan()
for movie in src_handler.directory.movies.all():
if not movie.scene_id:
recognize_movie(movie, None)
if not movie.scene_id:
# if it was not recognized during first run, recognize with extensive=True again
recognize_movie(movie, None, extensive=True)
if not dst_handler.directory.movies.filter(scene_id=movie.scene_id).exists():
merge_movie(movie.id)
ts = TargetSorter("move", list(dst_handler.directory.movies.all()))
ts.sort()
| sistason/kinksorter2 | src/kinksorter_app/management/commands/kink_besteffortsync.py | Python | gpl-3.0 | 2,449 | 0.004492 |
# -*- coding: utf-8 -*-
#
# test documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 26 00:00:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sys, os
# To change default code-block format in Latex to footnotesize (8pt)
# Tip from https://stackoverflow.com/questions/9899283/how-do-you-change-the-code-example-font-size-in-latex-pdf-output-with-sphinx/9955928
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
#from sphinx.highlighting import PygmentsBridge
#from pygments.formatters.latex import LatexFormatter
#
#class CustomLatexFormatter(LatexFormatter):
# def __init__(self, **options):
# super(CustomLatexFormatter, self).__init__(**options)
# self.verboptions = r"formatcom=\footnotesize"
#
#PygmentsBridge.latex_formatter = CustomLatexFormatter
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.imgmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3 project'
copyright = u'2006-2019'
#author = u'test'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = u'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'est vtest'
html_title = 'Manual'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' users can customize the `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# VerbatimBorderColor: make the box around code samples blend into the background
# Tip from https://stackoverflow.com/questions/29403100/how-to-remove-the-box-around-the-code-block-in-restructuredtext-with-sphinx
#
# sphinxcode is the wrapper around \texttt that sphinx.sty provides.
# Redefine it here as needed to change the inline literal font size
# (double backquotes) to either \footnotesize (8pt) or \small (9pt)
#
# See above to change the font size of verbatim code blocks
#
# 'preamble': '',
'preamble': u'''\\usepackage{amssymb}
\\definecolor{VerbatimBorderColor}{rgb}{1,1,1}
\\renewcommand{\\sphinxcode}[1]{\\texttt{\\small{#1}}}
'''
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ns-3-manual.tex', u'ns-3 Manual',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... to help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-manual', u'ns-3 Manual',
[u'ns-3 project'], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for texinfo output ---------------------------------------
#texinfo_documents = [
# (master_doc, 'test', u'test Documentation',
# author, 'test', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| hanyassasa87/ns3-802.11ad | doc/manual/source/conf.py | Python | gpl-2.0 | 10,879 | 0.001287 |
# -*- coding: utf-8 -*-
import os
from datetime import datetime
import six
from bs4 import BeautifulSoup
from git import InvalidGitRepositoryError, Repo
from mock import ANY, call
from nose.tools import (
assert_equal,
assert_greater,
assert_in,
assert_less_equal,
assert_not_in,
assert_raises,
)
from sphinx_git import GitChangelog
from . import MakeTestableMixin, TempDirTestCase
class TestableGitChangelog(MakeTestableMixin, GitChangelog):
pass
class ChangelogTestCase(TempDirTestCase):
def setup(self):
super(ChangelogTestCase, self).setup()
self.changelog = TestableGitChangelog()
self.changelog.state.document.settings.env.srcdir = self.root
class TestNoRepository(ChangelogTestCase):
def test_not_a_repository(self):
assert_raises(InvalidGitRepositoryError, self.changelog.run)
class TestWithRepository(ChangelogTestCase):
def _set_username(self, username):
config_writer = self.repo.config_writer()
config_writer.set_value('user', 'name', username)
config_writer.release()
def setup(self):
super(TestWithRepository, self).setup()
self.repo = Repo.init(self.root)
self._set_username('Test User')
def test_no_commits(self):
assert_raises(ValueError, self.changelog.run)
def test_single_commit_produces_single_item(self):
self.repo.index.commit('my root commit')
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(1, len(bullet_list.findAll('list_item')))
def test_single_commit_message_and_user_display(self):
self.repo.index.commit('my root commit')
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
assert_equal(1, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(5, len(par_children))
assert_equal('my root commit', par_children[0].text)
assert_equal('Test User', par_children[2].text)
def test_single_commit_message_and_user_display_with_non_ascii_chars(self):
self._set_username('þéßþ Úßéë')
self.repo.index.commit('my root commit')
nodes = self.changelog.run()
list_markup = BeautifulSoup(six.text_type(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
assert_equal(1, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(5, len(par_children))
assert_equal('my root commit', par_children[0].text)
assert_equal(u'þéßþ Úßéë', par_children[2].text)
def test_single_commit_time_display(self):
before = datetime.now().replace(microsecond=0)
self.repo.index.commit('my root commit')
nodes = self.changelog.run()
after = datetime.now()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item.paragraph
children = list(item.childGenerator())
timestamp = datetime.strptime(children[4].text, '%Y-%m-%d %H:%M:%S')
assert_less_equal(before, timestamp)
assert_greater(after, timestamp)
def test_single_commit_default_detail_setting(self):
self.repo.index.commit(
'my root commit\n\nadditional information\nmore info'
)
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
assert_equal(2, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(5, len(par_children))
assert_equal('my root commit', par_children[0].text)
assert_equal('Test User', par_children[2].text)
assert_equal(
str(children[1]),
'<paragraph>additional information\nmore info</paragraph>'
)
    def test_single_commit_preformatted_detail_lines(self):
self.repo.index.commit(
'my root commit\n\nadditional information\nmore info'
)
self.changelog.options.update({'detailed-message-pre': True})
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
assert_equal(2, len(children))
assert_equal(
str(children[1]),
'<literal_block xml:space="preserve">additional information\n'
'more info</literal_block>'
)
def test_more_than_ten_commits(self):
for n in range(15):
self.repo.index.commit('commit #{0}'.format(n))
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(10, len(bullet_list.findAll('list_item')))
        for n, child in zip(range(14, 4, -1), bullet_list.childGenerator()):
assert_in('commit #{0}'.format(n), child.text)
assert_not_in('commit #4', bullet_list.text)
def test_specifying_number_of_commits(self):
for n in range(15):
self.repo.index.commit('commit #{0}'.format(n))
self.changelog.options.update({'revisions': 5})
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(5, len(bullet_list.findAll('list_item')))
        for n, child in zip(range(14, 9, -1), bullet_list.childGenerator()):
assert_in('commit #{0}'.format(n), child.text)
assert_not_in('commit #9', bullet_list.text)
def test_specifying_a_rev_list(self):
self.repo.index.commit('before tag')
commit = self.repo.index.commit('at tag')
self.repo.index.commit('after tag')
self.repo.index.commit('last commit')
self.repo.create_tag('testtag', commit)
self.changelog.options.update({'rev-list': 'testtag..'})
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(2, len(bullet_list.findAll('list_item')))
children = list(bullet_list.childGenerator())
first_element = children[0]
second_element = children[1]
assert_in('last commit', first_element.text)
assert_in('after tag', second_element.text)
def test_warning_given_if_rev_list_and_revisions_both_given(self):
self.repo.index.commit('a commit')
self.changelog.options.update({'rev-list': 'HEAD', 'revisions': 12})
nodes = self.changelog.run()
assert_equal(
1, self.changelog.state.document.reporter.warning.call_count
)
def test_line_number_displayed_in_multiple_option_warning(self):
self.repo.index.commit('a commit')
self.changelog.options.update({'rev-list': 'HEAD', 'revisions': 12})
nodes = self.changelog.run()
document_reporter = self.changelog.state.document.reporter
assert_equal(
[call(ANY, line=self.changelog.lineno)],
document_reporter.warning.call_args_list
)
def test_name_filter(self):
self.repo.index.commit('initial')
for file_name in ['abc.txt', 'bcd.txt', 'abc.other', 'atxt']:
full_path = os.path.join(self.repo.working_tree_dir, file_name)
f = open(full_path, 'w+')
f.close()
self.repo.index.add([full_path])
self.repo.index.commit('commit with file {}'.format(file_name))
self.repo.index.commit('commit without file')
self.changelog.options.update({'filename_filter': 'a.*txt'})
nodes = self.changelog.run()
assert_equal(1, len(nodes))
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(2, len(bullet_list.findAll('list_item')), nodes)
next_file = os.path.join(self.repo.working_tree_dir, 'atxt')
f = open(next_file, 'w+')
f.close()
self.repo.index.add([next_file])
self.repo.index.commit('show me')
nodes = self.changelog.run()
assert_equal(1, len(nodes), nodes)
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
assert_equal(1, len(list_markup.findAll('bullet_list')))
bullet_list = list_markup.bullet_list
assert_equal(2, len(bullet_list.findAll('list_item')), nodes)
def test_single_commit_hide_details(self):
self.repo.index.commit(
'Another commit\n\nToo much information'
)
self.changelog.options.update({'hide_details': True})
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
assert_equal(1, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(5, len(par_children))
assert_equal('Another commit', par_children[0].text)
assert_equal('Test User', par_children[2].text)
def test_single_commit_message_hide_author(self):
self.repo.index.commit('Yet another commit')
self.changelog.options.update({'hide_author': True})
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
print(children)
assert_equal(1, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(3, len(par_children))
assert_equal('Yet another commit', par_children[0].text)
assert_not_in(' by Test User', par_children[1].text)
assert_in(' at ', par_children[1].text)
def test_single_commit_message_hide_date(self):
self.repo.index.commit('Yet yet another commit')
self.changelog.options.update({'hide_date': True})
nodes = self.changelog.run()
list_markup = BeautifulSoup(str(nodes[0]), features='xml')
item = list_markup.bullet_list.list_item
children = list(item.childGenerator())
print(children)
assert_equal(1, len(children))
par_children = list(item.paragraph.childGenerator())
assert_equal(3, len(par_children))
assert_equal('Yet yet another commit', par_children[0].text)
assert_not_in(' at ', par_children[1].text)
assert_in(' by ', par_children[1].text)
class TestWithOtherRepository(TestWithRepository):
"""
The destination repository is not in the same repository as the rst files.
"""
def setup(self):
super(TestWithOtherRepository, self).setup()
self.changelog.state.document.settings.env.srcdir = os.getcwd()
self.changelog.options.update({'repo-dir': self.root})
| OddBloke/sphinx-git | tests/test_git_changelog.py | Python | gpl-3.0 | 11,748 | 0 |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 03:24:37 2017
@author: Yuki
"""
import os,sys,logging
ENTRYPOINT=__path__[0]
ICONPATH=os.path.join(ENTRYPOINT,'Icons','logo.png')
KUCHINAWA='Kuchinawa'
from kuchinawa.Compile import compileUi
from kuchinawa.Thread import Main
# Change the multiprocessing start method to 'spawn'
try:
    import multiprocessing
    multiprocessing.set_start_method('spawn')
except RuntimeError:
    # set_start_method raises RuntimeError when the start method is already set
    print('The start method of multiprocessing is already set.')
def run_sample():
'''Run a sample program'''
from PyQt5.QtWidgets import QApplication
from kuchinawa.Examples import SinCos
app = QApplication([])
s=SinCos.Sample()
sys.exit(app.exec_())
| threemeninaboat3247/kuchinawa | kuchinawa/__init__.py | Python | mit | 687 | 0.020378 |
import sys
import sqlite3
import CONST
import re
"""
Provides the low-level functions to insert, query and update the db
"""
def init():
con = sqlite3.connect( CONST.db_name )
# asr value is auto-speech-recognition rendered captions, either 0 (false) or 1 (true)
con.execute( '''CREATE TABLE IF NOT EXISTS subtitles
( urlid text, title text, captions text, timestamps text, asr integer, ROWID INTEGER PRIMARY KEY )''' )
return con
def insert( *args, table='subtitles', **kwargs ):
"""
    Takes 5 positional arguments in the following order: urlid, title, captions, timestamps, asr (all strings)
"""
con = init()
try:
with con:
con.execute( "INSERT INTO " + table + " VALUES ( '" + args[0] + "', '" + args[1] + "', '" + args[2] + "', '" + args[3] + "', '" + args[4] + "', NULL )" )
except sqlite3.IntegrityError:
print( "Error inserting into db" )
def get_rowid_from_urlid( urlid ):
"""
Returns a row id to select columns from
"""
con = init()
try:
with con:
rowid = str( con.execute( "SELECT rowid FROM subtitles WHERE urlid =:urlid", {"urlid": urlid} ).fetchone()[0] )
except sqlite3.IntegrityError:
print( "Error in get_rowid_from_urlid" )
#print( "rowid = " + str( rowid ) )
return rowid
def get_column_from_rowid( rowid, column ):
con = init()
try:
with con:
column_data = str( con.execute( "SELECT " + column + " FROM subtitles WHERE rowid = " + rowid + ";" ).fetchone()[0] )
except sqlite3.IntegrityError:
print( "Error in get_column_from_rowid" )
return column_data
def get_column_from_urlid( urlid, column ):
return get_column_from_rowid( get_rowid_from_urlid( urlid ), column )
def parse_subtitles( subtitles ):
# match[0] is timestamp, [1] is caption
matches = re.findall( r'(\d\d:\d\d:\d\d\.\d\d\d\s-->\s\d\d:\d\d:\d\d\.\d\d\d)\\n([\w\s\d\\\,\.\;\:\$\!\%\)\(\?\/\'\"\-]+)\\n\\n', subtitles )
captions = ""
timestamps = ""
count = 0
for match in matches:
captions += '<' + str( count ) + '>' + match[1]
timestamps += '<' + str( count ) + '>' + match[0]
count += 1
return { 'captions' : captions, 'timestamps' : timestamps }
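# Illustrative note (editorial addition, not part of the original module):
# parse_subtitles expects text in which each newline appears as the two literal
# characters "\" and "n" (as produced by str() on the raw caption bytes).  For a
# single cue such as
#     00:00:01.000 --> 00:00:04.000\nhello there\n\n
# it would return roughly
#     {'captions': '<0>hello there',
#      'timestamps': '<0>00:00:01.000 --> 00:00:04.000'}
# so captions and timestamps stay associated through the shared <index> prefix.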
def insert_raw_subtitles( urlid, raw_subs, title ):
subs = str( raw_subs )[2:-1]
parsed_subs = parse_subtitles( subs )
insert( urlid, title, parsed_subs['captions'], parsed_subs['timestamps'], '0')
| krarkrrrc/vidpager | db/DbTools.py | Python | gpl-3.0 | 2,503 | 0.029165 |
from models.RepPoints.builder import RepPoints as Detector
from models.dcn.builder import DCNResNetFPN as Backbone
from models.RepPoints.builder import RepPointsNeck as Neck
from models.RepPoints.builder import RepPointsHead as Head
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = False
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
# normalizer = normalizer_factory(type="syncbn", ndev=8, wd_mult=1.0)
normalizer = normalizer_factory(type="gn")
class BackboneParam:
fp16 = General.fp16
# normalizer = NormalizeParam.normalizer
normalizer = normalizer_factory(type="fixbn")
depth = 101
num_c3_block = 0
num_c4_block = 3
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class HeadParam:
num_class = 1 + 80
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
class point_generate:
num_points = 9
scale = 4
stride = (8, 16, 32, 64, 128)
transform = "moment"
class head:
conv_channel = 256
point_conv_channel = 256
mean = None
std = None
class proposal:
pre_nms_top_n = 1000
post_nms_top_n = None
nms_thr = None
min_bbox_side = None
class point_target:
target_scale = 4
num_pos = 1
class bbox_target:
pos_iou_thr = 0.5
neg_iou_thr = 0.5
min_pos_iou = 0.0
class focal_loss:
alpha = 0.25
gamma = 2.0
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = None
image_roi = None
batch_image = None
class regress_target:
class_agnostic = None
mean = None
std = None
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = None
stride = None
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
head = Head(HeadParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
excluded_param = ["gn"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = 35
class schedule:
begin_epoch = 0
end_epoch = 12
lr_iter = [120000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
160000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3
iter = 2000
class TestScaleParam:
short_ranges = [600, 800, 1000, 1200]
long_ranges = [2000, 2000, 2000, 2000]
@staticmethod
def add_resize_info(roidb):
ms_roidb = []
for r_ in roidb:
for short, long in zip(TestScaleParam.short_ranges, TestScaleParam.long_ranges):
r = r_.copy()
r["resize_long"] = long
r["resize_short"] = short
ms_roidb.append(r)
return ms_roidb
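        # Illustrative note (assumption drawn from the code above, not from the
        # original config): with the short/long ranges listed, add_resize_info
        # turns every roidb record into four copies carrying
        # (resize_short, resize_long) = (600, 2000), (800, 2000), (1000, 2000)
        # and (1200, 2000); TestParam.process_roidb below uses this to drive the
        # multi-scale test pass.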
class TestParam:
min_det_score = 0.05 # filter appended boxes
max_det_per_image = 100
process_roidb = TestScaleParam.add_resize_info
def process_output(x, y):
return x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class RandResizeParam:
short = None # generate on the fly
long = None
short_ranges = [600, 800, 1000, 1200]
long_ranges = [2000, 2000, 2000, 2000]
class RandCropParam:
mode = "center" # random or center
short = 800
long = 1333
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
class RandPadParam:
short = 1200
long = 2000
max_num_gt = 100
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, \
RandResize2DImageBbox, RandCrop2DImageBbox, Resize2DImageByRoidb, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord
from models.retinanet.input import Norm2DImage
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
# Resize2DImageBbox(ResizeParam),
RandResize2DImageBbox(RandResizeParam),
RandCrop2DImageBbox(RandCropParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["gt_bbox"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
# Resize2DImageBbox(ResizeParam),
Resize2DImageByRoidb(),
Pad2DImageBbox(RandPadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
from models.retinanet import metric as cls_metric
import core.detection_metric as box_metric
cls_acc_metric = cls_metric.FGAccMetric(
"FGAcc",
["cls_loss_output", "point_refine_labels_output"],
[]
)
box_init_l1_metric = box_metric.L1(
"InitL1",
["pts_init_loss_output", "points_init_labels_output"],
[]
)
box_refine_l1_metric = box_metric.L1(
"RefineL1",
["pts_refine_loss_output", "point_refine_labels_output"],
[]
)
metric_list = [cls_acc_metric, box_init_l1_metric, box_refine_l1_metric]
return General, KvstoreParam, HeadParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
| TuSimple/simpledet | config/RepPoints/reppoints_moment_dcn_r101v1b_fpn_multiscale_2x.py | Python | apache-2.0 | 7,766 | 0.000644 |
# coding: utf-8
from datetime import date
from django.db import models
from django.contrib.auth.models import User
class Band(models.Model):
name = models.CharField(max_length=100)
bio = models.TextField()
sign_date = models.DateField()
def __unicode__(self):
return self.name
class Concert(models.Model):
main_band = models.ForeignKey(Band, related_name='main_concerts')
opening_band = models.ForeignKey(Band, related_name='opening_concerts',
blank=True)
day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
transport = models.CharField(max_length=100, choices=(
(1, 'Plane'),
(2, 'Train'),
(3, 'Bus')
), blank=True)
class ValidationTestModel(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
users = models.ManyToManyField(User)
state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
is_active = models.BooleanField()
pub_date = models.DateTimeField()
band = models.ForeignKey(Band)
class ValidationTestInlineModel(models.Model):
parent = models.ForeignKey(ValidationTestModel)
__test__ = {'API_TESTS': """
>>> from django.contrib.admin.options import ModelAdmin, HORIZONTAL, VERTICAL
>>> from django.contrib.admin.sites import AdminSite
None of the following tests really depend on the content of the request, so
we'll just pass in None.
>>> request = None
# the sign_date is not 100 percent accurate ;)
>>> band = Band(name='The Doors', bio='', sign_date=date(1965, 1, 1))
>>> band.save()
Under the covers, the admin system will initialize ModelAdmin with a Model
class and an AdminSite instance, so let's just go ahead and do that manually
for testing.
>>> site = AdminSite()
>>> ma = ModelAdmin(Band, site)
>>> ma.get_form(request).base_fields.keys()
['name', 'bio', 'sign_date']
# form/fields/fieldsets interaction ##########################################
fieldsets_add and fieldsets_change should return a special data structure that
is used in the templates. They should generate the "right thing" whether we
have specified a custom form, the fields argument, or nothing at all.
Here's the default case. There are no custom form_add/form_change methods,
no fields argument, and no fieldsets argument.
>>> ma = ModelAdmin(Band, site)
>>> ma.get_fieldsets(request)
[(None, {'fields': ['name', 'bio', 'sign_date']})]
>>> ma.get_fieldsets(request, band)
[(None, {'fields': ['name', 'bio', 'sign_date']})]
If we specify the fields argument, fieldsets_add and fieldsets_change should
just stick the fields into a fieldsets structure and return it.
>>> class BandAdmin(ModelAdmin):
... fields = ['name']
>>> ma = BandAdmin(Band, site)
>>> ma.get_fieldsets(request)
[(None, {'fields': ['name']})]
>>> ma.get_fieldsets(request, band)
[(None, {'fields': ['name']})]
If we specify fields or fieldsets, it should restrict the fields on the Form class
to the fields specified. This may cause errors to be raised in the db layer if
required model fields aren't in fields/fieldsets, but that's preferable to
ghost errors where you have a field in your Form class that isn't being
displayed because you forgot to add it to fields/fieldsets.
>>> class BandAdmin(ModelAdmin):
... fields = ['name']
>>> ma = BandAdmin(Band, site)
>>> ma.get_form(request).base_fields.keys()
['name']
>>> ma.get_form(request, band).base_fields.keys()
['name']
>>> class BandAdmin(ModelAdmin):
... fieldsets = [(None, {'fields': ['name']})]
>>> ma = BandAdmin(Band, site)
>>> ma.get_form(request).base_fields.keys()
['name']
>>> ma.get_form(request, band).base_fields.keys()
['name']
If we specify a form, it should use it, allowing custom validation to work
properly. This won't, however, break any of the admin widgets or media.
>>> from django import forms
>>> class AdminBandForm(forms.ModelForm):
... delete = forms.BooleanField()
...
... class Meta:
... model = Band
>>> class BandAdmin(ModelAdmin):
... form = AdminBandForm
>>> ma = BandAdmin(Band, site)
>>> ma.get_form(request).base_fields.keys()
['name', 'bio', 'sign_date', 'delete']
>>> type(ma.get_form(request).base_fields['sign_date'].widget)
<class 'django.contrib.admin.widgets.AdminDateWidget'>
If we need to override the queryset of a ModelChoiceField in our custom form,
make sure that RelatedFieldWidgetWrapper doesn't mess that up.
>>> band2 = Band(name='The Beetles', bio='', sign_date=date(1962, 1, 1))
>>> band2.save()
>>> class AdminConcertForm(forms.ModelForm):
... class Meta:
... model = Concert
...
... def __init__(self, *args, **kwargs):
... super(AdminConcertForm, self).__init__(*args, **kwargs)
... self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
>>> class ConcertAdmin(ModelAdmin):
... form = AdminConcertForm
>>> ma = ConcertAdmin(Concert, site)
>>> form = ma.get_form(request)()
>>> print form["main_band"]
<select name="main_band" id="id_main_band">
<option value="" selected="selected">---------</option>
<option value="1">The Doors</option>
</select>
>>> band2.delete()
# radio_fields behavior ################################################
First, without any radio_fields specified, the widgets for ForeignKey
and fields with choices specified ought to be a basic Select widget.
ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
they need to be handled properly when type checking. For Select fields, all of
the choices lists have a first entry of dashes.
>>> cma = ModelAdmin(Concert, site)
>>> cmafa = cma.get_form(request)
>>> type(cmafa.base_fields['main_band'].widget.widget)
<class 'django.forms.widgets.Select'>
>>> list(cmafa.base_fields['main_band'].widget.choices)
[(u'', u'---------'), (1, u'The Doors')]
>>> type(cmafa.base_fields['opening_band'].widget.widget)
<class 'django.forms.widgets.Select'>
>>> list(cmafa.base_fields['opening_band'].widget.choices)
[(u'', u'---------'), (1, u'The Doors')]
>>> type(cmafa.base_fields['day'].widget)
<class 'django.forms.widgets.Select'>
>>> list(cmafa.base_fields['day'].widget.choices)
[('', '---------'), (1, 'Fri'), (2, 'Sat')]
>>> type(cmafa.base_fields['transport'].widget)
<class 'django.forms.widgets.Select'>
>>> list(cmafa.base_fields['transport'].widget.choices)
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
Now specify all the fields as radio_fields. Widgets should now be
RadioSelect, and the choices list should have a first entry of 'None' if
blank=True for the model field. Finally, the widget should have the
'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
>>> class ConcertAdmin(ModelAdmin):
... radio_fields = {
... 'main_band': HORIZONTAL,
... 'opening_band': VERTICAL,
... 'day': VERTICAL,
... 'transport': HORIZONTAL,
... }
>>> cma = ConcertAdmin(Concert, site)
>>> cmafa = cma.get_form(request)
>>> type(cmafa.base_fields['main_band'].widget.widget)
<class 'django.contrib.admin.widgets.AdminRadioSelect'>
>>> cmafa.base_fields['main_band'].widget.attrs
{'class': 'radiolist inline'}
>>> list(cmafa.base_fields['main_band'].widget.choices)
[(1, u'The Doors')]
>>> type(cmafa.base_fields['opening_band'].widget.widget)
<class 'django.contrib.admin.widgets.AdminRadioSelect'>
>>> cmafa.base_fields['opening_band'].widget.attrs
{'class': 'radiolist'}
>>> list(cmafa.base_fields['opening_band'].widget.choices)
[(u'', u'None'), (1, u'The Doors')]
>>> type(cmafa.base_fields['day'].widget)
<class 'django.contrib.admin.widgets.AdminRadioSelect'>
>>> cmafa.base_fields['day'].widget.attrs
{'class': 'radiolist'}
>>> list(cmafa.base_fields['day'].widget.choices)
[(1, 'Fri'), (2, 'Sat')]
>>> type(cmafa.base_fields['transport'].widget)
<class 'django.contrib.admin.widgets.AdminRadioSelect'>
>>> cmafa.base_fields['transport'].widget.attrs
{'class': 'radiolist inline'}
>>> list(cmafa.base_fields['transport'].widget.choices)
[('', u'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
>>> band.delete()
# ModelAdmin Option Validation ################################################
>>> from django.contrib.admin.validation import validate
>>> from django.conf import settings
# Ensure validation only runs when DEBUG = True
>>> settings.DEBUG = True
>>> class ValidationTestModelAdmin(ModelAdmin):
... raw_id_fields = 10
>>> site = AdminSite()
>>> site.register(ValidationTestModel, ValidationTestModelAdmin)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.raw_id_fields` must be a list or tuple.
>>> settings.DEBUG = False
>>> class ValidationTestModelAdmin(ModelAdmin):
... raw_id_fields = 10
>>> site = AdminSite()
>>> site.register(ValidationTestModel, ValidationTestModelAdmin)
# raw_id_fields
>>> class ValidationTestModelAdmin(ModelAdmin):
... raw_id_fields = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.raw_id_fields` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... raw_id_fields = ('non_existent_field',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.raw_id_fields` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... raw_id_fields = ('name',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.raw_id_fields[0]`, `name` must be either a ForeignKey or ManyToManyField.
>>> class ValidationTestModelAdmin(ModelAdmin):
... raw_id_fields = ('users',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# fieldsets
>>> class ValidationTestModelAdmin(ModelAdmin):
... fieldsets = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.fieldsets` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... fieldsets = ({},)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.fieldsets[0]` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... fieldsets = ((),)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.fieldsets[0]` does not have exactly two elements.
>>> class ValidationTestModelAdmin(ModelAdmin):
... fieldsets = (("General", ()),)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.fieldsets[0][1]` must be a dictionary.
>>> class ValidationTestModelAdmin(ModelAdmin):
... fieldsets = (("General", {}),)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `fields` key is required in ValidationTestModelAdmin.fieldsets[0][1] field options dict.
>>> class ValidationTestModelAdmin(ModelAdmin):
... fieldsets = (("General", {"fields": ("non_existent_field",)}),)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.fieldsets[0][1]['fields']` refers to field `non_existent_field` that is missing from the form.
>>> class ValidationTestModelAdmin(ModelAdmin):
... fieldsets = (("General", {"fields": ("name",)}),)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
>>> class ValidationTestModelAdmin(ModelAdmin):
... fieldsets = (("General", {"fields": ("name",)}),)
... fields = ["name",]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: Both fieldsets and fields are specified in ValidationTestModelAdmin.
# form
>>> class FakeForm(object):
... pass
>>> class ValidationTestModelAdmin(ModelAdmin):
... form = FakeForm
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: ValidationTestModelAdmin.form does not inherit from BaseModelForm.
# fieldsets with custom form
>>> class BandAdmin(ModelAdmin):
... fieldsets = (
... ('Band', {
... 'fields': ('non_existent_field',)
... }),
... )
>>> validate(BandAdmin, Band)
Traceback (most recent call last):
...
ImproperlyConfigured: `BandAdmin.fieldsets[0][1]['fields']` refers to field `non_existent_field` that is missing from the form.
>>> class BandAdmin(ModelAdmin):
... fieldsets = (
... ('Band', {
... 'fields': ('name',)
... }),
... )
>>> validate(BandAdmin, Band)
>>> class AdminBandForm(forms.ModelForm):
... class Meta:
... model = Band
>>> class BandAdmin(ModelAdmin):
... form = AdminBandForm
...
... fieldsets = (
... ('Band', {
... 'fields': ('non_existent_field',)
... }),
... )
>>> validate(BandAdmin, Band)
Traceback (most recent call last):
...
ImproperlyConfigured: `BandAdmin.fieldsets[0][1]['fields']` refers to field `non_existent_field` that is missing from the form.
>>> class AdminBandForm(forms.ModelForm):
... delete = forms.BooleanField()
...
... class Meta:
... model = Band
>>> class BandAdmin(ModelAdmin):
... form = AdminBandForm
...
... fieldsets = (
... ('Band', {
... 'fields': ('name', 'bio', 'sign_date', 'delete')
... }),
... )
>>> validate(BandAdmin, Band)
# filter_vertical
>>> class ValidationTestModelAdmin(ModelAdmin):
... filter_vertical = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.filter_vertical` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... filter_vertical = ("non_existent_field",)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.filter_vertical` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... filter_vertical = ("name",)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.filter_vertical[0]` must be a ManyToManyField.
>>> class ValidationTestModelAdmin(ModelAdmin):
... filter_vertical = ("users",)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# filter_horizontal
>>> class ValidationTestModelAdmin(ModelAdmin):
... filter_horizontal = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.filter_horizontal` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... filter_horizontal = ("non_existent_field",)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.filter_horizontal` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... filter_horizontal = ("name",)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.filter_horizontal[0]` must be a ManyToManyField.
>>> class ValidationTestModelAdmin(ModelAdmin):
... filter_horizontal = ("users",)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# radio_fields
>>> class ValidationTestModelAdmin(ModelAdmin):
... radio_fields = ()
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.radio_fields` must be a dictionary.
>>> class ValidationTestModelAdmin(ModelAdmin):
... radio_fields = {"non_existent_field": None}
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.radio_fields` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... radio_fields = {"name": None}
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.radio_fields['name']` is neither an instance of ForeignKey nor does have choices set.
>>> class ValidationTestModelAdmin(ModelAdmin):
... radio_fields = {"state": None}
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.radio_fields['state']` is neither admin.HORIZONTAL nor admin.VERTICAL.
>>> class ValidationTestModelAdmin(ModelAdmin):
... radio_fields = {"state": VERTICAL}
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# prepopulated_fields
>>> class ValidationTestModelAdmin(ModelAdmin):
... prepopulated_fields = ()
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.prepopulated_fields` must be a dictionary.
>>> class ValidationTestModelAdmin(ModelAdmin):
... prepopulated_fields = {"non_existent_field": None}
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.prepopulated_fields` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... prepopulated_fields = {"slug": ("non_existent_field",)}
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.prepopulated_fields['non_existent_field'][0]` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... prepopulated_fields = {"users": ("name",)}
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.prepopulated_fields['users']` is either a DateTimeField, ForeignKey or ManyToManyField. This isn't allowed.
>>> class ValidationTestModelAdmin(ModelAdmin):
... prepopulated_fields = {"slug": ("name",)}
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# list_display
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_display = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_display` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_display = ('non_existent_field',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_display[0]` refers to `non_existent_field` that is neither a field, method or property of model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_display = ('users',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_display[0]`, `users` is a ManyToManyField which is not supported.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_display = ('name',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# list_display_links
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_display_links = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_display_links` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_display_links = ('non_existent_field',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_display_links[0]` refers to `non_existent_field` that is neither a field, method or property of model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_display_links = ('name',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_display_links[0]`refers to `name` which is not defined in `list_display`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_display = ('name',)
... list_display_links = ('name',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# list_filter
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_filter = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_filter` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_filter = ('non_existent_field',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_filter[0]` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_filter = ('is_active',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# list_per_page
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_per_page = 'hello'
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_per_page` should be a integer.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_per_page = 100
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# search_fields
>>> class ValidationTestModelAdmin(ModelAdmin):
... search_fields = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.search_fields` must be a list or tuple.
# date_hierarchy
>>> class ValidationTestModelAdmin(ModelAdmin):
... date_hierarchy = 'non_existent_field'
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.date_hierarchy` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... date_hierarchy = 'name'
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.
>>> class ValidationTestModelAdmin(ModelAdmin):
... date_hierarchy = 'pub_date'
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# ordering
>>> class ValidationTestModelAdmin(ModelAdmin):
... ordering = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.ordering` must be a list or tuple.
>>> class ValidationTestModelAdmin(ModelAdmin):
... ordering = ('non_existent_field',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.ordering[0]` refers to field `non_existent_field` that is missing from model `ValidationTestModel`.
>>> class ValidationTestModelAdmin(ModelAdmin):
... ordering = ('?', 'name')
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.ordering` has the random ordering marker `?`, but contains other fields as well. Please either remove `?` or the other fields.
>>> class ValidationTestModelAdmin(ModelAdmin):
... ordering = ('?',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
>>> class ValidationTestModelAdmin(ModelAdmin):
... ordering = ('band__name',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
>>> class ValidationTestModelAdmin(ModelAdmin):
... ordering = ('name',)
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# list_select_related
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_select_related = 1
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.list_select_related` should be a boolean.
>>> class ValidationTestModelAdmin(ModelAdmin):
... list_select_related = False
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# save_as
>>> class ValidationTestModelAdmin(ModelAdmin):
... save_as = 1
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.save_as` should be a boolean.
>>> class ValidationTestModelAdmin(ModelAdmin):
... save_as = True
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# save_on_top
>>> class ValidationTestModelAdmin(ModelAdmin):
... save_on_top = 1
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.save_on_top` should be a boolean.
>>> class ValidationTestModelAdmin(ModelAdmin):
... save_on_top = True
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# inlines
>>> from django.contrib.admin.options import TabularInline
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = 10
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.inlines` must be a list or tuple.
>>> class ValidationTestInline(object):
... pass
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.inlines[0]` does not inherit from BaseModelAdmin.
>>> class ValidationTestInline(TabularInline):
... pass
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `model` is a required attribute of `ValidationTestModelAdmin.inlines[0]`.
>>> class SomethingBad(object):
... pass
>>> class ValidationTestInline(TabularInline):
... model = SomethingBad
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestModelAdmin.inlines[0].model` does not inherit from models.Model.
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# fields
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... fields = 10
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestInline.fields` must be a list or tuple.
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... fields = ("non_existent_field",)
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestInline.fields` refers to field `non_existent_field` that is missing from the form.
# fk_name
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... fk_name = "non_existent_field"
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestInline.fk_name` refers to field `non_existent_field` that is missing from model `ValidationTestInlineModel`.
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... fk_name = "parent"
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# extra
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... extra = "hello"
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestInline.extra` should be a integer.
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... extra = 2
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# max_num
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... max_num = "hello"
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestInline.max_num` should be a integer.
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... max_num = 2
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
# formset
>>> from django.forms.models import BaseModelFormSet
>>> class FakeFormSet(object):
... pass
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... formset = FakeFormSet
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
Traceback (most recent call last):
...
ImproperlyConfigured: `ValidationTestInline.formset` does not inherit from BaseModelFormSet.
>>> class RealModelFormSet(BaseModelFormSet):
... pass
>>> class ValidationTestInline(TabularInline):
... model = ValidationTestInlineModel
... formset = RealModelFormSet
>>> class ValidationTestModelAdmin(ModelAdmin):
... inlines = [ValidationTestInline]
>>> validate(ValidationTestModelAdmin, ValidationTestModel)
"""
}
| thomazs/geraldo | site/newsite/django_1_0/tests/regressiontests/modeladmin/models.py | Python | lgpl-3.0 | 31,422 | 0.002005 |
import os
from functools import partial
from PyQt4.QtGui import QWidget
from PyQt4.QtCore import Qt
from qgis.core import QgsMapLayer
from qgis.gui import QgsExpressionBuilderDialog
from roam.api.utils import layer_by_name
from configmanager.models import QgsLayerModel, QgsFieldModel
from configmanager.editorwidgets.core import ConfigWidget
from configmanager.editorwidgets.uifiles.ui_listwidget_config import Ui_Form
class ListWidgetConfig(Ui_Form, ConfigWidget):
description = 'Select an item from a predefined list'
def __init__(self, parent=None):
super(ListWidgetConfig, self).__init__(parent)
self.setupUi(self)
self.allownull = False
self.orderby = False
self.orderbyCheck.hide()
self.layerRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 0))
self.listRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 1))
self.layermodel = QgsLayerModel(watchregistry=False)
self.layermodel.layerfilter = [QgsMapLayer.VectorLayer]
self.fieldmodel = QgsFieldModel()
self.blockSignals(True)
self.layerCombo.setModel(self.layermodel)
self.keyCombo.setModel(self.fieldmodel)
self.valueCombo.setModel(self.fieldmodel)
self.filterButton.pressed.connect(self.define_filter)
self.fieldmodel.setLayerFilter(self.layerCombo.view().selectionModel())
self.reset()
self.blockSignals(False)
def define_filter(self):
layer = self.layerCombo.currentText()
if not layer:
return
layer = layer_by_name(layer)
dlg = QgsExpressionBuilderDialog(layer, "List filter", self)
text = self.filterText.toPlainText()
dlg.setExpressionText(text)
if dlg.exec_():
self.filterText.setPlainText(dlg.expressionText())
def reset(self):
self.listtype = 'layer'
self.listText.setPlainText('')
self.orderby = False
self.allownull = False
self.filterText.setPlainText('')
self.layerCombo.setCurrentIndex(-1)
self.keyCombo.setCurrentIndex(-1)
self.valueCombo.setCurrentIndex(-1)
def widgetchanged(self):
self.widgetdirty.emit(self.getconfig())
@property
def allownull(self):
return self.allownullCheck.isChecked()
@allownull.setter
def allownull(self, value):
self.allownullCheck.setChecked(value)
@property
def orderby(self):
return self.orderbyCheck.isChecked()
@orderby.setter
def orderby(self, value):
self.orderbyCheck.setChecked(value)
@property
def list(self):
return [item for item in self.listText.toPlainText().split('\n')]
@property
def filter(self):
return self.filterText.toPlainText()
@property
def layer(self):
return self.layerCombo.currentText()
@property
def key(self):
index_key = self.fieldmodel.index(self.keyCombo.currentIndex(), 0)
fieldname_key = self.fieldmodel.data(index_key, QgsFieldModel.FieldNameRole)
return fieldname_key
@property
def value(self):
index_value = self.fieldmodel.index(self.valueCombo.currentIndex(), 0)
return self.fieldmodel.data(index_value, QgsFieldModel.FieldNameRole)
def getconfig(self):
config = {}
config['allownull'] = self.allownull
config['orderbyvalue'] = self.orderby
if self.layerRadio.isChecked():
subconfig = {}
# TODO Grab the data here and not just the text
subconfig['layer'] = self.layer
subconfig['key'] = self.key
subconfig['value'] = self.value
subconfig['filter'] = self.filter
config['layer'] = subconfig
else:
config['list'] = {}
config['list']['items'] = self.list
return config
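    # Illustrative sketch of the dict produced by getconfig (layer/key/value
    # names here are hypothetical examples, not values from the original code):
    #   {'allownull': True, 'orderbyvalue': False,
    #    'layer': {'layer': 'roads', 'key': 'id', 'value': 'name', 'filter': ''}}
    # or, when the static list mode is selected:
    #   {'allownull': True, 'orderbyvalue': False, 'list': {'items': ['A', 'B']}}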
def blockSignals(self, bool):
for child in self.findChildren(QWidget):
child.blockSignals(bool)
super(ListWidgetConfig, self).blockSignals(bool)
def setconfig(self, config):
self.blockSignals(True)
self.allownull = config.get('allownull', True)
self.orderby = config.get('orderbyvalue', False)
        # Clear the widgets
self.listText.setPlainText('')
self.keyCombo.clear()
self.valueCombo.clear()
self.filterText.clear()
self.layermodel.refresh()
# Rebind all the values
if 'list' in config:
subconfig = config.get('list', {})
self.listRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(1)
listitems = subconfig.get('items', [])
itemtext = '\n'.join(listitems)
self.listText.setPlainText(itemtext)
else:
self.layerRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(0)
subconfig = config.get('layer', {})
layer = subconfig.get('layer', '') or ''
key = subconfig.get('key', '') or ''
value = subconfig.get('value', '') or ''
filter = subconfig.get('filter', None)
index = self.layerCombo.findData(layer, Qt.DisplayRole)
if index > -1:
self.layerCombo.setCurrentIndex(index)
index = self.layermodel.index(index, 0)
self.fieldmodel.updateLayer(index, None)
keyindex = self.keyCombo.findData(key.lower(), QgsFieldModel.FieldNameRole)
if keyindex > -1:
self.keyCombo.setCurrentIndex(keyindex)
valueindex = self.valueCombo.findData(value.lower(), QgsFieldModel.FieldNameRole)
if valueindex > -1:
self.valueCombo.setCurrentIndex(valueindex)
self.filterText.setPlainText(filter)
self.allownullCheck.setChecked(self.allownull)
self.orderbyCheck.setChecked(self.orderby)
self.blockSignals(False)
| lmotta/Roam | src/configmanager/editorwidgets/listwidget.py | Python | gpl-2.0 | 5,997 | 0.001501 |
from typing import List, Optional
from fastapi import FastAPI, Header
app = FastAPI()
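# The Optional[List[str]] declaration below lets clients send the X-Token header
# more than once; FastAPI gathers the repeated values into a list,
# e.g. {"X-Token values": ["foo", "bar"]}.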
@app.get("/items/")
async def read_items(x_token: Optional[List[str]] = Header(None)):
return {"X-Token values": x_token}
| tiangolo/fastapi | docs_src/header_params/tutorial003.py | Python | mit | 216 | 0 |
import re
class Challenges:
@staticmethod
def first_factorial(number: int) -> int:
"""
        Compute the factorial iteratively.
        :param number: non-negative integer to take the factorial of
        :return: the factorial of number
"""
found = 1
step = 2
while step <= number:
found *= step
step += 1
return found
@staticmethod
def longest_word(sentence: str) -> str:
"""
        Find the longest word in a sentence, ignoring punctuation.
        :param sentence: input sentence; non-alphanumeric characters are stripped first
        :return: the first word of maximum length
"""
trimmed = re.compile('[^a-zA-Z0-9 ]').sub('', sentence)
chunks = trimmed.split(' ')
longest = 0
index = -1
for i, x in enumerate(chunks):
if len(x) > longest:
longest = len(x)
index = i
return chunks[index]
@staticmethod
def letter_mutation(string):
"""
        Coderbyte challenge: Letter Changes.
        Shift every lowercase letter to the next letter of the alphabet (z wraps
        to a), then capitalize any vowels in the shifted result.
        :param string: a sentence
        :return: str, transformed sentence
"""
alphabet = list(range(97, 123))
alphabet_len = len(alphabet) - 1
ret = ''
vowels = list('aeiou')
for x in list(string):
r = x
if ord(x) in alphabet:
if ord(x) == alphabet[alphabet_len]:
r = chr(alphabet[0])
else:
r = chr(ord(x) + 1)
if r in vowels:
r = r.upper()
ret += r
return ret
| rlishtaba/py-algorithms | py_algorithms/challenges/challenges.py | Python | mit | 1,505 | 0 |
import sys
from starstoloves.models import User as UserModel
from starstoloves import model_repository
from starstoloves.lib.track import lastfm_track_repository
from .user import User
def from_session_key(session_key):
user_model, created = UserModel.objects.get_or_create(session_key=session_key)
return User(
session_key=session_key,
repository=sys.modules[__name__],
    )
def delete(user):
try:
user_model = model_repository.from_user(user)
user_model.delete()
except UserModel.DoesNotExist:
        pass
| tdhooper/starstoloves | starstoloves/lib/user/user_repository.py | Python | gpl-2.0 | 564 | 0.007092 |
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django.utils.translation import ugettext_lazy as _
from django_lets_go.utils import Choice
class DNC_COLUMN_NAME(Choice):
id = _('ID')
name = _('name')
date = _('date')
contacts = _('contacts')
class DNC_CONTACT_COLUMN_NAME(Choice):
id = _('ID')
dnc = _('DNC')
phone_number = _('phone number')
date = _('date')
| romonzaman/newfies-dialer | newfies/dnc/constants.py | Python | mpl-2.0 | 748 | 0 |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import time
import array
import shlex
import errno
import fcntl
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import pwd
import ConfigParser
import StringIO
from string import maketrans
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
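# Typical use: decorate slow collectors, e.g. the @timeout(10) on the various
# get_mount_facts() implementations below.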
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'),
('/etc/slackware-version', 'Slackware'),
('/etc/redhat-release', 'RedHat'),
('/etc/vmware-release', 'VMwareESX'),
('/etc/openwrt_release', 'OpenWrt'),
('/etc/system-release', 'OtherLinux'),
('/etc/alpine-release', 'Alpine'),
('/etc/release', 'Solaris'),
('/etc/arch-release', 'Archlinux'),
('/etc/SuSE-release', 'SuSE'),
('/etc/os-release', 'SuSE'),
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
('/etc/lsb-release', 'Mandriva'),
('/etc/os-release', 'NA'),
)
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
{ 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
{ 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' },
]
def __init__(self, load_on_init=True):
self.facts = {}
if load_on_init:
self.get_platform_facts()
self.get_distribution_facts()
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_service_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
self.get_dns_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'Linux':
self.get_distribution_facts()
elif self.facts['system'] == 'AIX':
# Attempt to use getconf to figure out architecture
# fall back to bootinfo if needed
if module.get_bin_path('getconf'):
rc, out, err = module.run_command([module.get_bin_path('getconf'),
'MACHINE_ARCHITECTURE'])
data = out.split('\n')
self.facts['architecture'] = data[0]
else:
rc, out, err = module.run_command([module.get_bin_path('bootinfo'),
'-p'])
data = out.split('\n')
self.facts['architecture'] = data[0]
elif self.facts['system'] == 'OpenBSD':
self.facts['architecture'] = platform.uname()[5]
def get_local_facts(self):
fact_path = module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact','')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
rc, out, err = module.run_command(fn)
else:
out = get_file_content(fn, default='')
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError:
# load raw ini
cp = ConfigParser.ConfigParser()
try:
cp.readfp(StringIO.StringIO(out))
except ConfigParser.Error:
fact = "error loading fact - please check content"
else:
fact = {}
#print cp.sections()
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt]=val
local[fact_base] = fact
if not local:
return
self.facts['local'] = local
# platform.dist() is deprecated in 2.6
# in 2.6 and newer, you should use platform.linux_distribution()
def get_distribution_facts(self):
# A list with OS Family members
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
)
# TODO: Rewrite this to use the function references in a dict pattern
# as it's much cleaner than this massive if-else
if self.facts['system'] == 'AIX':
self.facts['distribution'] = 'AIX'
rc, out, err = module.run_command("/usr/bin/oslevel")
data = out.split('.')
self.facts['distribution_version'] = data[0]
self.facts['distribution_release'] = data[1]
elif self.facts['system'] == 'HP-UX':
self.facts['distribution'] = 'HP-UX'
rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
self.facts['distribution_version'] = data.groups()[0]
self.facts['distribution_release'] = data.groups()[1]
elif self.facts['system'] == 'Darwin':
self.facts['distribution'] = 'MacOSX'
rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
self.facts['distribution_version'] = data
elif self.facts['system'] == 'FreeBSD':
self.facts['distribution'] = 'FreeBSD'
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
elif self.facts['system'] == 'NetBSD':
self.facts['distribution'] = 'NetBSD'
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
elif self.facts['system'] == 'OpenBSD':
self.facts['distribution'] = 'OpenBSD'
self.facts['distribution_release'] = platform.release()
rc, out, err = module.run_command("/sbin/sysctl -n kern.version")
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
self.facts['distribution_version'] = match.groups()[0]
else:
self.facts['distribution_version'] = 'release'
else:
dist = platform.dist()
self.facts['distribution'] = dist[0].capitalize() or 'NA'
self.facts['distribution_version'] = dist[1] or 'NA'
self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
self.facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
for (path, name) in Facts.OSDIST_LIST:
if os.path.exists(path):
if os.path.getsize(path) > 0:
if self.facts['distribution'] in ('Fedora', ):
# Once we determine the value is one of these distros
# we trust the values are always correct
break
elif name == 'Archlinux':
data = get_file_content(path)
if 'Arch Linux' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
break
elif name == 'Slackware':
data = get_file_content(path)
if 'Slackware' in data:
self.facts['distribution'] = name
version = re.findall('\w+[.]\w+', data)
if version:
self.facts['distribution_version'] = version[0]
break
elif name == 'OracleLinux':
data = get_file_content(path)
if 'Oracle Linux' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
break
elif name == 'RedHat':
data = get_file_content(path)
if 'Red Hat' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
break
elif name == 'OtherLinux':
data = get_file_content(path)
if 'Amazon' in data:
self.facts['distribution'] = 'Amazon'
self.facts['distribution_version'] = data.split()[-1]
break
elif name == 'OpenWrt':
data = get_file_content(path)
if 'OpenWrt' in data:
self.facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif name == 'Alpine':
data = get_file_content(path)
self.facts['distribution'] = name
self.facts['distribution_version'] = data
break
elif name == 'Solaris':
data = get_file_content(path).split('\n')[0]
if 'Solaris' in data:
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ','')
ora_prefix = 'Oracle '
self.facts['distribution'] = data.split()[0]
self.facts['distribution_version'] = data.split()[1]
self.facts['distribution_release'] = ora_prefix + data
break
uname_rc, uname_out, uname_err = module.run_command(['uname', '-v'])
distribution_version = None
if 'SmartOS' in data:
self.facts['distribution'] = 'SmartOS'
if os.path.exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').split('\n') if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
self.facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
self.facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_rc == 0 and 'NexentaOS_' in uname_out:
self.facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
self.facts['distribution_release'] = data.strip()
if distribution_version is not None:
self.facts['distribution_version'] = distribution_version
elif uname_rc == 0:
self.facts['distribution_version'] = uname_out.split('\n')[0].strip()
break
elif name == 'SuSE':
data = get_file_content(path)
if 'suse' in data.lower():
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
self.facts['distribution'] = distribution.group(1).strip('"')
distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line) # example pattern are 13.04 13.0 13
if distribution_version:
self.facts['distribution_version'] = distribution_version.group(1)
if 'open' in data.lower():
release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line)
if release:
self.facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower():
                                            release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line) # SLES doesn't have funny release names
if release:
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
self.facts['distribution_release'] = release
break
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).split('\n')[0]
self.facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
self.facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
self.facts['distribution'] = "SLES"
elif "Desktop" in data:
self.facts['distribution'] = "SLED"
for line in lines:
                                            release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't have funny release names
if release:
self.facts['distribution_release'] = release.group(1)
self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
elif name == 'Debian':
data = get_file_content(path)
if 'Debian' in data or 'Raspbian' in data:
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif 'Ubuntu' in data:
break # Ubuntu gets correct info from python functions
elif name == 'Mandriva':
data = get_file_content(path)
if 'Mandriva' in data:
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
self.facts['distribution'] = name
break
elif name == 'NA':
data = get_file_content(path)
for line in data.splitlines():
if self.facts['distribution'] == 'NA':
distribution = re.search("^NAME=(.*)", line)
if distribution:
self.facts['distribution'] = distribution.group(1).strip('"')
if self.facts['distribution_version'] == 'NA':
version = re.search("^VERSION=(.*)", line)
if version:
self.facts['distribution_version'] = version.group(1).strip('"')
if self.facts['distribution'].lower() == 'coreos':
data = get_file_content('/etc/coreos/update.conf')
release = re.search("^GROUP=(.*)", data)
if release:
self.facts['distribution_release'] = release.group(1).strip('"')
else:
self.facts['distribution'] = name
machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
if machine_id:
machine_id = machine_id.split('\n')[0]
self.facts["machine_id"] = machine_id
self.facts['os_family'] = self.facts['distribution']
if self.facts['distribution'] in OS_FAMILY:
self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
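        # e.g. "ro quiet root=/dev/sda1" becomes {'ro': True, 'quiet': True, 'root': '/dev/sda1'}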
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError:
pass
def get_public_ssh_host_keys(self):
keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
if self.facts['system'] == 'Darwin':
if self.facts['distribution'] == 'MacOSX' and LooseVersion(self.facts['distribution_version']) >= LooseVersion('10.11') :
keydir = '/etc/ssh'
else:
keydir = '/etc'
else:
keydir = '/etc/ssh'
for type_ in keytypes:
key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_)
keydata = get_file_content(key_filename)
if keydata is not None:
factname = 'ssh_host_key_%s_public' % type_
self.facts[factname] = keydata.split()[1]
def get_pkg_mgr_facts(self):
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
if os.path.exists(pkg['path']):
self.facts['pkg_mgr'] = pkg['name']
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
def get_service_mgr_facts(self):
#TODO: detect more custom init setups like bootscripts, dmd, s6, etc
        # also, OSs other than Linux might need to check across several possible candidates
# try various forms of querying pid 1
proc_1 = get_file_content('/proc/1/comm')
if proc_1 is None:
rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
if proc_1 in ['init', '/sbin/init']:
# many systems return init, so this cannot be trusted
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
if proc_1 is not None:
self.facts['service_mgr'] = proc_1
# start with the easy ones
elif self.facts['distribution'] == 'MacOSX':
#FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
self.facts['service_mgr'] = 'launchd'
else:
self.facts['service_mgr'] = 'systemstarter'
elif self.facts['system'].endswith('BSD') or self.facts['system'] in ['Bitrig', 'DragonFly']:
#FIXME: we might want to break out to individual BSDs
self.facts['service_mgr'] = 'bsdinit'
elif self.facts['system'] == 'AIX':
self.facts['service_mgr'] = 'src'
elif self.facts['system'] == 'SunOS':
#FIXME: smf?
self.facts['service_mgr'] = 'svcs'
elif self.facts['system'] == 'Linux':
if self._check_systemd():
self.facts['service_mgr'] = 'systemd'
elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
self.facts['service_mgr'] = 'upstart'
elif module.get_bin_path('rc-service'):
self.facts['service_mgr'] = 'openrc'
elif os.path.exists('/etc/init.d/'):
self.facts['service_mgr'] = 'sysvinit'
if not self.facts.get('service_mgr', False):
# if we cannot detect, fallback to generic 'service'
self.facts['service_mgr'] = 'service'
def get_lsb_facts(self):
lsb_path = module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = module.run_command([lsb_path, "-a"])
if rc == 0:
self.facts['lsb'] = {}
for line in out.split('\n'):
if len(line) < 1 or ':' not in line:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
self.facts['lsb']['release'] = value
elif 'Distributor ID:' in line:
self.facts['lsb']['id'] = value
elif 'Description:' in line:
self.facts['lsb']['description'] = value
elif 'Release:' in line:
self.facts['lsb']['release'] = value
elif 'Codename:' in line:
self.facts['lsb']['codename'] = value
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
elif lsb_path is None and os.path.exists('/etc/lsb-release'):
self.facts['lsb'] = {}
for line in get_file_lines('/etc/lsb-release'):
value = line.split('=',1)[1].strip()
if 'DISTRIB_ID' in line:
self.facts['lsb']['id'] = value
elif 'DISTRIB_RELEASE' in line:
self.facts['lsb']['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
self.facts['lsb']['description'] = value
elif 'DISTRIB_CODENAME' in line:
self.facts['lsb']['codename'] = value
else:
return self.facts
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
def get_selinux_facts(self):
if not HAVE_SELINUX:
self.facts['selinux'] = False
return
self.facts['selinux'] = {}
if not selinux.is_selinux_enabled():
self.facts['selinux']['status'] = 'disabled'
else:
self.facts['selinux']['status'] = 'enabled'
try:
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
except OSError:
self.facts['selinux']['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
self.facts['selinux']['config_mode'] = 'unknown'
except OSError:
self.facts['selinux']['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
except OSError:
self.facts['selinux']['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
self.facts['selinux']['type'] = policytype
else:
self.facts['selinux']['type'] = 'unknown'
except OSError:
self.facts['selinux']['type'] = 'unknown'
def get_fips_facts(self):
self.facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
self.facts['fips'] = True
def get_date_time_facts(self):
self.facts['date_time'] = {}
now = datetime.datetime.now()
self.facts['date_time']['year'] = now.strftime('%Y')
self.facts['date_time']['month'] = now.strftime('%m')
self.facts['date_time']['weekday'] = now.strftime('%A')
self.facts['date_time']['weekday_number'] = now.strftime('%w')
self.facts['date_time']['weeknumber'] = now.strftime('%W')
self.facts['date_time']['day'] = now.strftime('%d')
self.facts['date_time']['hour'] = now.strftime('%H')
self.facts['date_time']['minute'] = now.strftime('%M')
self.facts['date_time']['second'] = now.strftime('%S')
self.facts['date_time']['epoch'] = now.strftime('%s')
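        # strftime('%s') is a platform-specific extension; fall back to time.time() where it is unsupported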
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
self.facts['date_time']['epoch'] = str(int(time.time()))
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
def _check_systemd(self):
# tools must be installed
if module.get_bin_path('systemctl'):
            # this should show whether systemd is the boot init system when checking init failed to identify it
            # these mirror systemd's own sd_booted() test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
return False
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
pwent = pwd.getpwnam(getpass.getuser())
self.facts['user_uid'] = pwent.pw_uid
self.facts['user_gid'] = pwent.pw_gid
self.facts['user_gecos'] = pwent.pw_gecos
self.facts['user_dir'] = pwent.pw_dir
self.facts['user_shell'] = pwent.pw_shell
def get_env_facts(self):
self.facts['env'] = {}
for k,v in os.environ.iteritems():
self.facts['env'][k] = v
def get_dns_facts(self):
self.facts['dns'] = {}
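        # Parse resolv.conf keywords: nameserver, domain, search, sortlist and options
        # (bare option flags become True, "ndots:2"-style options keep their value).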
for line in get_file_content('/etc/resolv.conf', '').splitlines():
if line.startswith('#') or line.startswith(';') or line.strip() == '':
continue
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'nameserver':
self.facts['dns']['nameservers'] = []
for nameserver in tokens[1:]:
self.facts['dns']['nameservers'].append(nameserver)
elif tokens[0] == 'domain':
self.facts['dns']['domain'] = tokens[1]
elif tokens[0] == 'search':
self.facts['dns']['search'] = []
for suffix in tokens[1:]:
self.facts['dns']['search'].append(suffix)
elif tokens[0] == 'sortlist':
self.facts['dns']['sortlist'] = []
for address in tokens[1:]:
self.facts['dns']['sortlist'].append(address)
elif tokens[0] == 'options':
self.facts['dns']['options'] = {}
for option in tokens[1:]:
option_tokens = option.split(':', 1)
if len(option_tokens) == 0:
continue
val = len(option_tokens) == 2 and option_tokens[1] or True
self.facts['dns']['options'][option_tokens[0]] = val
class Hardware(Facts):
"""
    This is a generic Hardware subclass of Facts. It should be further
    subclassed per platform. Subclasses should define:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
All subclasses MUST define platform.
"""
platform = 'Generic'
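    # __new__ picks the subclass whose 'platform' attribute matches platform.system(),
    # so instantiating Hardware() returns e.g. a LinuxHardware object on Linux.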
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Hardware.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines number of DMI facts and device facts.
"""
platform = 'Linux'
# Originally only had these four as toplevelfacts
ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
# Now we have all of these in a dict structure
MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
self.get_uptime_facts()
self.get_lvm_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
memstats = {}
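        # The 'nocache' figures below treat buffers and page cache as reclaimable,
        # mirroring the buffers/cache line of free(1).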
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = long(val) / 1024
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
self.facts['memory_mb'] = {
'real' : {
'total': memstats.get('memtotal'),
'used': memstats.get('real:used'),
'free': memstats.get('memfree'),
},
'nocache' : {
'free': memstats.get('nocache:free'),
'used': memstats.get('nocache:used'),
},
'swap' : {
'total': memstats.get('swaptotal'),
'free': memstats.get('swapfree'),
'used': memstats.get('swap:used'),
'cached': memstats.get('swapcached'),
},
}
def get_cpu_facts(self):
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
if 'vme' not in data:
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor']:
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
self.facts['processor_cores'] = int(data[1].strip())
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
if self.facts['architecture'] != 's390x':
if xen_paravirt:
self.facts['processor_count'] = i
self.facts['processor_cores'] = i
self.facts['processor_threads_per_core'] = 1
self.facts['processor_vcpus'] = i
else:
self.facts['processor_count'] = sockets and len(sockets) or i
self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
self.facts['processor_threads_per_core'] = ((cores.values() and
cores.values()[0] or 1) / self.facts['processor_cores'])
self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
self.facts['processor_count'] * self.facts['processor_cores'])
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade" ]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
self.facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError:
self.facts['form_factor'] = 'unknown (%s)' % data
else:
self.facts[key] = data
else:
self.facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
self.facts[k] = thisvalue
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
mtab = get_file_content('/etc/mtab', '')
for line in mtab.split('\n'):
if line.startswith('/'):
fields = line.rstrip('\n').split()
if(fields[2] != 'none'):
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(fields[1])
size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
except OSError:
continue
uuid = 'NA'
lsblkPath = module.get_bin_path("lsblk")
if lsblkPath:
rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True)
if rc == 0:
uuid = out.strip()
self.facts['mounts'].append(
{'mount': fields[1],
'device':fields[0],
'fstype': fields[2],
'options': fields[3],
# statvfs data
'size_total': size_total,
'size_available': size_available,
'uuid': uuid,
})
def get_device_facts(self):
self.facts['devices'] = {}
lspci = module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = module.run_command([lspci, '-D'])
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError, e:
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
if "virtual" in path:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
if virtual:
continue
d = {}
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key,test in [ ('removable','/removable'), \
('support_discard','/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + "\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
d['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
d['holders'].append(name)
else:
d['holders'].append(folder)
self.facts['devices'][diskname] = d
def get_uptime_facts(self):
uptime_file_content = get_file_content('/proc/uptime')
if uptime_file_content:
uptime_seconds_string = uptime_file_content.split(' ')[0]
self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
def get_lvm_facts(self):
""" Get LVM Facts if running as root and lvm utils are available """
if os.getuid() == 0 and module.get_bin_path('vgs'):
lvm_util_options = '--noheadings --nosuffix --units g'
vgs_path = module.get_bin_path('vgs')
#vgs fields: VG #PV #LV #SN Attr VSize VFree
vgs={}
if vgs_path:
rc, vg_lines, err = module.run_command( '%s %s' % (vgs_path, lvm_util_options))
for vg_line in vg_lines.splitlines():
items = vg_line.split()
vgs[items[0]] = {'size_g':items[-2],
'free_g':items[-1],
'num_lvs': items[2],
'num_pvs': items[1]}
lvs_path = module.get_bin_path('lvs')
#lvs fields:
#LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
lvs = {}
if lvs_path:
rc, lv_lines, err = module.run_command( '%s %s' % (lvs_path, lvm_util_options))
for lv_line in lv_lines.splitlines():
items = lv_line.split()
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
self.facts['lvm'] = {'lvs': lvs, 'vgs': vgs}
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
"""
platform = 'SunOS'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
physid = 0
sockets = {}
rc, out, err = module.run_command("/usr/bin/kstat cpu_info")
self.facts['processor'] = []
for line in out.split('\n'):
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
if self.facts['machine'] != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
# these processors have: sockets -> cores -> threads/virtual CPU.
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_cores'] = 'NA'
self.facts['processor_count'] = len(self.facts['processor'])
def get_memory_facts(self):
rc, out, err = module.run_command(["/usr/sbin/prtconf"])
for line in out.split('\n'):
if 'Memory size' in line:
self.facts['memtotal_mb'] = line.split()[2]
rc, out, err = module.run_command("/usr/sbin/swap -s")
allocated = long(out.split()[1][:-1])
reserved = long(out.split()[5][:-1])
used = long(out.split()[8][:-1])
free = long(out.split()[10][:-1])
self.facts['swapfree_mb'] = free / 1024
self.facts['swaptotal_mb'] = (free + used) / 1024
self.facts['swap_allocated_mb'] = allocated / 1024
self.facts['swap_reserved_mb'] = reserved / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
# For a detailed format description see mnttab(4)
# special mount_point fstype options time
fstab = get_file_content('/etc/mnttab')
if fstab:
for line in fstab.split('\n'):
fields = line.rstrip('\n').split('\t')
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4]})
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- processor_speed
- devices
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_memory_facts()
self.get_processor_facts()
self.get_device_facts()
self.get_mount_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
(key, value) = line.split('=')
sysctl[key] = value.strip()
return sysctl
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
if fields[1] == 'none' or fields[3] == 'xx':
continue
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
def get_memory_facts(self):
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = maketrans(' ', ' ')
data = out.split()
self.facts['swapfree_mb'] = long(data[-2].translate(swaptrans, "kmg")) / 1024
self.facts['swaptotal_mb'] = long(data[1].translate(swaptrans, "kmg")) / 1024
def get_processor_facts(self):
processor = []
dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
i = 0
for line in dmesg_boot.splitlines():
if line.split(' ', 1)[0] == 'cpu%i:' % i:
processor.append(line.split(' ', 1)[1])
i = i + 1
processor_count = i
self.facts['processor'] = processor
self.facts['processor_count'] = processor_count
# I found no way to figure out the number of Cores per CPU in OpenBSD
self.facts['processor_cores'] = 'NA'
def get_device_facts(self):
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
self.facts['devices'] = devices
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
self.facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
for line in dmesg_boot.split('\n'):
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
self.facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
self.facts['processor_cores'] = line.split()[4]
def get_memory_facts(self):
rc, out, err = module.run_command("/sbin/sysctl vm.stats")
for line in out.split('\n'):
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = long(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = long(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = long(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = module.run_command("/usr/sbin/swapinfo -k")
lines = out.split('\n')
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
self.facts['swaptotal_mb'] = int(data[1]) / 1024
self.facts['swapfree_mb'] = int(data[3]) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
def get_device_facts(self):
sysdir = '/dev'
self.facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
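        # e.g. 'ada0' or 'cd0' match as drives, 'ada0s1a' matches as a slice of ada0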
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
if d:
self.facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
self.facts['devices'][d.group(1)].append(s.group(1))
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-manufacturer'
)
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(self.facts[k])
except UnicodeDecodeError:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
class DragonFlyHardware(FreeBSDHardware):
pass
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in get_file_lines("/proc/cpuinfo"):
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_count'] = i
self.facts['processor_cores'] = 'NA'
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
class AIX(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.split('\n'):
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
self.facts['processor_count'] = int(i)
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
self.facts['processor'] = data[1]
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
data = out.split(' ')
self.facts['processor_cores'] = int(data[1])
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat -v")
for line in out.split('\n'):
data = line.split()
if 'memory pages' in line:
pagecount = long(data[0])
if 'free pages' in line:
freecount = long(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.split('\n')
data = lines[1].split()
swaptotal_mb = long(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
self.facts['swaptotal_mb'] = swaptotal_mb
self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
def get_dmi_facts(self):
rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
self.facts['firmware_version'] = data[1].strip('IBM,')
class HPUX(Hardware):
"""
HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_hw_facts()
return self.facts
def get_cpu_facts(self):
if self.facts['architecture'] == '9000/800':
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip())
#Working with machinfo mess
elif self.facts['architecture'] == 'ia64':
if self.facts['distribution_version'] == "B.11.23":
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip())
if self.facts['distribution_version'] == "B.11.31":
                # machinfo only reports core counts on B.11.31 releases newer than 1204
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
if out.strip()== '0':
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
#If hyperthreading is active divide cores by 2
rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +',' ',out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
self.facts['processor_cores'] = int(data[0])/2
else:
if len(data) == 1:
self.facts['processor_cores'] = self.facts['processor_count']
else:
self.facts['processor_cores'] = int(data[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
def get_memory_facts(self):
pagesize = 4096
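        # The 'free' column of the last 'vmstat' line is free memory in 4 KB pages.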
rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) / 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data)
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q")
self.facts['swaptotal_mb'] = int(out.strip())
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().split('\n'):
swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
self.facts['swapfree_mb'] = swap
def get_hw_facts(self):
rc, out, err = module.run_command("model")
self.facts['model'] = out.strip()
if self.facts['architecture'] == 'ia64':
separator = ':'
if self.facts['distribution_version'] == "B.11.23":
separator = '='
rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
self.facts['firmware_version'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
if line.rstrip("\n"):
(key, value) = re.split(' = |: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
def get_system_profile(self):
rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
rc, out, err = module.run_command("sysctl hw.model")
if rc == 0:
self.facts['model'] = out.splitlines()[-1].split()[1]
self.facts['osversion'] = self.sysctl['kern.osversion']
self.facts['osrevision'] = self.sysctl['kern.osrevision']
def get_cpu_facts(self):
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
def get_memory_facts(self):
self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
rc, out, err = module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
class Network(Facts):
"""
This is a generic Network subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you must define:
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
All subclasses MUST define platform.
"""
platform = 'Generic'
IPV6_SCOPE = { '0' : 'global',
'10' : 'host',
'20' : 'link',
'40' : 'admin',
'50' : 'site',
'80' : 'organization' }
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Network.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self, module):
self.module = module
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxNetwork(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ip_path = self.module.get_bin_path('ip')
if ip_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, ip_path):
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and self.facts['os_family'] == 'RedHat' \
and self.facts['distribution_version'].startswith('4.'):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
words = out.split('\n')[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i+1]
elif words[i] == 'src':
interface[v]['address'] = words[i+1]
elif words[i] == 'via' and words[i+1] != command[v][-1]:
interface[v]['gateway'] = words[i+1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = { 'device': device }
if os.path.exists(os.path.join(path, 'address')):
macaddress = get_file_content(os.path.join(path, 'address'), default='')
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
# if os.path.exists(os.path.join(path, 'carrier')):
# interfaces[device]['link'] = get_file_content(os.path.join(path, 'carrier')) == '1'
if os.path.exists(os.path.join(path, 'device','driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
_type = get_file_content(os.path.join(path, 'type'))
if _type == '1':
interfaces[device]['type'] = 'ether'
elif _type == '512':
interfaces[device]['type'] = 'ppp'
elif _type == '772':
interfaces[device]['type'] = 'loopback'
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
if primary:
interfaces[device]['primary'] = primary
                    # use a separate variable so 'path' still points at the interface's sysfs directory below
                    all_slaves_path = os.path.join(path, 'bonding', 'all_slaves_active')
                    if os.path.exists(all_slaves_path):
                        interfaces[device]['all_slaves_active'] = get_file_content(all_slaves_path) == '1'
if os.path.exists(os.path.join(path,'device')):
interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.
# 1 = promisc
# 0 = no promisc
data = int(get_file_content(os.path.join(path, 'flags')),16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
def parse_ip_output(output, secondary=False):
for line in output.split('\n'):
if not line:
continue
words = line.split()
broadcast = ''
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
if len(words) > 3:
broadcast = words[3]
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
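                    # build the netmask from the prefix length: set the top netmask_length bits of a 32-bit word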
netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
iface = words[-1]
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['broadcast'] = broadcast
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address' : address,
'prefix' : prefix,
'scope' : scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
ip_path = module.get_bin_path("ip")
args = [ip_path, 'addr', 'show', 'primary', device]
rc, stdout, stderr = self.module.run_command(args)
primary_data = stdout
args = [ip_path, 'addr', 'show', 'secondary', device]
rc, stdout, stderr = self.module.run_command(args)
secondary_data = stdout
parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)
        # replace ':' with '_' in interface names since ':' is hard to use in templates
new_interfaces = {}
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':','_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
class GenericBsdIfconfigNetwork(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
It currently does not define
- default_ipv4 and default_ipv6
- type, mtu and network on interfaces
"""
platform = 'Generic_BSD_Ifconfig'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ifconfig_path = module.get_bin_path('ifconfig')
if ifconfig_path is None:
return self.facts
route_path = module.get_bin_path('route')
if route_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get 8.8.8.8 -> Google public DNS
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [route_path, '-n', 'get', '8.8.8.8'],
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
lines = out.split('\n')
for line in lines:
words = line.split()
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface:':
interface[v]['interface'] = words[1]
if words[0] == 'gateway:':
interface[v]['gateway'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = module.run_command([ifconfig_path, ifconfig_options])
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'pass':
continue
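                # a line that starts in column 0 opens a new interface stanza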
elif re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
        if len(words) >= 6:  # Newer FreeBSD versions report 'metric N mtu M'
            current_if['metric'] = words[3]
            current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
# FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
address = {'address': words[1]}
# deal with hex netmask
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
words[3] = '0x' + words[3]
if words[3].startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
else:
# otherwise assume this is a dotted quad
address['netmask'] = words[3]
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
# broadcast may be given or we need to calculate
if len(words) > 5:
address['broadcast'] = words[5]
else:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
def parse_inet6_line(self, words, current_if, ips):
address = {'address': words[1]}
if (len(words) >= 4) and (words[2] == 'prefixlen'):
address['prefix'] = words[3]
if (len(words) >= 6) and (words[4] == 'scopeid'):
address['scope'] = words[5]
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
if address['address'] not in localhost6:
ips['all_ipv6_addresses'].append(address['address'])
current_if['ipv6'].append(address)
def parse_unknown_line(self, words, current_if, ips):
# we are going to ignore unknown lines here - this may be
# a bad idea - but you can override it in your subclass
pass
def get_options(self, option_string):
start = option_string.find('<') + 1
end = option_string.rfind('>')
if (start > 0) and (end > 0) and (end > start + 1):
option_csv = option_string[start:end]
return option_csv.split(',')
else:
return []
def merge_default_interface(self, defaults, interfaces, ip_type):
if not 'interface' in defaults.keys():
return
if not defaults['interface'] in interfaces:
return
ifinfo = interfaces[defaults['interface']]
# copy all the interface values across except addresses
for item in ifinfo.keys():
if item != 'ipv4' and item != 'ipv6':
defaults[item] = ifinfo[item]
if len(ifinfo[ip_type]) > 0:
for item in ifinfo[ip_type][0].keys():
defaults[item] = ifinfo[ip_type][0][item]
class HPUXNetwork(Network):
"""
    HP-UX-specific subclass of Network. Defines networking facts:
- default_interface
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4 address information.
"""
platform = 'HP-UX'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
netstat_path = self.module.get_bin_path('netstat')
if netstat_path is None:
return self.facts
self.get_default_interfaces()
interfaces = self.get_interfaces_info()
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
return self.facts
def get_default_interfaces(self):
rc, out, err = module.run_command("/usr/bin/netstat -nr")
lines = out.split('\n')
for line in lines:
words = line.split()
if len(words) > 1:
if words[0] == 'default':
self.facts['default_interface'] = words[4]
self.facts['default_gateway'] = words[1]
def get_interfaces_info(self):
interfaces = {}
rc, out, err = module.run_command("/usr/bin/netstat -ni")
lines = out.split('\n')
for line in lines:
words = line.split()
for i in range(len(words) - 1):
if words[i][:3] == 'lan':
device = words[i]
interfaces[device] = { 'device': device }
address = words[i+3]
interfaces[device]['ipv4'] = { 'address': address }
network = words[i+2]
interfaces[device]['ipv4'] = { 'network': network,
'interface': device,
'address': address }
return interfaces
class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the Mac OS X/Darwin Network Class.
It uses the GenericBsdIfconfigNetwork unchanged
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class DragonFlyNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the DragonFly Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'DragonFly'
class AIXNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the AIX Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'AIX'
def get_default_interfaces(self, route_path):
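        # Unlike the generic BSD version, parse 'netstat -nr' and take the gateway
        # and interface columns from the 'default' route line.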
netstat_path = module.get_bin_path('netstat')
rc, out, err = module.run_command([netstat_path, '-nr'])
interface = dict(v4 = {}, v6 = {})
lines = out.split('\n')
for line in lines:
words = line.split()
if len(words) > 1 and words[0] == 'default':
if '.' in words[1]:
interface['v4']['gateway'] = words[1]
interface['v4']['interface'] = words[5]
elif ':' in words[1]:
interface['v6']['gateway'] = words[1]
interface['v6']['interface'] = words[5]
return interface['v4'], interface['v6']
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, ifconfig_options])
for line in out.split('\n'):
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match('^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
uname_path = module.get_bin_path('uname')
if uname_path:
rc, out, err = module.run_command([uname_path, '-W'])
# don't bother with wpars it does not work
# zero means not in wpar
if not rc and out.split()[0] == '0':
if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
entstat_path = module.get_bin_path('entstat')
if entstat_path:
rc, out, err = module.run_command([entstat_path, current_if['device'] ])
if rc != 0:
break
for line in out.split('\n'):
if not line:
pass
buff = re.match('^Hardware Address: (.*)', line)
if buff:
current_if['macaddress'] = buff.group(1)
buff = re.match('^Device Type:', line)
if buff and re.match('.*Ethernet', line):
current_if['type'] = 'ether'
# device must have mtu attribute in ODM
if 'mtu' not in current_if:
lsattr_path = module.get_bin_path('lsattr')
if lsattr_path:
rc, out, err = module.run_command([lsattr_path,'-El', current_if['device'] ])
if rc != 0:
break
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'mtu':
current_if['mtu'] = words[1]
return interfaces, ips
# AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
if device not in interfaces.keys():
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
octet = ('0' + octet)[-2:None]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Virtual.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
if re.search('/lxc/', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
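            # /proc/bc (beancounters) only exists on the OpenVZ hardware node, not inside containers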
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
self.facts['virtualization_type'] = systemd_container
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'oVirt':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match('^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
lscpu = module.get_bin_path('lscpu')
if lscpu:
rc, out, err = module.run_command(["lscpu"])
if rc == 0:
for line in out.split("\n"):
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
class FreeBSDVirtual(Virtual):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
class DragonFlyVirtual(FreeBSDVirtual):
pass
class OpenBSDVirtual(Virtual):
"""
    This is an OpenBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'OpenBSD'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
class HPUXVirtual(Virtual):
"""
    This is an HP-UX-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
rc, out, err = module.run_command("/usr/sbin/prtdiag")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
# Check if it's a zone
if os.path.exists("/usr/bin/zonename"):
rc, out, err = module.run_command("/usr/bin/zonename")
if out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
# If it's a zone check if we can detect if our global zone is itself virtualized.
# Relies on the "guest tools" (e.g. vmware tools) to be installed
if 'container' in self.facts and self.facts['container'] == 'zone':
rc, out, err = module.run_command("/usr/sbin/modinfo")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
# Detect domaining on Sparc hardware
if os.path.exists("/usr/sbin/virtinfo"):
# The output of virtinfo is different whether we are on a machine with logical
# domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first.
rc, out, err = module.run_command("/usr/sbin/virtinfo -p")
# The output contains multiple lines with different keys like this:
# DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
            # The output may also be unformatted, and the return code is 0 even on error conditions such as:
# virtinfo can only be run from the global zone
try:
for line in out.split('\n'):
fields = line.split('|')
if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ):
self.facts['virtualization_type'] = 'ldom'
self.facts['virtualization_role'] = 'guest'
hostfeatures = []
for field in fields[2:]:
arg = field.split('=')
if( arg[1] == 'true' ):
hostfeatures.append(arg[0])
if( len(hostfeatures) > 0 ):
self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
except ValueError:
pass
def get_file_content(path, default=None, strip=True):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
try:
datafile = open(path)
data = datafile.read()
if strip:
data = data.strip()
if len(data) == 0:
data = default
finally:
datafile.close()
return data
def get_file_lines(path):
'''get list of lines from file'''
data = get_file_content(path)
if data:
ret = data.splitlines()
else:
ret = []
return ret
def ansible_facts(module):
facts = {}
facts.update(Facts().populate())
facts.update(Hardware().populate())
facts.update(Network(module).populate())
facts.update(Virtual().populate())
return facts
# ===========================================
def get_all_facts(module):
setup_options = dict(module_setup=True)
facts = ansible_facts(module)
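    # expose each fact as an 'ansible_'-prefixed variable, mapping dashes to underscores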
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
# Look for the path to the facter, cfacter, and ohai binaries and set
# the variable to that path.
facter_path = module.get_bin_path('facter')
cfacter_path = module.get_bin_path('cfacter')
ohai_path = module.get_bin_path('ohai')
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
if facter_path is not None:
rc, out, err = module.run_command(facter_path + " --json")
facter = True
try:
facter_ds = json.loads(out)
except:
facter = False
if facter:
for (k,v) in facter_ds.items():
setup_options["facter_%s" % k] = v
# ditto for ohai
if ohai_path is not None:
rc, out, err = module.run_command(ohai_path)
ohai = True
try:
ohai_ds = json.loads(out)
except:
ohai = False
if ohai:
for (k,v) in ohai_ds.items():
k2 = "ohai_%s" % k.replace('-', '_')
setup_options[k2] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['_ansible_verbose_override'] = True
return setup_result
| fossilet/ansible | lib/ansible/module_utils/facts.py | Python | gpl-3.0 | 132,636 | 0.004976 |
import dataclasses
from typing import Dict, Mapping, Optional, Tuple
import numpy as np
from stable_baselines3.common import vec_env
from imitation.data import types
class Buffer:
"""A FIFO ring buffer for NumPy arrays of a fixed shape and dtype.
Supports random sampling with replacement.
"""
capacity: int
"""The number of data samples that can be stored in this buffer."""
sample_shapes: Dict[str, Tuple[int, ...]]
"""The shapes of each data sample stored in this buffer."""
_arrays: Dict[str, np.ndarray]
"""The underlying NumPy arrays (which actually store the data)."""
_n_data: int
"""The number of samples currently stored in this buffer.
An integer in `range(0, self.capacity + 1)`. This attribute is the return
value of `self.size()`.
"""
_idx: int
"""The index of the first row that new data should be written to.
An integer in `range(0, self.capacity)`.
"""
def __init__(
self,
capacity: int,
sample_shapes: Mapping[str, Tuple[int, ...]],
dtypes: Mapping[str, np.dtype],
):
"""Constructs a Buffer.
Args:
capacity: The number of samples that can be stored.
sample_shapes: A dictionary mapping string keys to the shape of
samples associated with that key.
dtypes (`np.dtype`-like): A dictionary mapping string keys to the dtype
of samples associated with that key.
Raises:
KeyError: `sample_shapes` and `dtypes` have different keys.
"""
if sample_shapes.keys() != dtypes.keys():
            raise KeyError("sample_shapes and dtypes keys don't match")
self.capacity = capacity
self.sample_shapes = {k: tuple(shape) for k, shape in sample_shapes.items()}
self._arrays = {
k: np.zeros((capacity,) + shape, dtype=dtypes[k])
for k, shape in self.sample_shapes.items()
}
self._n_data = 0
self._idx = 0
@classmethod
def from_data(
cls,
data: Dict[str, np.ndarray],
capacity: Optional[int] = None,
truncate_ok: bool = False,
) -> "Buffer":
"""Constructs and return a Buffer containing the provided data.
Shapes and dtypes are automatically inferred.
Args:
data: A dictionary mapping keys to data arrays. The arrays may differ
in their shape, but should agree in the first axis.
capacity: The Buffer capacity. If not provided, then this is automatically
set to the size of the data, so that the returned Buffer is at full
capacity.
truncate_ok: Whether to error if `capacity` < the number of samples in
`data`. If False, then only store the last `capacity` samples from
`data` when overcapacity.
Examples:
            In the following examples, suppose the arrays in `data` are length-1000.
`Buffer` with same capacity as arrays in `data`::
Buffer.from_data(data)
`Buffer` with larger capacity than arrays in `data`::
Buffer.from_data(data, 10000)
            `Buffer` with smaller capacity than arrays in `data`. Without
`truncate_ok=True`, `from_data` will error::
Buffer.from_data(data, 5, truncate_ok=True)
Raises:
ValueError: `data` is empty.
ValueError: `data` has items mapping to arrays differing in the
length of their first axis.
"""
data_capacities = [arr.shape[0] for arr in data.values()]
data_capacities = np.unique(data_capacities)
if len(data) == 0:
raise ValueError("No keys in data.")
if len(data_capacities) > 1:
raise ValueError("Keys map to different length values")
if capacity is None:
capacity = data_capacities[0]
sample_shapes = {k: arr.shape[1:] for k, arr in data.items()}
dtypes = {k: arr.dtype for k, arr in data.items()}
buf = cls(capacity, sample_shapes, dtypes)
buf.store(data, truncate_ok=truncate_ok)
return buf
def store(self, data: Dict[str, np.ndarray], truncate_ok: bool = False) -> None:
"""Stores new data samples, replacing old samples with FIFO priority.
Args:
data: A dictionary mapping keys `k` to arrays with shape
`(n_samples,) + self.sample_shapes[k]`, where `n_samples` is less
than or equal to `self.capacity`.
truncate_ok: If False, then error if the length of `transitions` is
greater than `self.capacity`. Otherwise, store only the final
`self.capacity` transitions.
Raises:
ValueError: `data` is empty.
ValueError: If `n_samples` is greater than `self.capacity`.
ValueError: data is the wrong shape.
"""
expected_keys = set(self.sample_shapes.keys())
missing_keys = expected_keys.difference(data.keys())
unexpected_keys = set(data.keys()).difference(expected_keys)
if len(missing_keys) > 0:
raise ValueError(f"Missing keys {missing_keys}")
if len(unexpected_keys) > 0:
raise ValueError(f"Unexpected keys {unexpected_keys}")
n_samples = [arr.shape[0] for arr in data.values()]
n_samples = np.unique(n_samples)
if len(n_samples) > 1:
raise ValueError("Keys map to different length values.")
n_samples = n_samples[0]
if n_samples == 0:
raise ValueError("Trying to store empty data.")
if n_samples > self.capacity:
if not truncate_ok:
raise ValueError("Not enough capacity to store data.")
else:
data = {k: arr[-self.capacity :] for k, arr in data.items()}
for k, arr in data.items():
if arr.shape[1:] != self.sample_shapes[k]:
raise ValueError(f"Wrong data shape for {k}")
new_idx = self._idx + n_samples
if new_idx > self.capacity:
n_remain = self.capacity - self._idx
# Need to loop around the buffer. Break into two "easy" calls.
self._store_easy({k: arr[:n_remain] for k, arr in data.items()})
assert self._idx == 0
self._store_easy({k: arr[n_remain:] for k, arr in data.items()})
else:
self._store_easy(data)
def _store_easy(self, data: Dict[str, np.ndarray]) -> None:
"""Stores new data samples, replacing old samples with FIFO priority.
Requires that `size(data) <= self.capacity - self._idx`, where `size(data)` is
the number of rows in every array in `data.values()`. Updates `self._idx`
to be the insertion point of the next call to `_store_easy` call,
looping back to `self._idx = 0` if necessary.
Also updates `self._n_data`.
Args:
data: Same as in `self.store`'s docstring, except with the additional
constraint `size(data) <= self.capacity - self._idx`.
"""
n_samples = [arr.shape[0] for arr in data.values()]
n_samples = np.unique(n_samples)
assert len(n_samples) == 1
n_samples = n_samples[0]
assert n_samples <= self.capacity - self._idx
idx_hi = self._idx + n_samples
for k, arr in data.items():
self._arrays[k][self._idx : idx_hi] = arr
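        # advance the write pointer, wrapping to 0 once the end of the ring is reached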
self._idx = idx_hi % self.capacity
self._n_data = min(self._n_data + n_samples, self.capacity)
def sample(self, n_samples: int) -> Dict[str, np.ndarray]:
"""Uniformly sample `n_samples` samples from the buffer with replacement.
Args:
n_samples: The number of samples to randomly sample.
Returns:
            samples: A dictionary mapping each key `k` to an array with shape
                `(n_samples,) + self.sample_shapes[k]`.
Raises:
ValueError: The buffer is empty.
"""
if self.size() == 0:
raise ValueError("Buffer is empty")
ind = np.random.randint(self.size(), size=n_samples)
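        # sampling with replacement: indices may repeat and are drawn only from the filled rows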
return {k: buffer[ind] for k, buffer in self._arrays.items()}
def size(self) -> Optional[int]:
"""Returns the number of samples stored in the buffer."""
assert 0 <= self._n_data <= self.capacity
return self._n_data
class ReplayBuffer:
"""Buffer for Transitions."""
capacity: int
"""The number of data samples that can be stored in this buffer."""
def __init__(
self,
capacity: int,
venv: Optional[vec_env.VecEnv] = None,
*,
obs_shape: Optional[Tuple[int, ...]] = None,
act_shape: Optional[Tuple[int, ...]] = None,
obs_dtype: Optional[np.dtype] = None,
act_dtype: Optional[np.dtype] = None,
):
"""Constructs a ReplayBuffer.
Args:
capacity: The number of samples that can be stored.
venv: The environment whose action and observation
spaces can be used to determine the data shapes of the underlying
buffers. Overrides all the following arguments.
obs_shape: The shape of the observation space.
act_shape: The shape of the action space.
obs_dtype: The dtype of the observation space.
act_dtype: The dtype of the action space.
Raises:
ValueError: Couldn't infer the observation and action shapes and dtypes
from the arguments.
"""
params = [obs_shape, act_shape, obs_dtype, act_dtype]
if venv is not None:
if np.any([x is not None for x in params]):
raise ValueError("Specified shape or dtype and environment.")
obs_shape = tuple(venv.observation_space.shape)
act_shape = tuple(venv.action_space.shape)
obs_dtype = venv.observation_space.dtype
act_dtype = venv.action_space.dtype
else:
if np.any([x is None for x in params]):
raise ValueError("Shape or dtype missing and no environment specified.")
self.capacity = capacity
sample_shapes = {
"obs": obs_shape,
"acts": act_shape,
"next_obs": obs_shape,
"dones": (),
"infos": (),
}
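        # 'dones' and 'infos' are stored as one scalar per transition, hence the empty shapes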
dtypes = {
"obs": obs_dtype,
"acts": act_dtype,
"next_obs": obs_dtype,
"dones": bool,
"infos": np.object,
}
self._buffer = Buffer(capacity, sample_shapes=sample_shapes, dtypes=dtypes)
@classmethod
def from_data(
cls,
transitions: types.Transitions,
capacity: Optional[int] = None,
truncate_ok: bool = False,
) -> "ReplayBuffer":
"""Construct and return a ReplayBuffer containing the provided data.
Shapes and dtypes are automatically inferred, and the returned ReplayBuffer is
ready for sampling.
Args:
transitions: Transitions to store.
capacity: The ReplayBuffer capacity. If not provided, then this is
automatically set to the size of the data, so that the returned Buffer
is at full capacity.
truncate_ok: Whether to error if `capacity` < the number of samples in
`data`. If False, then only store the last `capacity` samples from
`data` when overcapacity.
Examples:
`ReplayBuffer` with same capacity as arrays in `data`::
ReplayBuffer.from_data(data)
`ReplayBuffer` with larger capacity than arrays in `data`::
ReplayBuffer.from_data(data, 10000)
            `ReplayBuffer` with smaller capacity than arrays in `data`. Without
`truncate_ok=True`, `from_data` will error::
ReplayBuffer.from_data(data, 5, truncate_ok=True)
Returns:
A new ReplayBuffer.
"""
obs_shape = transitions.obs.shape[1:]
act_shape = transitions.acts.shape[1:]
if capacity is None:
capacity = transitions.obs.shape[0]
instance = cls(
capacity=capacity,
obs_shape=obs_shape,
act_shape=act_shape,
obs_dtype=transitions.obs.dtype,
act_dtype=transitions.acts.dtype,
)
instance.store(transitions, truncate_ok=truncate_ok)
return instance
def sample(self, n_samples: int) -> types.Transitions:
"""Sample obs-act-obs triples.
Args:
n_samples: The number of samples.
Returns:
A Transitions named tuple containing n_samples transitions.
"""
sample = self._buffer.sample(n_samples)
return types.Transitions(**sample)
def store(self, transitions: types.Transitions, truncate_ok: bool = True) -> None:
"""Store obs-act-obs triples.
Args:
transitions: Transitions to store.
truncate_ok: If False, then error if the length of `transitions` is
greater than `self.capacity`. Otherwise, store only the final
`self.capacity` transitions.
Raises:
ValueError: The arguments didn't have the same length.
"""
trans_dict = dataclasses.asdict(transitions)
# Remove unnecessary fields
trans_dict = {k: trans_dict[k] for k in self._buffer.sample_shapes.keys()}
self._buffer.store(trans_dict, truncate_ok=truncate_ok)
def size(self) -> Optional[int]:
"""Returns the number of samples stored in the buffer."""
return self._buffer.size()
| humancompatibleai/imitation | src/imitation/data/buffer.py | Python | mit | 13,770 | 0.001888 |
from storytext.javaswttoolkit import describer as swtdescriber
from org.eclipse.core.internal.runtime import InternalPlatform
from org.eclipse.ui.forms.widgets import ExpandableComposite
import os
from pprint import pprint
class Describer(swtdescriber.Describer):
swtdescriber.Describer.stateWidgets = [ ExpandableComposite ] + swtdescriber.Describer.stateWidgets
swtdescriber.Describer.ignoreChildren = (ExpandableComposite,) + swtdescriber.Describer.ignoreChildren
def buildImages(self):
swtdescriber.Describer.buildImages(self)
self.buildImagesFromBundles()
def buildImagesFromBundles(self):
allImageTypes = [ "gif", "png", "jpg" ]
allImageTypes += [ i.upper() for i in allImageTypes ]
cacheFile = os.path.join(os.getenv("STORYTEXT_HOME"), "osgi_bundle_image_types")
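        # the cache records which image types each bundle actually contains so that later
        # runs only search for those extensions instead of every type in every bundle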
cacheExists = os.path.isfile(cacheFile)
bundleImageTypes = eval(open(cacheFile).read()) if cacheExists else {}
for bundle in InternalPlatform.getDefault().getBundleContext().getBundles():
usedTypes = []
name = bundle.getSymbolicName()
imageTypes = bundleImageTypes.get(name, allImageTypes)
for imageType in imageTypes:
self.logger.debug("Searching bundle " + name + " for images of type " + imageType)
images = bundle.findEntries("/", "*." + imageType, True)
if images and images.hasMoreElements():
self.storeAllImages(images)
usedTypes.append(imageType)
if not cacheExists:
bundleImageTypes[name] = usedTypes
if not cacheExists:
f = open(cacheFile, "w")
pprint(bundleImageTypes, f)
f.close()
def storeAllImages(self, entries):
while entries.hasMoreElements():
url = entries.nextElement()
self.storeImageData(url)
def getExpandableCompositeState(self, widget):
return widget.isExpanded()
def getExpandableCompositeDescription(self, widget):
state = self.getExpandableCompositeState(widget)
self.widgetsWithState[widget] = state
desc = "Expandable '" + widget.getText() + "' "
desc += "(expanded)" if state else "(collapsed)"
if state:
clientDesc = self.getDescription(widget.getClient())
desc += "\n " + clientDesc.replace("\n", "\n ")
return desc
| emilybache/texttest-runner | src/main/python/storytext/lib/storytext/javarcptoolkit/describer.py | Python | mit | 2,499 | 0.008003 |
#!/usr/bin/env python
from connection import Conn as Connection
| Polychart/builder | server/polychartQuery/csv/__init__.py | Python | agpl-3.0 | 65 | 0 |
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
from .common import src_tree_iterator
DESC = ""
def populate_args(extract_args_p):
extract_args = extract_args_p.add_argument_group('TREE EDIT OPTIONS')
extract_args.add_argument("--orthologs", dest="orthologs",
nargs="*",
help="")
extract_args.add_argument("--duplications", dest="duplications",
action="store_true",
help="")
def run(args):
from .. import Tree, PhyloTree
for nw in src_tree_iterator(args):
if args.orthologs is not None:
t = PhyloTree(nw)
for e in t.get_descendant_evol_events():
print(e.in_seqs, e.out_seqs)
| karrtikr/ete | ete3/tools/ete_extract.py | Python | gpl-3.0 | 2,246 | 0.00089 |
# devices/md.py
#
# Copyright (C) 2009-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
import os
import six
from gi.repository import BlockDev as blockdev
from ..devicelibs import mdraid, raid
from .. import errors
from .. import util
from ..flags import flags
from ..storage_log import log_method_call
from .. import udev
from ..size import Size
import logging
log = logging.getLogger("blivet")
from .storage import StorageDevice
from .container import ContainerDevice
from .raid import RaidDevice
class MDRaidArrayDevice(ContainerDevice, RaidDevice):
""" An mdraid (Linux RAID) device. """
_type = "mdarray"
_packages = ["mdadm"]
_devDir = "/dev/md"
_formatClassName = property(lambda s: "mdmember")
_formatUUIDAttr = property(lambda s: "mdUuid")
def __init__(self, name, level=None, major=None, minor=None, size=None,
memberDevices=None, totalDevices=None,
uuid=None, fmt=None, exists=False, metadataVersion=None,
parents=None, sysfsPath=''):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
:keyword uuid: the device UUID
:type uuid: str
:keyword level: the device's RAID level
:type level: any valid RAID level descriptor
:keyword int memberDevices: the number of active member devices
:keyword int totalDevices: the total number of member devices
:keyword metadataVersion: the version of the device's md metadata
:type metadataVersion: str (eg: "0.90")
:keyword minor: the device minor (obsolete?)
:type minor: int
"""
# pylint: disable=unused-argument
# These attributes are used by _addParent, so they must be initialized
# prior to instantiating the superclass.
self._memberDevices = 0 # the number of active (non-spare) members
self._totalDevices = 0 # the total number of members
# avoid attribute-defined-outside-init pylint warning
self._level = None
super(MDRaidArrayDevice, self).__init__(name, fmt=fmt, uuid=uuid,
exists=exists, size=size,
parents=parents,
sysfsPath=sysfsPath)
try:
self.level = level
except errors.DeviceError as e:
# Could not set the level, so set loose the parents that were
# added in superclass constructor.
for dev in self.parents:
dev.removeChild()
raise e
self.uuid = uuid
self._totalDevices = util.numeric_type(totalDevices)
self.memberDevices = util.numeric_type(memberDevices)
self.chunkSize = mdraid.MD_CHUNK_SIZE
if not self.exists and not isinstance(metadataVersion, str):
self.metadataVersion = "default"
else:
self.metadataVersion = metadataVersion
if self.parents and self.parents[0].type == "mdcontainer" and self.type != "mdbiosraidarray":
raise errors.DeviceError("A device with mdcontainer member must be mdbiosraidarray.")
if self.exists and self.mdadmFormatUUID and not flags.testing:
# this is a hack to work around mdadm's insistence on giving
# really high minors to arrays it has no config entry for
with open("/etc/mdadm.conf", "a") as c:
c.write("ARRAY %s UUID=%s\n" % (self.path, self.mdadmFormatUUID))
@property
def mdadmFormatUUID(self):
""" This array's UUID, formatted for external use.
:returns: the array's UUID in mdadm format, if available
:rtype: str or NoneType
"""
formatted_uuid = None
if self.uuid is not None:
try:
formatted_uuid = blockdev.md.get_md_uuid(self.uuid)
except blockdev.MDRaidError:
pass
return formatted_uuid
@property
def level(self):
""" Return the raid level
:returns: raid level value
:rtype: an object that represents a RAID level
"""
return self._level
@property
def _levels(self):
""" Allowed RAID level for this type of device."""
return mdraid.RAID_levels
@level.setter
def level(self, value):
""" Set the RAID level and enforce restrictions based on it.
:param value: new raid level
:param type: object
:raises :class:`~.errors.DeviceError`: if value does not describe
a valid RAID level
:returns: None
"""
try:
level = self._getLevel(value, self._levels)
except ValueError as e:
raise errors.DeviceError(e)
self._level = level
@property
def createBitmap(self):
""" Whether or not a bitmap should be created on the array.
            If the array is sufficiently small, a bitmap yields no benefit.
If the array has no redundancy, a bitmap is just pointless.
"""
try:
return self.level.has_redundancy() and self.size >= Size(1000) and self.format.type != "swap"
except errors.RaidError:
# If has_redundancy() raises an exception then this device has
# a level for which the redundancy question is meaningless. In
# that case, creating a write-intent bitmap would be a meaningless
# action.
return False
def getSuperBlockSize(self, raw_array_size):
"""Estimate the superblock size for a member of an array,
given the total available memory for this array and raid level.
:param raw_array_size: total available for this array and level
:type raw_array_size: :class:`~.size.Size`
:returns: estimated superblock size
:rtype: :class:`~.size.Size`
"""
return blockdev.md.get_superblock_size(raw_array_size,
version=self.metadataVersion)
@property
def size(self):
"""Returns the actual or estimated size depending on whether or
not the array exists.
"""
if not self.exists or not self.mediaPresent:
try:
size = self.level.get_size([d.size for d in self.devices],
self.memberDevices,
self.chunkSize,
self.getSuperBlockSize)
except (blockdev.MDRaidError, errors.RaidError) as e:
log.info("could not calculate size of device %s for raid level %s: %s", self.name, self.level, e)
size = Size(0)
log.debug("non-existent RAID %s size == %s", self.level, size)
else:
size = self.currentSize
log.debug("existing RAID %s size == %s", self.level, size)
return size
def updateSize(self):
# pylint: disable=bad-super-call
super(ContainerDevice, self).updateSize()
@property
def description(self):
levelstr = self.level.nick if self.level.nick else self.level.name
return "MDRAID set (%s)" % levelstr
def __repr__(self):
s = StorageDevice.__repr__(self)
s += (" level = %(level)s spares = %(spares)s\n"
" members = %(memberDevices)s\n"
" total devices = %(totalDevices)s"
" metadata version = %(metadataVersion)s" %
{"level": self.level, "spares": self.spares,
"memberDevices": self.memberDevices,
"totalDevices": self.totalDevices,
"metadataVersion": self.metadataVersion})
return s
@property
def dict(self):
d = super(MDRaidArrayDevice, self).dict
d.update({"level": str(self.level),
"spares": self.spares, "memberDevices": self.memberDevices,
"totalDevices": self.totalDevices,
"metadataVersion": self.metadataVersion})
return d
@property
def mdadmConfEntry(self):
""" This array's mdadm.conf entry. """
uuid = self.mdadmFormatUUID
if self.memberDevices is None or not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
fmt = "ARRAY %s level=%s num-devices=%d UUID=%s\n"
return fmt % (self.path, self.level, self.memberDevices, uuid)
@property
def totalDevices(self):
""" Total number of devices in the array, including spares. """
if not self.exists:
return self._totalDevices
else:
return len(self.parents)
def _getMemberDevices(self):
return self._memberDevices
def _setMemberDevices(self, number):
if not isinstance(number, six.integer_types):
raise ValueError("memberDevices must be an integer")
if not self.exists and number > self.totalDevices:
raise ValueError("memberDevices cannot be greater than totalDevices")
self._memberDevices = number
memberDevices = property(_getMemberDevices, _setMemberDevices,
doc="number of member devices")
def _getSpares(self):
spares = 0
if self.memberDevices is not None:
if self.totalDevices is not None and \
self.totalDevices > self.memberDevices:
spares = self.totalDevices - self.memberDevices
elif self.totalDevices is None:
spares = self.memberDevices
self._totalDevices = self.memberDevices
return spares
def _setSpares(self, spares):
max_spares = self.level.get_max_spares(len(self.parents))
if spares > max_spares:
log.debug("failed to set new spares value %d (max is %d)",
spares, max_spares)
raise errors.DeviceError("new spares value is too large")
if self.totalDevices > spares:
self.memberDevices = self.totalDevices - spares
spares = property(_getSpares, _setSpares)
def _addParent(self, member):
super(MDRaidArrayDevice, self)._addParent(member)
if self.status and member.format.exists:
# we always probe since the device may not be set up when we want
# information about it
self._size = self.currentSize
# These should be incremented when adding new member devices except
# during devicetree.populate. When detecting existing arrays we will
# have gotten these values from udev and will use them to determine
# whether we found all of the members, so we shouldn't change them in
# that case.
if not member.format.exists:
self._totalDevices += 1
self.memberDevices += 1
def _removeParent(self, member):
error_msg = self._validateParentRemoval(self.level, member)
if error_msg:
raise errors.DeviceError(error_msg)
super(MDRaidArrayDevice, self)._removeParent(member)
self.memberDevices -= 1
@property
def _trueStatusStrings(self):
""" Strings in state file for which status() should return True."""
return ("clean", "active", "active-idle", "readonly", "read-auto")
@property
def status(self):
""" This device's status.
For now, this should return a boolean:
True the device is open and ready for use
False the device is not open
"""
# check the status in sysfs
status = False
if not self.exists:
return status
if os.path.exists(self.path) and not self.sysfsPath:
# the array has been activated from outside of blivet
self.updateSysfsPath()
# make sure the active array is the one we expect
info = udev.get_device(self.sysfsPath)
uuid = udev.device_get_md_uuid(info)
if uuid and uuid != self.uuid:
log.warning("md array %s is active, but has UUID %s -- not %s",
self.path, uuid, self.uuid)
self.sysfsPath = ""
return status
state_file = "%s/md/array_state" % self.sysfsPath
try:
state = open(state_file).read().strip()
if state in self._trueStatusStrings:
status = True
except IOError:
status = False
return status
def memberStatus(self, member):
if not (self.status and member.status):
return
member_name = os.path.basename(member.sysfsPath)
path = "/sys/%s/md/dev-%s/state" % (self.sysfsPath, member_name)
try:
state = open(path).read().strip()
except IOError:
state = None
return state
@property
def degraded(self):
""" Return True if the array is running in degraded mode. """
rc = False
degraded_file = "%s/md/degraded" % self.sysfsPath
if os.access(degraded_file, os.R_OK):
val = open(degraded_file).read().strip()
if val == "1":
rc = True
return rc
@property
def members(self):
""" Returns this array's members.
:rtype: list of :class:`StorageDevice`
"""
return list(self.parents)
@property
def complete(self):
""" An MDRaidArrayDevice is complete if it has at least as many
component devices as its count of active devices.
"""
return (self.memberDevices <= len(self.members)) or not self.exists
@property
def devices(self):
""" Return a list of this array's member device instances. """
return self.parents
def _postSetup(self):
super(MDRaidArrayDevice, self)._postSetup()
self.updateSysfsPath()
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
disks = []
for member in self.devices:
member.setup(orig=orig)
disks.append(member.path)
blockdev.md.activate(self.path, members=disks, uuid=self.mdadmFormatUUID)
def _postTeardown(self, recursive=False):
super(MDRaidArrayDevice, self)._postTeardown(recursive=recursive)
        # mdadm reuses minors indiscriminately when there is no mdadm.conf, so
# we need to clear the sysfs path now so our status method continues to
# give valid results
self.sysfsPath = ''
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# We don't really care what the array's state is. If the device
# file exists, we want to deactivate it. mdraid has too many
# states.
if self.exists and os.path.exists(self.path):
blockdev.md.deactivate(self.path)
self._postTeardown(recursive=recursive)
def _postCreate(self):
# this is critical since our status method requires a valid sysfs path
self.exists = True # this is needed to run updateSysfsPath
self.updateSysfsPath()
StorageDevice._postCreate(self)
# update our uuid attribute with the new array's UUID
# XXX this won't work for containers since no UUID is reported for them
info = blockdev.md.detail(self.path)
self.uuid = info.uuid
for member in self.devices:
member.format.mdUuid = self.uuid
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
disks = [disk.path for disk in self.devices]
spares = len(self.devices) - self.memberDevices
level = None
if self.level:
level = str(self.level)
blockdev.md.create(self.path, level, disks, spares,
version=self.metadataVersion,
bitmap=self.createBitmap)
udev.settle()
def _remove(self, member):
self.setup()
# see if the device must be marked as failed before it can be removed
fail = (self.memberStatus(member) == "in_sync")
blockdev.md.remove(self.path, member.path, fail)
def _add(self, member):
""" Add a member device to an array.
:param str member: the member's path
:raises: blockdev.MDRaidError
"""
self.setup()
raid_devices = None
try:
if not self.level.has_redundancy():
if self.level is not raid.Linear:
raid_devices = int(blockdev.md.detail(self.name).raid_devices) + 1
except errors.RaidError:
pass
blockdev.md.add(self.path, member.path, raid_devs=raid_devices)
@property
def formatArgs(self):
formatArgs = []
if self.format.type == "ext2":
recommended_stride = self.level.get_recommended_stride(self.memberDevices)
if recommended_stride:
formatArgs = ['-R', 'stride=%d' % recommended_stride ]
return formatArgs
@property
def model(self):
return self.description
def dracutSetupArgs(self):
return set(["rd.md.uuid=%s" % self.mdadmFormatUUID])
def populateKSData(self, data):
if self.isDisk:
return
super(MDRaidArrayDevice, self).populateKSData(data)
data.level = self.level.name
data.spares = self.spares
data.members = ["raid.%d" % p.id for p in self.parents]
data.preexist = self.exists
data.device = self.name
class MDContainerDevice(MDRaidArrayDevice):
_type = "mdcontainer"
def __init__(self, name, **kwargs):
kwargs['level'] = raid.Container
super(MDContainerDevice, self).__init__(name, **kwargs)
@property
def _levels(self):
return mdraid.MDRaidLevels(["container"])
@property
def description(self):
return "BIOS RAID container"
@property
def mdadmConfEntry(self):
uuid = self.mdadmFormatUUID
if not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
return "ARRAY %s UUID=%s\n" % (self.path, uuid)
@property
def _trueStatusStrings(self):
return ("clean", "active", "active-idle", "readonly", "read-auto", "inactive")
def teardown(self, recursive=None):
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# Since BIOS RAID sets (containers in mdraid terminology) never change
# there is no need to stop them and later restart them. Not stopping
# (and thus also not starting) them also works around bug 523334
return
@property
def mediaPresent(self):
# Containers should not get any format handling done
# (the device node does not allow read / write calls)
return False
class MDBiosRaidArrayDevice(MDRaidArrayDevice):
_type = "mdbiosraidarray"
_formatClassName = property(lambda s: None)
_isDisk = True
_partitionable = True
def __init__(self, name, **kwargs):
super(MDBiosRaidArrayDevice, self).__init__(name, **kwargs)
# For container members probe size now, as we cannot determine it
        # when torn down.
self._size = self.currentSize
@property
def size(self):
# For container members return probed size, as we cannot determine it
        # when torn down.
return self._size
@property
def description(self):
levelstr = self.level.nick if self.level.nick else self.level.name
return "BIOS RAID set (%s)" % levelstr
@property
def mdadmConfEntry(self):
uuid = self.mdadmFormatUUID
if not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
return "ARRAY %s UUID=%s\n" % (self.path, uuid)
@property
def members(self):
# If the array is a BIOS RAID array then its unique parent
# is a container and its actual member devices are the
# container's parents.
return list(self.parents[0].parents)
def teardown(self, recursive=None):
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# Since BIOS RAID sets (containers in mdraid terminology) never change
# there is no need to stop them and later restart them. Not stopping
# (and thus also not starting) them also works around bug 523334
return
| dwlehman/blivet | blivet/devices/md.py | Python | lgpl-2.1 | 22,939 | 0.000915 |
"""
Title: Image similarity estimation using a Siamese Network with a contrastive loss
Author: Mehdi
Date created: 2021/05/06
Last modified: 2021/05/06
Description: Similarity learning using a siamese network trained with a contrastive loss.
"""
"""
## Introduction
[Siamese Networks](https://en.wikipedia.org/wiki/Siamese_neural_network)
are neural networks which share weights between two or more sister networks,
each producing embedding vectors of its respective inputs.
In supervised similarity learning, the networks are then trained to maximize the
contrast (distance) between embeddings of inputs of different classes, while minimizing the distance between
embeddings of similar classes, resulting in embedding spaces that reflect
the class segmentation of the training inputs.
"""
"""
## Setup
"""
import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
"""
## Hyperparameters
"""
epochs = 10
batch_size = 16
margin = 1  # Margin for contrastive loss.
"""
## Load the MNIST dataset
"""
(x_train_val, y_train_val), (x_test, y_test) = keras.datasets.mnist.load_data()
# Change the data type to a floating point format
x_train_val = x_train_val.astype("float32")
x_test = x_test.astype("float32")
"""
## Define training and validation sets
"""
# Keep 50% of train_val in validation set
x_train, x_val = x_train_val[:30000], x_train_val[30000:]
y_train, y_val = y_train_val[:30000], y_train_val[30000:]
del x_train_val, y_train_val
"""
## Create pairs of images
We will train the model to differentiate between digits of different classes. For
example, digit `0` needs to be differentiated from the rest of the
digits (`1` through `9`), digit `1` - from `0` and `2` through `9`, and so on.
To carry this out, we will select N random images from class A (for example,
for digit `0`) and pair them with N random images from another class B
(for example, for digit `1`). Then, we can repeat this process for all classes
of digits (until digit `9`). Once we have paired digit `0` with other digits,
we can repeat this process for the remaining classes for the rest of the digits
(from `1` until `9`).
"""
def make_pairs(x, y):
"""Creates a tuple containing image pairs with corresponding label.
Arguments:
x: List containing images, each index in this list corresponds to one image.
y: List containing labels, each label with datatype of `int`.
Returns:
Tuple containing two numpy arrays as (pairs_of_samples, labels),
        where pairs_of_samples' shape is (2*len(x), 2, n_features_dims) and
        labels are a binary array of shape (2*len(x),).
"""
num_classes = max(y) + 1
digit_indices = [np.where(y == i)[0] for i in range(num_classes)]
pairs = []
labels = []
for idx1 in range(len(x)):
# add a matching example
x1 = x[idx1]
label1 = y[idx1]
idx2 = random.choice(digit_indices[label1])
x2 = x[idx2]
pairs += [[x1, x2]]
labels += [1]
# add a non-matching example
label2 = random.randint(0, num_classes - 1)
while label2 == label1:
label2 = random.randint(0, num_classes - 1)
idx2 = random.choice(digit_indices[label2])
x2 = x[idx2]
pairs += [[x1, x2]]
labels += [0]
return np.array(pairs), np.array(labels).astype("float32")
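# A quick sanity check of the pairing scheme described above (a sketch, not part
# of the original tutorial; the toy arrays are made up): every anchor image
# contributes one positive and one negative pair, so the output holds twice as
# many pairs as there are input images.
_toy_x = np.zeros((4, 28, 28), dtype="float32")
_toy_y = np.array([0, 0, 1, 1])
_toy_pairs, _toy_labels = make_pairs(_toy_x, _toy_y)
assert _toy_pairs.shape == (2 * len(_toy_x), 2, 28, 28)
assert _toy_labels.shape == (2 * len(_toy_x),)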
# make train pairs
pairs_train, labels_train = make_pairs(x_train, y_train)
# make validation pairs
pairs_val, labels_val = make_pairs(x_val, y_val)
# make test pairs
pairs_test, labels_test = make_pairs(x_test, y_test)
"""
We get:
**pairs_train.shape = (60000, 2, 28, 28)**
- We have 60,000 pairs
- Each pair contains 2 images
- Each image has shape `(28, 28)`
"""
"""
Split the training pairs
"""
x_train_1 = pairs_train[:, 0] # x_train_1.shape is (60000, 28, 28)
x_train_2 = pairs_train[:, 1]
"""
Split the validation pairs
"""
x_val_1 = pairs_val[:, 0] # x_val_1.shape = (60000, 28, 28)
x_val_2 = pairs_val[:, 1]
"""
Split the test pairs
"""
x_test_1 = pairs_test[:, 0] # x_test_1.shape = (20000, 28, 28)
x_test_2 = pairs_test[:, 1]
"""
## Visualize pairs and their labels
"""
def visualize(pairs, labels, to_show=6, num_col=3, predictions=None, test=False):
"""Creates a plot of pairs and labels, and prediction if it's test dataset.
Arguments:
pairs: Numpy Array, of pairs to visualize, having shape
(Number of pairs, 2, 28, 28).
to_show: Int, number of examples to visualize (default is 6)
`to_show` must be an integral multiple of `num_col`.
Otherwise it will be trimmed if it is greater than num_col,
                 and incremented if it is less than num_col.
num_col: Int, number of images in one row - (default is 3)
For test and train respectively, it should not exceed 3 and 7.
predictions: Numpy Array of predictions with shape (to_show, 1) -
(default is None)
Must be passed when test=True.
test: Boolean telling whether the dataset being visualized is
train dataset or test dataset - (default False).
Returns:
None.
"""
# Define num_row
# If to_show % num_col != 0
# trim to_show,
# to trim to_show limit num_row to the point where
# to_show % num_col == 0
#
# If to_show//num_col == 0
    #    then it means num_col is greater than to_show
# increment to_show
# to increment to_show set num_row to 1
num_row = to_show // num_col if to_show // num_col != 0 else 1
# `to_show` must be an integral multiple of `num_col`
# we found num_row and we have num_col
# to increment or decrement to_show
# to make it integral multiple of `num_col`
# simply set it equal to num_row * num_col
to_show = num_row * num_col
# Plot the images
fig, axes = plt.subplots(num_row, num_col, figsize=(5, 5))
for i in range(to_show):
# If the number of rows is 1, the axes array is one-dimensional
if num_row == 1:
ax = axes[i % num_col]
else:
ax = axes[i // num_col, i % num_col]
ax.imshow(tf.concat([pairs[i][0], pairs[i][1]], axis=1), cmap="gray")
ax.set_axis_off()
if test:
ax.set_title("True: {} | Pred: {:.5f}".format(labels[i], predictions[i][0]))
else:
ax.set_title("Label: {}".format(labels[i]))
if test:
plt.tight_layout(rect=(0, 0, 1.9, 1.9), w_pad=0.0)
else:
plt.tight_layout(rect=(0, 0, 1.5, 1.5))
plt.show()
"""
Inspect training pairs
"""
visualize(pairs_train[:-1], labels_train[:-1], to_show=4, num_col=4)
"""
Inspect validation pairs
"""
visualize(pairs_val[:-1], labels_val[:-1], to_show=4, num_col=4)
"""
Inspect test pairs
"""
visualize(pairs_test[:-1], labels_test[:-1], to_show=4, num_col=4)
"""
## Define the model
There will be two input layers, each leading to its own network, which
produces embeddings. A `Lambda` layer then merges them using an
[Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) and the
merged output is fed to the final network.
"""
# Provided two tensors t1 and t2
# Euclidean distance = sqrt(sum(square(t1-t2)))
def euclidean_distance(vects):
"""Find the Euclidean distance between two vectors.
Arguments:
vects: List containing two tensors of same length.
Returns:
Tensor containing euclidean distance
(as floating point value) between vectors.
"""
x, y = vects
sum_square = tf.math.reduce_sum(tf.math.square(x - y), axis=1, keepdims=True)
return tf.math.sqrt(tf.math.maximum(sum_square, tf.keras.backend.epsilon()))
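# For intuition (worked example, not in the original tutorial): with
# x = [[0., 3.]] and y = [[4., 0.]], the value above is
# sqrt((0 - 4)^2 + (3 - 0)^2) = 5.0.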
input = layers.Input((28, 28, 1))
x = tf.keras.layers.BatchNormalization()(input)
x = layers.Conv2D(4, (5, 5), activation="tanh")(x)
x = layers.AveragePooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(16, (5, 5), activation="tanh")(x)
x = layers.AveragePooling2D(pool_size=(2, 2))(x)
x = layers.Flatten()(x)
x = tf.keras.layers.BatchNormalization()(x)
x = layers.Dense(10, activation="tanh")(x)
embedding_network = keras.Model(input, x)
input_1 = layers.Input((28, 28, 1))
input_2 = layers.Input((28, 28, 1))
# As mentioned above, Siamese Network share weights between
# tower networks (sister networks). To allow this, we will use
# same embedding network for both tower networks.
tower_1 = embedding_network(input_1)
tower_2 = embedding_network(input_2)
merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])
normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)
output_layer = layers.Dense(1, activation="sigmoid")(normal_layer)
siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
"""
## Define the contrastive loss
"""
def loss(margin=1):
"""Provides 'constrastive_loss' an enclosing scope with variable 'margin'.
Arguments:
margin: Integer, defines the baseline for distance for which pairs
should be classified as dissimilar. - (default is 1).
Returns:
        'contrastive_loss' function with data ('margin') attached.
"""
# Contrastive loss = mean( (1-true_value) * square(prediction) +
# true_value * square( max(margin-prediction, 0) ))
def contrastive_loss(y_true, y_pred):
"""Calculates the constrastive loss.
Arguments:
y_true: List of labels, each label is of type float32.
y_pred: List of predictions of same length as of y_true,
each label is of type float32.
Returns:
            A tensor containing contrastive loss as a floating point value.
"""
square_pred = tf.math.square(y_pred)
margin_square = tf.math.square(tf.math.maximum(margin - (y_pred), 0))
return tf.math.reduce_mean(
(1 - y_true) * square_pred + (y_true) * margin_square
)
return contrastive_loss
"""
## Compile the model with the contrastive loss
"""
siamese.compile(loss=loss(margin=margin), optimizer="RMSprop", metrics=["accuracy"])
siamese.summary()
"""
## Train the model
"""
history = siamese.fit(
[x_train_1, x_train_2],
labels_train,
validation_data=([x_val_1, x_val_2], labels_val),
batch_size=batch_size,
epochs=epochs,
)
"""
## Visualize results
"""
def plt_metric(history, metric, title, has_valid=True):
"""Plots the given 'metric' from 'history'.
Arguments:
history: history attribute of History object returned from Model.fit.
metric: Metric to plot, a string value present as key in 'history'.
title: A string to be used as title of plot.
has_valid: Boolean, true if valid data was passed to Model.fit else false.
Returns:
None.
"""
plt.plot(history[metric])
if has_valid:
plt.plot(history["val_" + metric])
plt.legend(["train", "validation"], loc="upper left")
plt.title(title)
plt.ylabel(metric)
plt.xlabel("epoch")
plt.show()
# Plot the accuracy
plt_metric(history=history.history, metric="accuracy", title="Model accuracy")
# Plot the contrastive loss
plt_metric(history=history.history, metric="loss", title="Contrastive Loss")
"""
## Evaluate the model
"""
results = siamese.evaluate([x_test_1, x_test_2], labels_test)
print("test loss, test acc:", results)
"""
## Visualize the predictions
"""
predictions = siamese.predict([x_test_1, x_test_2])
visualize(pairs_test, labels_test, to_show=3, predictions=predictions, test=True)
| keras-team/keras-io | examples/vision/siamese_contrastive.py | Python | apache-2.0 | 11,646 | 0.001717 |
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to resolve TF-Hub Module stored in compressed TGZ format."""
import hashlib
import urllib
import tensorflow as tf
from tensorflow_hub import resolver
LOCK_FILE_TIMEOUT_SEC = 10 * 60 # 10 minutes
_COMPRESSED_FORMAT_QUERY = ("tf-hub-format", "compressed")
def _module_dir(handle):
"""Returns the directory where to cache the module."""
cache_dir = resolver.tfhub_cache_dir(use_temp=True)
return resolver.create_local_module_dir(
cache_dir,
hashlib.sha1(handle.encode("utf8")).hexdigest())
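# For illustration only (the handle below is an example and the cache root
# depends on the environment): resolving a handle such as
#   "https://tfhub.dev/google/nnlm-en-dim128/2"
# caches it under "<tfhub_cache_dir>/<sha1 hex digest of the handle string>".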
def _is_tarfile(filename):
"""Returns true if 'filename' is TAR file."""
return filename.endswith((".tar", ".tar.gz", ".tgz"))
class HttpCompressedFileResolver(resolver.HttpResolverBase):
"""Resolves HTTP handles by downloading and decompressing them to local fs."""
def is_supported(self, handle):
# HTTP(S) handles are assumed to point to tarfiles.
if not self.is_http_protocol(handle):
return False
# AUTO defaults to COMPRESSED
load_format = resolver.model_load_format()
return load_format in [
resolver.ModelLoadFormat.COMPRESSED.value,
resolver.ModelLoadFormat.AUTO.value
]
def __call__(self, handle):
module_dir = _module_dir(handle)
def download(handle, tmp_dir):
"""Fetch a module via HTTP(S), handling redirect and download headers."""
request = urllib.request.Request(
self._append_compressed_format_query(handle))
response = self._call_urlopen(request)
return resolver.DownloadManager(handle).download_and_uncompress(
response, tmp_dir)
return resolver.atomic_download(handle, download, module_dir,
self._lock_file_timeout_sec())
def _lock_file_timeout_sec(self):
# This method is provided as a convenience to simplify testing.
return LOCK_FILE_TIMEOUT_SEC
def _append_compressed_format_query(self, handle):
return self._append_format_query(handle, _COMPRESSED_FORMAT_QUERY)
class GcsCompressedFileResolver(resolver.Resolver):
"""Resolves GCS handles by downloading and decompressing them to local fs."""
def is_supported(self, handle):
return handle.startswith("gs://") and _is_tarfile(handle)
def __call__(self, handle):
module_dir = _module_dir(handle)
def download(handle, tmp_dir):
return resolver.DownloadManager(handle).download_and_uncompress(
tf.compat.v1.gfile.GFile(handle, "rb"), tmp_dir)
return resolver.atomic_download(handle, download, module_dir,
LOCK_FILE_TIMEOUT_SEC)
| tensorflow/hub | tensorflow_hub/compressed_module_resolver.py | Python | apache-2.0 | 3,271 | 0.006114 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import get_single_element
from tensorflow.contrib.data.python.ops import grouping
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class GetSingleElementTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("Zero", 0, 1),
("Five", 5, 1),
("Ten", 10, 1),
("Empty", 100, 1, errors.InvalidArgumentError, "Dataset was empty."),
("MoreThanOne", 0, 2, errors.InvalidArgumentError,
"Dataset had more than one element."),
)
def testGetSingleElement(self, skip, take, error=None, error_msg=None):
skip_t = array_ops.placeholder(dtypes.int64, shape=[])
take_t = array_ops.placeholder(dtypes.int64, shape=[])
def make_sparse(x):
x_1d = array_ops.reshape(x, [1])
x_2d = array_ops.reshape(x, [1, 1])
return sparse_tensor.SparseTensor(x_2d, x_1d, x_1d)
dataset = dataset_ops.Dataset.range(100).skip(skip_t).map(
lambda x: (x * x, make_sparse(x))).take(take_t)
element = get_single_element.get_single_element(dataset)
with self.test_session() as sess:
if error is None:
dense_val, sparse_val = sess.run(
element, feed_dict={
skip_t: skip,
take_t: take
})
self.assertEqual(skip * skip, dense_val)
self.assertAllEqual([[skip]], sparse_val.indices)
self.assertAllEqual([skip], sparse_val.values)
self.assertAllEqual([skip], sparse_val.dense_shape)
else:
with self.assertRaisesRegexp(error, error_msg):
sess.run(element, feed_dict={skip_t: skip, take_t: take})
@parameterized.named_parameters(
("SumZero", 0),
("SumOne", 1),
("SumFive", 5),
("SumTen", 10),
)
def testReduceDataset(self, stop):
def init_fn(_):
return np.int64(0)
def reduce_fn(state, value):
return state + value
def finalize_fn(state):
return state
sum_reducer = grouping.Reducer(init_fn, reduce_fn, finalize_fn)
stop_t = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset_ops.Dataset.range(stop_t)
element = get_single_element.reduce_dataset(dataset, sum_reducer)
with self.test_session() as sess:
value = sess.run(element, feed_dict={stop_t: stop})
self.assertEqual(stop * (stop - 1) / 2, value)
if __name__ == "__main__":
test.main()
| jart/tensorflow | tensorflow/contrib/data/python/kernel_tests/get_single_element_test.py | Python | apache-2.0 | 3,539 | 0.004521 |
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""SQLite JSON type.
SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
that JSON1_ is a
`loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
may not be available, or may require run-time loading.
The :class:`_sqlite.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function wrapped in the ``JSON_QUOTE`` function at the database level.
Extracted values are quoted in order to ensure that the results are
always JSON string values.
.. versionadded:: 1.3
.. seealso::
JSON1_
.. _JSON1: https://www.sqlite.org/json1.html
"""
# Note: these objects currently match exactly those of MySQL, however since
# these are not generalizable to all JSON implementations, remain separately
# implemented for each dialect.
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
| gltn/stdm | stdm/third_party/sqlalchemy/dialects/sqlite/json.py | Python | gpl-2.0 | 2,292 | 0 |
import logging
import os
import sys
import tkinter
from tkinter import ttk
sys.path.append('../..')
import cv2
from src.image.imnp import ImageNP
from src.support.tkconvert import TkConverter
from src.view.template import TkViewer
from src.view.tkfonts import TkFonts
from src.view.tkframe import TkFrame, TkLabelFrame
from src.view.ttkstyle import TTKStyle, init_css
LOGGER = logging.getLogger(__name__)
THRESHOLD_OPTION = [(u'手動', 'manual'), ('Mean Adaptive', 'mean'), ('Gaussian Adaptive', 'gaussian')]
class GraphCutViewer(TkViewer):
def __init__(self):
super().__init__()
self._im_w, self._im_h = 800, 533
self._init_window(zoom=False)
self._init_style()
self._init_frame()
self._init_menu()
def _init_style(self):
init_css()
theme = 'default'
if os.name == 'posix':
theme = 'alt'
TTKStyle('H4Padding.TLabelframe', theme=theme, background='gray82')
TTKStyle('H4Padding.TLabelframe.Label', theme=theme, font=('', 16), background='gray82')
TTKStyle('H2BlackBold.TLabel', theme=theme, font=('', 24, 'bold'), background='white', foreground='black')
TTKStyle('H2RedBold.TLabel', theme=theme, font=('', 24, 'bold'), background='white', foreground='red')
self.font = TkFonts()
# init frame
def _init_frame(self):
# root
self.frame_root = TkFrame(self.root, bg='white')
self.frame_root.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_root, 0, 1, 2)
self.set_all_grid_columnconfigure(self.frame_root, 0)
# head
self.frame_head = TkFrame(self.frame_root, bg='white')
self.frame_head.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_head, 0)
self.set_all_grid_columnconfigure(self.frame_head, 0)
# body
self.frame_body = TkFrame(self.frame_root, bg='black')
self.frame_body.grid(row=1, column=0, sticky='news')
self.set_all_grid_columnconfigure(self.frame_body, 0, 1)
self.set_all_grid_rowconfigure(self.frame_body, 0)
# body > panel
self.frame_panel = TkFrame(self.frame_body, bg='light pink')
self.frame_panel.grid(row=0, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_panel, 0)
self.set_all_grid_columnconfigure(self.frame_panel, 0)
# body > display
self.frame_display = TkFrame(self.frame_body, bg='royal blue')
self.frame_display.grid(row=0, column=1, sticky='news')
self.set_all_grid_rowconfigure(self.frame_display, 0)
self.set_all_grid_columnconfigure(self.frame_display, 0)
# footer
self.frame_footer = TkFrame(self.frame_root, bg='gray82')
self.frame_footer.grid(row=2, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_footer, 0, 1)
self.set_all_grid_columnconfigure(self.frame_footer, 0)
# footer > panel setting
self.frame_panel_setting = ttk.LabelFrame(self.frame_footer, text=u'輸入圖片選項: ', style='H4Padding.TLabelframe')
self.frame_panel_setting.grid(row=0, column=0, sticky='news', pady=10)
self.set_all_grid_rowconfigure(self.frame_panel_setting, 0, 1)
self.set_all_grid_columnconfigure(self.frame_panel_setting, 0)
# footer > panel setting > template option
self.frame_template_options = TkFrame(self.frame_panel_setting, bg='gray82', pady=5)
self.frame_template_options.grid(row=0, column=0, sticky='news')
# footer > panel setting > gamma
self.frame_gamma = TkFrame(self.frame_panel_setting, bg='gray82', pady=5)
self.frame_gamma.grid(row=1, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_gamma, 0)
self.set_all_grid_columnconfigure(self.frame_gamma, 0)
# footer > display setting
self.frame_display_setting = ttk.LabelFrame(self.frame_footer, text=u'輸出圖片選項: ', style='H4Padding.TLabelframe')
self.frame_display_setting.grid(row=1, column=0, sticky='news', pady=10)
self.set_all_grid_rowconfigure(self.frame_display_setting, 0)
self.set_all_grid_columnconfigure(self.frame_display_setting, 0)
# footer > display setting > threshold options
self.frame_threshold_options = TkFrame(self.frame_display_setting, bg='gray82', pady=5)
self.frame_threshold_options.grid(row=0, column=0, sticky='news')
# footer > display setting > manual threshold
self.frame_manual_threshold = TkFrame(self.frame_display_setting, bg='gray82', pady=5)
self.frame_manual_threshold.grid(row=1, column=0, sticky='news')
self.set_all_grid_rowconfigure(self.frame_manual_threshold, 0)
self.set_all_grid_columnconfigure(self.frame_manual_threshold, 0)
self._init_widget_head()
self._init_widget_body()
self._init_widget_footer()
# init head widget
def _init_widget_head(self):
self.set_all_grid_rowconfigure(self.frame_head, 0, 1)
self.label_state = ttk.Label(self.frame_head, text=u'現在模式: N/A', style='H2.TLabel')
self.label_state.grid(row=0, column=0, sticky='w')
self.label_resize = ttk.Label(self.frame_head, text=u'原有尺寸 N/A-> 顯示尺寸 N/A', style='H2.TLabel')
self.label_resize.grid(row=1, column=0, sticky='w')
# init body widget
def _init_widget_body(self):
# panel
self.set_all_grid_rowconfigure(self.frame_panel, 0, 1)
self.label_panel = ttk.Label(self.frame_panel, text='Input Panel', style='H2.TLabel')
self.label_panel.grid(row=0, column=0, sticky='ns')
self.photo_panel = ImageNP.generate_checkboard((self._im_h, self._im_w), block_size=10)
self.photo_panel = TkConverter.ndarray_to_photo(self.photo_panel)
self.label_panel_image = ttk.Label(self.frame_panel, image=self.photo_panel)
self.label_panel_image.grid(row=1, column=0, sticky='ns')
# display
self.label_display = ttk.Label(self.frame_display, text='Display', style='H2.TLabel')
self.label_display.grid(row=0, column=0, columnspan=3)
self.set_all_grid_rowconfigure(self.frame_display, 0, 1, 2)
self.set_all_grid_columnconfigure(self.frame_display, 0, 1, 2)
self.photo_small = ImageNP.generate_checkboard((self._im_h//2, self._im_w//3), 10)
self.photo_small = TkConverter.ndarray_to_photo(self.photo_small)
self.photo_large = ImageNP.generate_checkboard((self._im_h, self._im_w//3), 10)
self.photo_large = TkConverter.ndarray_to_photo(self.photo_large)
self.label_fl_image = ttk.Label(self.frame_display, image=self.photo_small)
self.label_fl_image.grid(row=1, column=0)
self.label_fr_image = ttk.Label(self.frame_display, image=self.photo_small)
self.label_fr_image.grid(row=1, column=1)
self.label_bl_image = ttk.Label(self.frame_display, image=self.photo_small)
self.label_bl_image.grid(row=2, column=0)
self.label_br_image = ttk.Label(self.frame_display, image=self.photo_small)
self.label_br_image.grid(row=2, column=1)
self.label_body_image = ttk.Label(self.frame_display, image=self.photo_large)
self.label_body_image.grid(row=1, column=2, rowspan=2)
# init footer widget
def _init_widget_footer(self):
# input panel template option
self.label_template = ttk.Label(self.frame_template_options, text=u'過濾方式: ', style='H5.TLabel')
self.label_template.grid(row=0, column=0, sticky='w')
self.val_checkbtn_floodfill = tkinter.StringVar()
self.checkbtn_floodfill = ttk.Checkbutton(
self.frame_template_options,
text=u'floodfill',
variable=self.val_checkbtn_floodfill,
onvalue='on', offvalue='off',
style='H5.TCheckbutton'
)
self.checkbtn_floodfill.grid(row=0, column=1, sticky='w')
# input panel gamma
self.label_gamma = ttk.Label(self.frame_gamma, text=u'調整對比 ({:.2f}): '.format(1.), style='H5.TLabel')
self.label_gamma.grid(row=0, column=0, sticky='w')
self.val_scale_gamma = tkinter.DoubleVar()
self.val_scale_gamma.set(1.0)
self.scale_gamma = ttk.Scale(self.frame_gamma,
orient=tkinter.HORIZONTAL,
length=self._im_w*2,
from_=0, to=2.5,
variable=self.val_scale_gamma,
style='Gray.Horizontal.TScale')
self.scale_gamma.state(('active', '!disabled'))
self.scale_gamma.grid(row=0, column=1, sticky='w')
# display threshold option
self.label_threshold_options = ttk.Label(self.frame_threshold_options, text=u'門檻值選項: ', style='H5.TLabel')
# self.label_threshold_options.grid(row=0, column=0, sticky='w')
self.val_threshold_option = tkinter.StringVar()
self.val_threshold_option.set(THRESHOLD_OPTION[0][-1])
self.radiobtn_threshold_options = []
for i, op in enumerate(THRESHOLD_OPTION):
text, val = op
radiobtn = ttk.Radiobutton(self.frame_threshold_options,
text=text,
variable=self.val_threshold_option,
value=val,
style='H5.TRadiobutton')
# radiobtn.grid(row=0, column=i+1, sticky='w', padx=10)
self.radiobtn_threshold_options.append(radiobtn)
# display threshold manual scale
self.label_manual_threshold = ttk.Label(self.frame_manual_threshold, text=u'門檻值 ({:.2f}): '.format(250), style='H5.TLabel')
self.label_manual_threshold.grid(row=0, column=0, sticky='w')
self.val_manual_threshold = tkinter.DoubleVar()
self.val_manual_threshold.set(250)
self.scale_manual_threshold = ttk.Scale(self.frame_manual_threshold,
orient=tkinter.HORIZONTAL,
length=self._im_w*2,
from_=1, to=254,
variable=self.val_manual_threshold,
style='Gray.Horizontal.TScale')
self.scale_manual_threshold.state(('active', '!disabled'))
self.scale_manual_threshold.grid(row=0, column=1, sticky='news', columnspan=len(THRESHOLD_OPTION))
# init menu bar
def _init_menu(self):
# root
self.menu_root = tkinter.Menu(self.root)
self.root.config(menu=self.menu_root)
# load image
self.menu_load_img = tkinter.Menu(self.menu_root)
# show menu
self.menu_root.add_cascade(label=u'File', menu=self.menu_load_img)
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(filename)12s:L%(lineno)3s [%(levelname)8s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stdout
)
graphcut_viewer = GraphCutViewer()
graphcut_viewer.mainloop()
| afunTW/moth-graphcut | src/view/graphcut_app.py | Python | mit | 11,346 | 0.003552 |
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from load import load_dataset
import numpy as np
from threshold import learn_model, apply_model, accuracy
features, labels = load_dataset('seeds')
# Turn the labels into a binary array
labels = (labels == 'Canadian')
error = 0.0
for fold in range(10):
training = np.ones(len(features), bool)
# numpy magic to make an array with 10% of 0s starting at fold
training[fold::10] = 0
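    # e.g. for fold == 1 this switches off indices 1, 11, 21, ..., so roughly
    # every 10th sample is held out for testing in this fold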
# whatever is not training is for testing
testing = ~training
model = learn_model(features[training], labels[training])
test_error = accuracy(features[testing], labels[testing], model)
error += test_error
error /= 10.0
print('Ten fold cross-validated error was {0:.1%}.'.format(error))
| krahman/BuildingMachineLearningSystemsWithPython | ch02/seeds_threshold.py | Python | mit | 921 | 0 |
#!/usr/bin/env python
""" patrol_smach_iterator.py - Version 1.0 2013-10-23
Control a robot using SMACH to patrol a square area a specified number of times
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
    http://www.gnu.org/licenses/gpl.html
"""
import rospy
import smach
from smach import StateMachine, Iterator
from smach_ros import SimpleActionState, IntrospectionServer
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist
from tf.transformations import quaternion_from_euler
from visualization_msgs.msg import Marker
from math import radians, pi
class main():
def __init__(self):
rospy.init_node('patrol_smach', anonymous=False)
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# Initialize a number of parameters and variables
self.init()
# Subscribe to the move_base action server
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("Waiting for move_base action server...")
# Wait up to 60 seconds for the action server to become available
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move_base action server")
# Track success rate of getting to the goal locations
self.n_succeeded = 0
self.n_aborted = 0
self.n_preempted = 0
self.n_patrols = 2
# Turn the waypoints into SMACH states
nav_states = list()
for waypoint in self.waypoints:
nav_goal = MoveBaseGoal()
nav_goal.target_pose.header.frame_id = 'map'
nav_goal.target_pose.pose = waypoint
move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
exec_timeout=rospy.Duration(10.0),
server_wait_timeout=rospy.Duration(10.0))
nav_states.append(move_base_state)
move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
exec_timeout=rospy.Duration(10.0))
# Initialize the top level state machine
self.sm = StateMachine(outcomes=['succeeded','aborted','preempted'])
with self.sm:
# Initialize the iterator
self.sm_patrol_iterator = Iterator(outcomes = ['succeeded','preempted','aborted'],
input_keys = [],
it = lambda: range(0, self.n_patrols),
output_keys = [],
it_label = 'index',
exhausted_outcome = 'succeeded')
with self.sm_patrol_iterator:
# Initialize the patrol state machine
self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted','continue'])
# Add the states to the state machine with the appropriate transitions
with self.sm_patrol:
StateMachine.add('NAV_STATE_0', nav_states[0], transitions={'succeeded':'NAV_STATE_1','aborted':'NAV_STATE_1','preempted':'NAV_STATE_1'})
StateMachine.add('NAV_STATE_1', nav_states[1], transitions={'succeeded':'NAV_STATE_2','aborted':'NAV_STATE_2','preempted':'NAV_STATE_2'})
StateMachine.add('NAV_STATE_2', nav_states[2], transitions={'succeeded':'NAV_STATE_3','aborted':'NAV_STATE_3','preempted':'NAV_STATE_3'})
StateMachine.add('NAV_STATE_3', nav_states[3], transitions={'succeeded':'NAV_STATE_4','aborted':'NAV_STATE_4','preempted':'NAV_STATE_4'})
StateMachine.add('NAV_STATE_4', nav_states[0], transitions={'succeeded':'continue','aborted':'continue','preempted':'continue'})
# Close the sm_patrol machine and add it to the iterator
Iterator.set_contained_state('PATROL_STATE', self.sm_patrol, loop_outcomes=['continue'])
# Close the top level state machine
StateMachine.add('PATROL_ITERATOR', self.sm_patrol_iterator, {'succeeded':'succeeded', 'aborted':'aborted'})
# Create and start the SMACH introspection server
intro_server = IntrospectionServer('patrol', self.sm, '/SM_ROOT')
intro_server.start()
# Execute the state machine
sm_outcome = self.sm.execute()
rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))
intro_server.stop()
def move_base_result_cb(self, userdata, status, result):
if status == actionlib.GoalStatus.SUCCEEDED:
self.n_succeeded += 1
elif status == actionlib.GoalStatus.ABORTED:
self.n_aborted += 1
elif status == actionlib.GoalStatus.PREEMPTED:
self.n_preempted += 1
try:
rospy.loginfo("Success rate: " + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))
except:
pass
def init(self):
# How big is the square we want the robot to patrol?
self.square_size = rospy.get_param("~square_size", 1.0) # meters
# How many times should we execute the patrol loop
        self.n_patrols = rospy.get_param("~n_patrols", 3)  # number of times to patrol the square
# Create a list to hold the target quaternions (orientations)
quaternions = list()
# First define the corner orientations as Euler angles
euler_angles = (pi/2, pi, 3*pi/2, 0)
# Then convert the angles to quaternions
for angle in euler_angles:
q_angle = quaternion_from_euler(0, 0, angle, axes='sxyz')
q = Quaternion(*q_angle)
quaternions.append(q)
# Create a list to hold the waypoint poses
self.waypoints = list()
# Append each of the four waypoints to the list. Each waypoint
# is a pose consisting of a position and orientation in the map frame.
self.waypoints.append(Pose(Point(0.0, 0.0, 0.0), quaternions[3]))
self.waypoints.append(Pose(Point(self.square_size, 0.0, 0.0), quaternions[0]))
self.waypoints.append(Pose(Point(self.square_size, self.square_size, 0.0), quaternions[1]))
self.waypoints.append(Pose(Point(0.0, self.square_size, 0.0), quaternions[2]))
# Initialize the waypoint visualization markers for RViz
self.init_waypoint_markers()
# Set a visualization marker at each waypoint
for waypoint in self.waypoints:
p = Point()
p = waypoint.position
self.waypoint_markers.points.append(p)
# Publisher to manually control the robot (e.g. to stop it)
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)
rospy.loginfo("Starting SMACH test")
# Publish the waypoint markers
self.marker_pub.publish(self.waypoint_markers)
rospy.sleep(1)
self.marker_pub.publish(self.waypoint_markers)
def init_waypoint_markers(self):
# Set up our waypoint markers
marker_scale = 0.2
marker_lifetime = 0 # 0 is forever
marker_ns = 'waypoints'
marker_id = 0
marker_color = {'r': 1.0, 'g': 0.7, 'b': 1.0, 'a': 1.0}
# Define a marker publisher.
self.marker_pub = rospy.Publisher('waypoint_markers', Marker)
# Initialize the marker points list.
self.waypoint_markers = Marker()
self.waypoint_markers.ns = marker_ns
self.waypoint_markers.id = marker_id
self.waypoint_markers.type = Marker.CUBE_LIST
self.waypoint_markers.action = Marker.ADD
self.waypoint_markers.lifetime = rospy.Duration(marker_lifetime)
self.waypoint_markers.scale.x = marker_scale
self.waypoint_markers.scale.y = marker_scale
self.waypoint_markers.color.r = marker_color['r']
self.waypoint_markers.color.g = marker_color['g']
self.waypoint_markers.color.b = marker_color['b']
self.waypoint_markers.color.a = marker_color['a']
self.waypoint_markers.header.frame_id = 'odom'
self.waypoint_markers.header.stamp = rospy.Time.now()
self.waypoint_markers.points = list()
def shutdown(self):
rospy.loginfo("Stopping the robot...")
self.sm_patrol.request_preempt()
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
rospy.loginfo("SMACH test finished.")
| fujy/ROS-Project | src/rbx2/rbx2_tasks/nodes/patrol_smach_iterator.py | Python | mit | 9,741 | 0.01314 |
"""
check_mod.py
Copyright 2015 Adam Greig
Licensed under the MIT licence, see LICENSE file for details.
Check all footprint files in a directory against a set of consistency rules.
"""
from __future__ import print_function, division
import sys
import os
import glob
from decimal import Decimal
import argparse
from sexp import parse as sexp_parse
def checkrefval(mod, errs):
for fp_text in (node for node in mod if node[0] == "fp_text"):
if fp_text[1] not in ("reference", "value"):
continue
layer = [n for n in fp_text if n[0] == "layer"][0]
if layer[1] != "F.Fab":
errs.append("Value and Reference fields must be on F.Fab")
if fp_text[1] == "reference" and fp_text[2] != "REF**":
errs.append("Reference field must contain REF**")
if fp_text[1] == "value" and not mod[1].startswith(fp_text[2]):
errs.append("Value field must contain module name")
def checkfont(mod, errs):
for fp_text in (node for node in mod if node[0] == "fp_text"):
effects = [n for n in fp_text if n[0] == "effects"][0]
font = [n for n in effects if n[0] == "font"][0]
size = [n for n in font if n[0] == "size"][0]
thickness = [n for n in font if n[0] == "thickness"][0]
if (Decimal(size[1]) != 1 or Decimal(size[2]) != 1):
errs.append("Font must all be 1mm x 1mm size")
if Decimal(thickness[1]) != Decimal("0.15"):
errs.append("Font must be 0.15mm line thickness")
def checklines(mod, errs, check_layers, check_width):
line_types = ("fp_line", "fp_circle", "fp_arc", "fp_poly", "fp_curve")
for line in (node for node in mod if node[0] in line_types):
layer = [n for n in line if n[0] == "layer"][0]
width = [n for n in line if n[0] == "width"][0]
if layer[1] in check_layers:
if Decimal(width[1]) != Decimal(check_width):
errs.append("Lines on {} must be {}mm wide"
.format(check_layers, check_width))
def checkctyd(mod, errs):
found_ctyd = False
for ctyd in (node for node in mod if node[0] == "fp_line"):
layer = [n for n in ctyd if n[0] == "layer"][0]
width = [n for n in ctyd if n[0] == "width"][0]
start = [n for n in ctyd if n[0] == "start"][0]
end = [n for n in ctyd if n[0] == "end"][0]
ctyd_layers = ("F.CrtYd", "B.CrtYd")
if layer[1] in ctyd_layers:
found_ctyd = True
if Decimal(width[1]) != Decimal("0.01"):
errs.append("Courtyard lines must be 0.01mm wide")
if (Decimal(start[1]) % Decimal("0.05") != 0
or Decimal(start[2]) % Decimal("0.05") != 0
or Decimal(end[1]) % Decimal("0.05") != 0
or Decimal(end[2]) % Decimal("0.05") != 0):
errs.append("Courtyard lines must lie on a 0.05mm grid")
if not found_ctyd:
errs.append("No courtyard found")
def checkmod(path, verbose=False):
errs = []
with open(path) as f:
mod = sexp_parse(f.read())
checkrefval(mod, errs)
checkfont(mod, errs)
checklines(mod, errs, ("F.SilkS", "B.SilkS"), "0.15")
checklines(mod, errs, ("F.Fab", "B.Fab"), "0.01")
checkctyd(mod, errs)
if len(errs) == 0:
if verbose:
print("Checked '{}': OK".format(path))
return True
else:
print("Checked '{}': Error:".format(path), file=sys.stderr)
for err in errs:
print(" " + err, file=sys.stderr)
print("", file=sys.stderr)
return False
def main(prettypath, verbose=False):
ok = True
for f in glob.glob(os.path.join(prettypath, "*.kicad_mod")):
result = checkmod(f, verbose)
if not result:
ok = False
return ok
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("prettypath", type=str,
help="Path to footprints")
parser.add_argument("--verbose", action="store_true",
help="Print out every footprint checked even if OK")
args = vars(parser.parse_args())
result = main(**args)
sys.exit(0 if result else 1)
| adamgreig/agg-kicad | scripts/check_mod.py | Python | mit | 4,233 | 0 |