text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
stringlengths 6…947k | stringlengths 5…100 | stringlengths 4…231 | stringclasses 1 value | stringclasses 15 values | int64 6…947k | float64 0…0.34
# -*- coding: UTF-8 -*-
import logging
unicode_string = u"Татьяна"
utf8_string = "'Татьяна' is an invalid string value"
logging.warning(unicode_string)
logging.warning(utf8_string)
try:
raise Exception(utf8_string)
except Exception,e:
print "--- (Log a traceback of the exception):"
logging.exception(e)
print "--- Everything okay until here, but now we run into trouble:"
logging.warning(u"1 Deferred %s : %s",unicode_string,e)
logging.warning(u"2 Deferred %s : %s",unicode_string,utf8_string)
print "--- some workarounds:"
logging.warning(u"3 Deferred %s : %s",unicode_string,utf8_string.decode('UTF-8'))
from django.utils.encoding import force_unicode
logging.warning(u"4 Deferred %s : %s",unicode_string,force_unicode(utf8_string))
| lsaffre/blog | docs/blog/2011/0527.py | Python | agpl-3.0 | 806 | 0.017677 |
# encoding: utf-8
" This sub-module provides 'sequence awareness' for blessed."
__author__ = 'Jeff Quast <contact@jeffquast.com>'
__license__ = 'MIT'
__all__ = ('init_sequence_patterns', 'Sequence', 'SequenceTextWrapper',)
# built-ins
import functools
import textwrap
import warnings
import math
import sys
import re
# local
from ._binterms import binary_terminals as _BINTERM_UNSUPPORTED
# 3rd-party
import wcwidth # https://github.com/jquast/wcwidth
_BINTERM_UNSUPPORTED_MSG = (
u"Terminal kind {0!r} contains binary-packed capabilities, blessed "
u"is likely to fail to measure the length of its sequences.")
if sys.version_info[0] == 3:
text_type = str
else:
text_type = unicode # noqa
def _merge_sequences(inp):
"""Merge a list of input sequence patterns for use in a regular expression.
Order by length (the full sequence takes precedence over any subset),
and exclude any empty (u'') sequences.
"""
return sorted(list(filter(None, inp)), key=len, reverse=True)
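# Illustrative sketch (added for clarity; not in the original module): longer
# sequences must sort first so that a regex alternation built from the result
# prefers the full sequence over any of its prefixes, e.g.
#
#     >>> _merge_sequences([u'\x1b[m', u'', u'\x1b[0;10m'])
#     [u'\x1b[0;10m', u'\x1b[m']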
def _build_numeric_capability(term, cap, optional=False,
base_num=99, nparams=1):
""" Build regexp from capabilities having matching numeric
parameter contained within termcap value: n->(\d+).
"""
_cap = getattr(term, cap)
opt = '?' if optional else ''
if _cap:
args = (base_num,) * nparams
cap_re = re.escape(_cap(*args))
for num in range(base_num - 1, base_num + 2):
# search for matching ascii, n-1 through n+1
if str(num) in cap_re:
# modify & return n to matching digit expression
cap_re = cap_re.replace(str(num), r'(\d+)%s' % (opt,))
return cap_re
warnings.warn('Unknown parameter in %r (%r, %r)' % (cap, _cap, cap_re))
return None # no such capability
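# A hypothetical sketch of the substitution above: if ``term.hpa(99)`` renders
# as u'\x1b[100G' (some terminals use 1-indexed column addressing), the loop
# finds the literal '100' while searching 98 through 100 and replaces it,
# yielding a pattern equivalent to re.escape(u'\x1b[') + r'(\d+)' + 'G'.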
def _build_any_numeric_capability(term, cap, num=99, nparams=1):
""" Build regexp from capabilities having *any* digit parameters
(substitute any matching digits with the capture pattern (\d+) and return).
"""
_cap = getattr(term, cap)
if _cap:
cap_re = re.escape(_cap(*((num,) * nparams)))
cap_re = re.sub('(\d+)', r'(\d+)', cap_re)
if r'(\d+)' in cap_re:
return cap_re
warnings.warn('Missing numerics in %r, %r' % (cap, cap_re))
return None # no such capability
def get_movement_sequence_patterns(term):
""" Build and return set of regexp for capabilities of ``term`` known
to cause movement.
"""
bnc = functools.partial(_build_numeric_capability, term)
return set([
# carriage_return
re.escape(term.cr),
# column_address: Horizontal position, absolute
bnc(cap='hpa'),
# row_address: Vertical position #1 absolute
bnc(cap='vpa'),
# cursor_address: Move to row #1 columns #2
bnc(cap='cup', nparams=2),
# cursor_down: Down one line
re.escape(term.cud1),
# cursor_home: Home cursor (if no cup)
re.escape(term.home),
# cursor_left: Move left one space
re.escape(term.cub1),
# cursor_right: Non-destructive space (move right one space)
re.escape(term.cuf1),
# cursor_up: Up one line
re.escape(term.cuu1),
# param_down_cursor: Down #1 lines
bnc(cap='cud', optional=True),
# restore_cursor: Restore cursor to position of last save_cursor
re.escape(term.rc),
# clear_screen: clear screen and home cursor
re.escape(term.clear),
# enter/exit_fullscreen: switch to alternate screen buffer
re.escape(term.enter_fullscreen),
re.escape(term.exit_fullscreen),
# forward cursor
term._cuf,
# backward cursor
term._cub,
])
def get_wontmove_sequence_patterns(term):
""" Build and return set of regexp for capabilities of ``term`` known
not to cause any movement.
"""
bnc = functools.partial(_build_numeric_capability, term)
bna = functools.partial(_build_any_numeric_capability, term)
return list([
# print_screen: Print contents of screen
re.escape(term.mc0),
# prtr_off: Turn off printer
re.escape(term.mc4),
# prtr_on: Turn on printer
re.escape(term.mc5),
# save_cursor: Save current cursor position (P)
re.escape(term.sc),
# set_tab: Set a tab in every row, current columns
re.escape(term.hts),
# enter_bold_mode: Turn on bold (extra bright) mode
re.escape(term.bold),
# enter_standout_mode
re.escape(term.standout),
# enter_subscript_mode
re.escape(term.subscript),
# enter_superscript_mode
re.escape(term.superscript),
# enter_underline_mode: Begin underline mode
re.escape(term.underline),
# enter_blink_mode: Turn on blinking
re.escape(term.blink),
# enter_dim_mode: Turn on half-bright mode
re.escape(term.dim),
# cursor_invisible: Make cursor invisible
re.escape(term.civis),
# cursor_visible: Make cursor very visible
re.escape(term.cvvis),
# cursor_normal: Make cursor appear normal (undo civis/cvvis)
re.escape(term.cnorm),
# clear_all_tabs: Clear all tab stops
re.escape(term.tbc),
# change_scroll_region: Change region to line #1 to line #2
bnc(cap='csr', nparams=2),
# clr_bol: Clear to beginning of line
re.escape(term.el1),
# clr_eol: Clear to end of line
re.escape(term.el),
# clr_eos: Clear to end of screen
re.escape(term.clear_eos),
# delete_character: Delete character
re.escape(term.dch1),
# delete_line: Delete line (P*)
re.escape(term.dl1),
# erase_chars: Erase #1 characters
bnc(cap='ech'),
# insert_line: Insert line (P*)
re.escape(term.il1),
# parm_dch: Delete #1 characters
bnc(cap='dch'),
# parm_delete_line: Delete #1 lines
bnc(cap='dl'),
# exit_alt_charset_mode: End alternate character set (P)
re.escape(term.rmacs),
# exit_am_mode: Turn off automatic margins
re.escape(term.rmam),
# exit_attribute_mode: Turn off all attributes
re.escape(term.sgr0),
# exit_ca_mode: Strings to end programs using cup
re.escape(term.rmcup),
# exit_insert_mode: Exit insert mode
re.escape(term.rmir),
# exit_standout_mode: Exit standout mode
re.escape(term.rmso),
# exit_underline_mode: Exit underline mode
re.escape(term.rmul),
# flash_hook: Flash switch hook
re.escape(term.hook),
# flash_screen: Visible bell (may not move cursor)
re.escape(term.flash),
# keypad_local: Leave 'keyboard_transmit' mode
re.escape(term.rmkx),
# keypad_xmit: Enter 'keyboard_transmit' mode
re.escape(term.smkx),
# meta_off: Turn off meta mode
re.escape(term.rmm),
# meta_on: Turn on meta mode (8th-bit on)
re.escape(term.smm),
# orig_pair: Set default pair to its original value
re.escape(term.op),
# parm_ich: Insert #1 characters
bnc(cap='ich'),
# parm_index: Scroll forward #1
bnc(cap='indn'),
# parm_insert_line: Insert #1 lines
bnc(cap='il'),
# erase_chars: Erase #1 characters
bnc(cap='ech'),
# parm_rindex: Scroll back #1 lines
bnc(cap='rin'),
# parm_up_cursor: Up #1 lines
bnc(cap='cuu'),
# scroll_forward: Scroll text up (P)
re.escape(term.ind),
# scroll_reverse: Scroll text down (P)
re.escape(term.rev),
# tab: Tab to next 8-space hardware tab stop
re.escape(term.ht),
# set_a_background: Set background color to #1, using ANSI escape
bna(cap='setab', num=1),
bna(cap='setab', num=(term.number_of_colors - 1)),
# set_a_foreground: Set foreground color to #1, using ANSI escape
bna(cap='setaf', num=1),
bna(cap='setaf', num=(term.number_of_colors - 1)),
] + [
# set_attributes: Define video attributes #1-#9 (PG9)
# ( not *exactly* legal, being extra forgiving. )
bna(cap='sgr', nparams=_num) for _num in range(1, 10)
# reset_{1,2,3}string: Reset string
] + list(map(re.escape, (term.r1, term.r2, term.r3,))))
def init_sequence_patterns(term):
"""Given a Terminal instance, ``term``, this function processes
and parses several known terminal capabilities, and builds and
returns a dictionary database of regular expressions, which may
be re-attached to the terminal by attributes of the same key-name:
``_re_will_move``
any sequence matching this pattern will cause the terminal
cursor to move (such as *term.home*).
``_re_wont_move``
any sequence matching this pattern will not cause the cursor
to move (such as *term.bold*).
``_re_cuf``
regular expression that matches term.cuf(N) (move N characters forward),
or None if terminal is without cuf sequence.
``_cuf1``
*term.cuf1* sequence (cursor forward 1 character) as a static value.
``_re_cub``
regular expression that matches term.cub(N) (move N characters backward),
or None if terminal is without cub sequence.
``_cub1``
*term.cub1* sequence (cursor backward 1 character) as a static value.
These attributes make it possible to perform introspection on strings
containing sequences generated by this terminal, to determine the
printable length of a string.
"""
if term.kind in _BINTERM_UNSUPPORTED:
warnings.warn(_BINTERM_UNSUPPORTED_MSG.format(term.kind))
# Build will_move, a list of terminal capabilities that have
# indeterminate effects on the terminal cursor position.
_will_move = set()
if term.does_styling:
_will_move = _merge_sequences(get_movement_sequence_patterns(term))
# Build wont_move, a list of terminal capabilities that mainly affect
# video attributes, for use with measure_length().
_wont_move = set()
if term.does_styling:
_wont_move = _merge_sequences(get_wontmove_sequence_patterns(term))
_wont_move += [
# some last-ditch match efforts; well, xterm and aixterm are going
# to throw \x1b(B and other oddities all around, so, when given
# input such as ansi art (see test using wall.ans), and well,
# there's no reason a vt220 terminal shouldn't be able to recognize
# blue_on_red, even if it didn't cause it to be generated. these
# are final "ok, i will match this, anyway"
re.escape(u'\x1b') + r'\[(\d+)m',
re.escape(u'\x1b') + r'\[(\d+)\;(\d+)m',
re.escape(u'\x1b') + r'\[(\d+)\;(\d+)\;(\d+)m',
re.escape(u'\x1b') + r'\[(\d+)\;(\d+)\;(\d+)\;(\d+)m',
re.escape(u'\x1b(B'),
]
# compile as regular expressions, OR'd.
_re_will_move = re.compile('(%s)' % ('|'.join(_will_move)))
_re_wont_move = re.compile('(%s)' % ('|'.join(_wont_move)))
# static pattern matching for horizontal_distance(ucs, term)
bnc = functools.partial(_build_numeric_capability, term)
# parm_right_cursor: Move #1 characters to the right
_cuf = bnc(cap='cuf', optional=True)
_re_cuf = re.compile(_cuf) if _cuf else None
# cursor_right: Non-destructive space (move right one space)
_cuf1 = term.cuf1
# parm_left_cursor: Move #1 characters to the left
_cub = bnc(cap='cub', optional=True)
_re_cub = re.compile(_cub) if _cub else None
# cursor_left: Move left one space
_cub1 = term.cub1
return {'_re_will_move': _re_will_move,
'_re_wont_move': _re_wont_move,
'_re_cuf': _re_cuf,
'_re_cub': _re_cub,
'_cuf1': _cuf1,
'_cub1': _cub1, }
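# A minimal wiring sketch (illustrative, not from the original source); the
# docstring above notes these keys "may be re-attached to the terminal by
# attributes of the same key-name":
#
#     for _attr, _value in init_sequence_patterns(term).items():
#         setattr(term, _attr, _value)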
class SequenceTextWrapper(textwrap.TextWrapper):
def __init__(self, width, term, **kwargs):
self.term = term
textwrap.TextWrapper.__init__(self, width, **kwargs)
def _wrap_chunks(self, chunks):
"""
escape-sequence aware variant of _wrap_chunks. Though
movement sequences, such as term.left() are certainly not
honored, sequences such as term.bold() are, and are not
broken mid-sequence.
"""
lines = []
if self.width <= 0 or not isinstance(self.width, int):
raise ValueError("invalid width %r(%s) (must be integer > 0)" % (
self.width, type(self.width)))
term = self.term
drop_whitespace = not hasattr(self, 'drop_whitespace'
) or self.drop_whitespace
chunks.reverse()
while chunks:
cur_line = []
cur_len = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
width = self.width - len(indent)
if drop_whitespace and (
Sequence(chunks[-1], term).strip() == '' and lines):
del chunks[-1]
while chunks:
chunk_len = Sequence(chunks[-1], term).length()
if cur_len + chunk_len <= width:
cur_line.append(chunks.pop())
cur_len += chunk_len
else:
break
if chunks and Sequence(chunks[-1], term).length() > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
if drop_whitespace and (
cur_line and Sequence(cur_line[-1], term).strip() == ''):
del cur_line[-1]
if cur_line:
lines.append(indent + u''.join(cur_line))
return lines
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
term = self.term
chunk = reversed_chunks[-1]
nxt = 0
for idx in range(0, len(chunk)):
if idx == nxt:
# at sequence, point beyond it,
nxt = idx + measure_length(chunk[idx:], term)
if nxt <= idx:
# point beyond next sequence, if any,
# otherwise point to next character
nxt = idx + measure_length(chunk[idx:], term) + 1
if Sequence(chunk[:nxt], term).length() > space_left:
break
else:
# our text ends with a sequence, such as in text
# u'!\x1b(B\x1b[m', set index at end (nxt)
idx = nxt
cur_line.append(chunk[:idx])
reversed_chunks[-1] = chunk[idx:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
SequenceTextWrapper.__doc__ = textwrap.TextWrapper.__doc__
class Sequence(text_type):
"""
This unicode-derived class understands the effect of escape sequences
of printable length, allowing a properly implemented .rjust(), .ljust(),
.center(), and .length()
"""
def __new__(cls, sequence_text, term):
"""Sequence(sequence_text, term) -> unicode object
:arg sequence_text: A string containing sequences.
:arg term: Terminal instance this string was created with.
"""
new = text_type.__new__(cls, sequence_text)
new._term = term
return new
def ljust(self, width, fillchar=u' '):
"""S.ljust(width, fillchar) -> unicode
Returns string derived from unicode string ``S``, left-adjusted
by trailing whitespace padding ``fillchar``."""
rightside = fillchar * int((max(0.0, float(width - self.length())))
/ float(len(fillchar)))
return u''.join((self, rightside))
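# An illustrative doctest-style sketch (added here; assumes a styling terminal
# whose ``term.red`` emits color sequences). The sequences contribute no
# printable width, so only the two visible cells are padded:
#
#     >>> term = Terminal()
#     >>> Sequence(term.red(u'xy'), term).ljust(4) == term.red(u'xy') + u'  '
#     True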
def rjust(self, width, fillchar=u' '):
"""S.rjust(width, fillchar=u'') -> unicode
Returns string derived from unicode string ``S``, right-adjusted
by leading whitespace padding ``fillchar``."""
leftside = fillchar * int((max(0.0, float(width - self.length())))
/ float(len(fillchar)))
return u''.join((leftside, self))
def center(self, width, fillchar=u' '):
"""S.center(width, fillchar=u'') -> unicode
Returns string derived from unicode string ``S``, centered
and surrounded with whitespace padding ``fillchar``."""
split = max(0.0, float(width) - self.length()) / 2
leftside = fillchar * int((max(0.0, math.floor(split)))
/ float(len(fillchar)))
rightside = fillchar * int((max(0.0, math.ceil(split)))
/ float(len(fillchar)))
return u''.join((leftside, self, rightside))
def length(self):
"""S.length() -> int
Returns printable length of unicode string ``S`` that may contain
terminal sequences.
Although accounted for, strings containing sequences such as
``term.clear`` will not give accurate returns; such a sequence is
not considered lengthy (it has a length of 0). Combining characters
are also not considered lengthy.
Strings containing ``term.left`` or ``\b`` will cause "overstrike",
but a length less than 0 is not ever returned. So ``_\b+`` is a
length of 1 (``+``), but ``\b`` is simply a length of 0.
Some characters may consume more than one cell, mainly those CJK
Unified Ideographs (Chinese, Japanese, Korean) defined by Unicode
as half or full-width characters.
For example:
>>> from blessed import Terminal
>>> from blessed.sequences import Sequence
>>> term = Terminal()
>>> Sequence(term.clear + term.red(u'コンニチハ')).length()
5
"""
# because combining characters may return -1, "clip" their length to 0.
clip = functools.partial(max, 0)
return sum(clip(wcwidth.wcwidth(w_char))
for w_char in self.strip_seqs())
def strip(self, chars=None):
"""S.strip([chars]) -> unicode
Return a copy of the string S with terminal sequences removed, and
leading and trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return self.strip_seqs().strip(chars)
def lstrip(self, chars=None):
"""S.lstrip([chars]) -> unicode
Return a copy of the string S with terminal sequences and leading
whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return self.strip_seqs().lstrip(chars)
def rstrip(self, chars=None):
"""S.rstrip([chars]) -> unicode
Return a copy of the string S with terminal sequences and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return self.strip_seqs().rstrip(chars)
def strip_seqs(self):
"""S.strip_seqs() -> unicode
Return a string without sequences for a string that contains
sequences for the Terminal with which they were created.
Where sequence ``move_right(n)`` is detected, it is replaced with
``n * u' '``, and where ``move_left()`` or ``\\b`` is detected,
those last-most characters are destroyed.
All other sequences are simply removed. An example,
>>> from blessed import Terminal
>>> from blessed.sequences import Sequence
>>> term = Terminal()
>>> Sequence(term.clear + term.red(u'test')).strip_seqs()
u'test'
"""
# nxt: points to first character beyond current escape sequence.
# width: currently estimated display length.
input = self.padd()
outp = u''
nxt = 0
for idx in range(0, len(input)):
if idx == nxt:
# at sequence, point beyond it,
nxt = idx + measure_length(input[idx:], self._term)
if nxt <= idx:
# append non-sequence to outp,
outp += input[idx]
# point beyond next sequence, if any,
# otherwise point to next character
nxt = idx + measure_length(input[idx:], self._term) + 1
return outp
def padd(self):
"""S.padd() -> unicode
Make non-destructive space or backspace into destructive ones.
Where sequence ``move_right(n)`` is detected, it is replaced with
``n * u' '``. Where sequence ``move_left(n)`` or ``\\b`` is
detected, those last-most characters are destroyed.
"""
outp = u''
nxt = 0
for idx in range(0, text_type.__len__(self)):
width = horizontal_distance(self[idx:], self._term)
if width != 0:
nxt = idx + measure_length(self[idx:], self._term)
if width > 0:
outp += u' ' * width
elif width < 0:
outp = outp[:width]
if nxt <= idx:
outp += self[idx]
nxt = idx + 1
return outp
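# Illustrative examples (added; the second assumes ``term.move_right(3)``
# emits a ``cuf(3)`` sequence such as u'\x1b[3C'):
#
#     >>> Sequence(u'_\b+', term).padd()
#     u'+'
#     >>> Sequence(u'x' + term.move_right(3) + u'y', term).padd()
#     u'x   y'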
def measure_length(ucs, term):
"""measure_length(S, term) -> int
Returns non-zero for string ``S`` that begins with a terminal sequence,
that is: the width of the first unprintable sequence found in S. For use
as a *next* pointer to skip past sequences. If string ``S`` is not a
sequence, 0 is returned.
A sequence may be a typical terminal sequence beginning with Escape
(``\x1b``), especially a Control Sequence Initiator (``CSI``, ``\x1b[``,
...), or those of ``\a``, ``\b``, ``\r``, ``\n``, ``\x0e`` (shift out),
``\x0f`` (shift in). They do not necessarily have to begin with CSI, they
need only match the capabilities of attributes ``_re_will_move`` and
``_re_wont_move`` of terminal ``term``.
"""
# simple terminal control characters,
ctrl_seqs = u'\a\b\r\n\x0e\x0f'
if any([ucs.startswith(_ch) for _ch in ctrl_seqs]):
return 1
# known multibyte sequences,
matching_seq = term and (
term._re_will_move.match(ucs) or
term._re_wont_move.match(ucs) or
term._re_cub and term._re_cub.match(ucs) or
term._re_cuf and term._re_cuf.match(ucs)
)
if matching_seq:
start, end = matching_seq.span()
return end
# none found, must be printable!
return 0
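# Examples of the return value (illustrative; they assume a vt-like terminal
# whose sgr0 capability is exactly u'\x1b[m'):
#
#     measure_length(u'\b', term)       # -> 1  (simple control character)
#     measure_length(u'\x1b[m_', term)  # -> 3  (length of the sequence only)
#     measure_length(u'_\x1b[m', term)  # -> 0  (does not begin with a sequence)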
def termcap_distance(ucs, cap, unit, term):
"""termcap_distance(S, cap, unit, term) -> int
Match horizontal distance by simple ``cap`` capability name, ``cub1`` or
``cuf1``, with string matching the sequences identified by Terminal
instance ``term`` and a distance of ``unit`` *1* or *-1*, for right and
left, respectively.
Otherwise, by regular expression (using dynamic regular expressions built
using ``cub(n)`` and ``cuf(n)``. Failing that, any of the standard CSI
cursor sequences (``\033[C``, ``\033[D``, ``\033[nC``, ``\033[nD``).
Returns 0 if unmatched.
"""
assert cap in ('cuf', 'cub')
# match cub1(left), cuf1(right)
one = getattr(term, '_%s1' % (cap,))
if one and ucs.startswith(one):
return unit
# match cub(n), cuf(n) using regular expressions
re_pattern = getattr(term, '_re_%s' % (cap,))
_dist = re_pattern and re_pattern.match(ucs)
if _dist:
return unit * int(_dist.group(1))
return 0
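# For example, on a terminal where cub1 is u'\b' and cub(n) emits
# u'\x1b[<n>D' (a common vt-style value, assumed here):
#
#     termcap_distance(u'\b', 'cub', -1, term)       # -> -1
#     termcap_distance(u'\x1b[4D', 'cub', -1, term)  # -> -4
#     termcap_distance(u'hello', 'cub', -1, term)    # -> 0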
def horizontal_distance(ucs, term):
"""horizontal_distance(S, term) -> int
Returns integer ``<n>`` in CSI sequence of form ``<ESC>[<n>C``
(T.move_right(n)), or ``-(n)`` in sequence of form ``<ESC>[<n>D``
(T.move_left(n)). Returns -1 for backspace (0x08), Otherwise 0.
Tabstop (``\t``) cannot be correctly calculated, as the relative column
position cannot be determined: 8 is always (and, incorrectly) returned.
"""
if ucs.startswith('\b'):
return -1
elif ucs.startswith('\t'):
# As best as I can prove it, a tabstop is always 8 by default.
# Though, given that blessings is:
#
# 1. unaware of the output device's current cursor position, and
# 2. unaware of the location the callee may choose to output any
# given string,
#
# It is not possible to determine how many cells any particular
# \t would consume on the output device!
return 8
return (termcap_distance(ucs, 'cub', -1, term) or
termcap_distance(ucs, 'cuf', 1, term) or
0)
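# Illustrative results (the final example assumes the terminal's cuf(n)
# pattern matches u'\x1b[<n>C'):
#
#     horizontal_distance(u'\b', term)       # -> -1
#     horizontal_distance(u'\t', term)       # -> 8
#     horizontal_distance(u'\x1b[5C', term)  # -> 5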
| AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/blessed/sequences.py | Python | mit | 26,038 | 0.000154 |
# -*- coding: utf-8 -*-
"""
Initialize the package.
"""
from .main import app
from . import views
| stxnext-kindergarten/presence-analyzer-agrochowski | src/presence_analyzer/__init__.py | Python | mit | 97 | 0 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack.compute.contrib import certificates
from nova import context
from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_root_cert(context, *args, **kwargs):
return 'fakeroot'
def fake_create_cert(context, *args, **kwargs):
return 'fakepk', 'fakecert'
class CertificatesTest(test.NoDBTestCase):
def setUp(self):
super(CertificatesTest, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.controller = certificates.CertificatesController()
def test_translate_certificate_view(self):
pk, cert = fake_create_cert(self.context)
view = certificates._translate_certificate_view(cert, pk)
self.assertEqual(view['data'], cert)
self.assertEqual(view['private_key'], pk)
def test_certificates_show_root(self):
self.stubs.Set(rpc, 'call', fake_get_root_cert)
req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/root')
res_dict = self.controller.show(req, 'root')
cert = fake_get_root_cert(self.context)
response = {'certificate': {'data': cert, 'private_key': None}}
self.assertEqual(res_dict, response)
def test_certificates_create_certificate(self):
self.stubs.Set(rpc, 'call', fake_create_cert)
req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/')
res_dict = self.controller.create(req)
pk, cert = fake_create_cert(self.context)
response = {'certificate': {'data': cert, 'private_key': pk}}
self.assertEqual(res_dict, response)
class CertificatesSerializerTest(test.NoDBTestCase):
def test_index_serializer(self):
serializer = certificates.CertificateTemplate()
text = serializer.serialize(dict(
certificate=dict(
data='fakecert',
private_key='fakepk'),
))
tree = etree.fromstring(text)
self.assertEqual('certificate', tree.tag)
self.assertEqual('fakepk', tree.get('private_key'))
self.assertEqual('fakecert', tree.get('data'))
| ntt-sic/nova | nova/tests/api/openstack/compute/contrib/test_certificates.py | Python | apache-2.0 | 2,810 | 0 |
from gi.repository import Gtk, Gdk, GObject, Pango
import commands
import time
import sys,os
import threading
import sqlite3
from config_note import Config
config_note = Config()
path = "/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/turbonote-adds/"
path_icon = "/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/"
stay = ""
connb = sqlite3.connect(path + 'turbo.db')
a = connb.cursor()
a.execute("SELECT * FROM notestyle")
rows = a.fetchall()
f1 = (str(rows[0][0]))
f2 = (str(rows[0][1]))
f3 = (str(rows[0][2]))
f4 = (str(rows[0][3]))
f5 = str(rows[0][4])
f6 = str(rows[0][5])
connb.close()
def setF1(f):
global f1
f1 = f
def setF2(f):
global f2
f2 = f
def setF3(f):
global f3
f3 = f
def setF4(f):
global f4
f4 = f
def setF5(f):
global f5
f5 = f
def setF6(f):
global f6
f6 = f
class WindowStyle(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="SVN UPDATE")
self.set_default_size(520, 400)
self.set_border_width(15)
self.set_position(Gtk.WindowPosition.CENTER)
self.set_resizable( False )
hb = Gtk.HeaderBar()
hb.props.show_close_button = True
hb.props.title = "NOTE STYLE FOR WINDOWS VIEW"
self.set_titlebar(hb)
self.grid = Gtk.Grid()
self.add(self.grid)
self.space = Gtk.Label()
self.space.set_text(" ")
self.space2 = Gtk.Label()
self.space2.set_text(" ")
self.space3 = Gtk.Label()
self.space3.set_text(" ")
self.space4 = Gtk.Label()
self.space4.set_text(" ")
self.space5 = Gtk.Label()
self.space5.set_text(" ")
self.title_body = Gtk.Label()
self.title_body.set_text("Body Components")
self.title_title = Gtk.Label()
self.title_title.set_text("Title Components")
self.noteTextLabel = Gtk.Label("\n\n\n\n\n Select font for text note... \n\n\n\n\n")
self.noteTextTitle = Gtk.Label(" Note Title... ")
fontbt = Gtk.Button()
fontbt.set_tooltip_text("Body font")
fontbt.connect("clicked", self.on_clickedTextFont)
fontcolorbt = Gtk.Button()
fontcolorbt.set_tooltip_text("Text body color")
fontcolorbt.connect("clicked", self.on_clickedTextColor)
fontbtTitle = Gtk.Button()
fontbtTitle.set_tooltip_text("Font title")
fontbtTitle.connect("clicked", self.on_clickedTextFontTitle)
fontcolorbtTitle = Gtk.Button()
fontcolorbtTitle.set_tooltip_text("title text color")
fontcolorbtTitle.connect("clicked", self.on_clickedTextColorTitle)
bodyColor = Gtk.Button()
bodyColor.set_tooltip_text("Body Color")
bodyColor.connect("clicked", self.on_clickedTextColorBody)
bodytitleColor = Gtk.Button()
bodytitleColor.set_tooltip_text("Title color")
bodytitleColor.connect("clicked", self.on_clickedTextColorTitleBody)
save = Gtk.Button()
save.set_tooltip_text("Save Config")
save.connect("clicked", self.on_save)
self.colorBody = Gtk.Image()
self.colorBody.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventb" + config_note.getColor() + ".png")
bodyColor.add(self.colorBody)
self.colorTextBody = Gtk.Image()
self.colorTextBody.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventtb" + config_note.getColor() + ".png")
fontcolorbt.add(self.colorTextBody)
self.fontTextBody = Gtk.Image()
self.fontTextBody.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventt" + config_note.getColor() + ".png")
fontbt.add(self.fontTextBody)
self.colorBodyTitle = Gtk.Image()
self.colorBodyTitle.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventb" + config_note.getColor() + ".png")
fontcolorbtTitle.add(self.colorBodyTitle)
self.colorTextBodyTitle = Gtk.Image()
self.colorTextBodyTitle.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventtb" + config_note.getColor() + ".png")
bodytitleColor.add(self.colorTextBodyTitle)
self.fontTextBodyTitle = Gtk.Image()
self.fontTextBodyTitle.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventt" + config_note.getColor() + ".png")
fontbtTitle.add(self.fontTextBodyTitle)
self.saveimg = Gtk.Image()
self.saveimg.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_save" + config_note.getColor() + ".png")
save.add(self.saveimg)
self.grid.attach(self.title_body, 0,0 , 3 , 1)
self.grid.attach(self.space2, 0,1 , 1 , 1)
self.grid.attach(bodyColor ,0,2 , 1 , 1)
self.grid.attach(fontcolorbt ,1,2 , 1 , 1)
self.grid.attach(fontbt ,2,2 , 1 , 1)
self.grid.attach(self.space, 3,2 , 1 , 3)
self.grid.attach(self.noteTextTitle, 4,0 , 1 , 2)
self.grid.attach(self.noteTextLabel, 4,1 , 1 , 8)
self.grid.attach(self.space3, 0,3 , 3 , 1)
self.grid.attach(self.title_title, 0,4 , 3 , 1)
self.grid.attach(self.space4, 0,5 , 3 , 1)
self.grid.attach(fontbtTitle, 2,6 , 1 , 1)
self.grid.attach(bodytitleColor, 1,6 , 1 , 1)
self.grid.attach(fontcolorbtTitle, 0,6, 1 , 1)
self.grid.attach(self.space5, 0,7 , 3 , 1)
self.grid.attach(save, 0,8 , 3 , 1)
font1 = Gdk.RGBA()
font2 = Gdk.RGBA()
font3 = Gdk.RGBA()
font4 = Gdk.RGBA()
connb = sqlite3.connect(path + 'turbo.db')
a = connb.cursor()
a.execute("SELECT * FROM notestyle")
rows = a.fetchall()
font1.parse(str(rows[0][0]))
font2.parse(str(rows[0][1]))
font3.parse(str(rows[0][2]))
font4.parse(str(rows[0][3]))
fontbt = str(rows[0][4])
fontbb = str(rows[0][5])
connb.close()
self.noteTextTitle.override_color(Gtk.StateFlags.NORMAL, font3)
self.noteTextTitle.override_background_color(Gtk.StateFlags.NORMAL, font1)
self.noteTextLabel.override_color(Gtk.StateFlags.NORMAL, font4)
self.noteTextLabel.override_background_color(Gtk.StateFlags.NORMAL, font2)
self.noteTextTitle.modify_font(Pango.FontDescription(fontbt))
self.noteTextLabel.modify_font(Pango.FontDescription(fontbb))
def rgb_to_hex(self,rgb):
return '#%02x%02x%02x' % rgb
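# For example, self.rgb_to_hex((255, 128, 0)) returns '#ff8000'.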
def on_clickedTextColorTitleBody(self, widget):
cdia = Gtk.ColorSelectionDialog("Select color")
response = cdia.run()
if response == Gtk.ResponseType.OK:
colorsel = cdia.get_color_selection()
rgb = colorsel.get_current_rgba().to_string()
rgb = rgb.replace("rgb","").replace("(","").replace(")","").split(',')
setF1(self.rgb_to_hex((int(rgb[0]), int(rgb[1]), int(rgb[2]))))
self.noteTextTitle.override_background_color(Gtk.StateFlags.NORMAL, colorsel.get_current_rgba())
cdia.destroy()
def on_save(self, widget):
connb = sqlite3.connect(path + 'turbo.db')
a = connb.cursor()
a.execute("UPDATE notestyle SET titulo_color ='" + f1 +"',body_color='" + f2 + "',titulo_font_color ='" + f3 + "',body_font_color ='" + f4 + "',titulo_font_type='" + f5 + "',body_font_type = '" + f6 + "' where 1=1;")
connb.commit()
connb.close()
def on_clickedTextColorBody(self, widget):
cdia = Gtk.ColorSelectionDialog("Select color")
response = cdia.run()
if response == Gtk.ResponseType.OK:
colorsel = cdia.get_color_selection()
rgb = colorsel.get_current_rgba().to_string()
rgb = rgb.replace("rgb","").replace("(","").replace(")","").split(',')
setF2(self.rgb_to_hex((int(rgb[0]), int(rgb[1]), int(rgb[2]))))
self.noteTextLabel.override_background_color(Gtk.StateFlags.NORMAL, colorsel.get_current_rgba())
cdia.destroy()
def on_clickedTextColor(self, widget):
cdia = Gtk.ColorSelectionDialog("Select color")
response = cdia.run()
if response == Gtk.ResponseType.OK:
colorsel = cdia.get_color_selection()
rgb = colorsel.get_current_rgba().to_string()
rgb = rgb.replace("rgb","").replace("(","").replace(")","").split(',')
setF4(self.rgb_to_hex((int(rgb[0]), int(rgb[1]), int(rgb[2]))))
self.noteTextLabel.override_color(Gtk.StateFlags.NORMAL, colorsel.get_current_rgba())
cdia.destroy()
def on_clickedTextFont(self, widget):
fdia = Gtk.FontSelectionDialog("Select font name")
response = fdia.run()
if response == Gtk.ResponseType.OK:
font_desc = Pango.FontDescription(fdia.get_font_name())
setF6(font_desc.get_family())
if font_desc:
self.noteTextLabel.modify_font(font_desc)
fdia.destroy()
def on_clickedTextColorTitle(self, widget):
cdia = Gtk.ColorSelectionDialog("Select color")
response = cdia.run()
if response == Gtk.ResponseType.OK:
colorsel = cdia.get_color_selection()
rgb = colorsel.get_current_rgba().to_string()
rgb = rgb.replace("rgb","").replace("(","").replace(")","").split(',')
setF3(self.rgb_to_hex((int(rgb[0]), int(rgb[1]), int(rgb[2]))))
self.noteTextTitle.override_color(Gtk.StateFlags.NORMAL, colorsel.get_current_rgba())
cdia.destroy()
def on_clickedTextFontTitle(self, widget):
fdia = Gtk.FontSelectionDialog("Select font name")
response = fdia.run()
if response == Gtk.ResponseType.OK:
font_desc = Pango.FontDescription(fdia.get_font_name())
if font_desc:
setF5(font_desc.get_family())
self.noteTextTitle.modify_font(font_desc)
fdia.destroy()
| iksws/GnomeTurboNoteExtension | turbonote-adds/notestyle.py | Python | gpl-3.0 | 10,601 | 0.016602 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/any.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/any.proto',
package='google.protobuf',
syntax='proto3',
serialized_pb=_b('\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42o\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z%github.com/golang/protobuf/ptypes/any\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
)
_ANY = _descriptor.Descriptor(
name='Any',
full_name='google.protobuf.Any',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type_url', full_name='google.protobuf.Any.type_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Any.value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=84,
)
DESCRIPTOR.message_types_by_name['Any'] = _ANY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Any = _reflection.GeneratedProtocolMessageType('Any', (_message.Message,), dict(
DESCRIPTOR = _ANY,
__module__ = 'google.protobuf.any_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Any)
))
_sym_db.RegisterMessage(Any)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.google.protobufB\010AnyProtoP\001Z%github.com/golang/protobuf/ptypes/any\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'))
# @@protoc_insertion_point(module_scope)
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/google/protobuf/any_pb2.py | Python | bsd-2-clause | 2,652 | 0.006787 |
import logging
from tkp.accessors import CasaImage
logger = logging.getLogger(__name__)
class AartfaacCasaImage(CasaImage):
def __init__(self, url, plane=0, beam=None):
super(AartfaacCasaImage, self).__init__(url, plane=0, beam=None)
self.taustart_ts = self.parse_taustartts()
self.telescope = self.table.getkeyword('coords')['telescope']
# TODO: header doesn't contain integration time
# aartfaac imaging pipeline issue #25
self.tau_time = 1
def parse_frequency(self):
"""
Extract frequency related information from headers
(Overrides the implementation in CasaImage, which pulls the entries
from the 'spectral2' sub-table.)
"""
keywords = self.table.getkeywords()
# due to some undocumented casacore feature, the 'spectral' keyword
# changes from spectral1 to spectral2 when AARTFAAC imaging developers
# changed some of the header information. For now we will try both
# locations.
if 'spectral1' in keywords['coords']:
keyword = 'spectral1'
if 'spectral2' in keywords['coords']:
keyword = 'spectral2'
freq_eff = keywords['coords'][keyword]['restfreq']
freq_bw = keywords['coords'][keyword]['wcs']['cdelt']
return freq_eff, freq_bw
| bartscheers/tkp | tkp/accessors/aartfaaccasaimage.py | Python | bsd-2-clause | 1,347 | 0 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import XHR
import scraper
VIDEO_URL = '/video_info/iframe'
class Scraper(scraper.Scraper):
OPTIONS = ['https://xmovies8.org', 'https://putlockerhd.co', 'https://afdah.org', 'https://watch32hd.co']
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'xmovies8'
def resolve_link(self, link):
link = link.split('|', 1)[0]
html = self._http_get(link, allow_redirect=False, method='HEAD', cache_limit=0)
if html.startswith('http'):
return html
else:
return link
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
page_url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=0)
match = re.search('var\s*video_id\s*=\s*"([^"]+)', html)
if not match: return hosters
video_id = match.group(1)
headers = {'Referer': page_url}
headers.update(XHR)
_html = self._http_get(scraper_utils.urljoin(self.base_url, 'av'), headers=headers, method='POST', cache_limit=0)
vid_url = scraper_utils.urljoin(self.base_url, VIDEO_URL)
html = self._http_get(vid_url, data={'v': video_id}, headers=headers, cache_limit=0)
for source, value in scraper_utils.parse_json(html, vid_url).iteritems():
match = re.search('url=(.*)', value)
if not match: continue
stream_url = urllib.unquote(match.group(1))
host = scraper_utils.get_direct_hostname(self, stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
quality = scraper_utils.height_get_quality(source)
stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
hosters.append(hoster)
return hosters
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = scraper_utils.urljoin(self.base_url, '/results')
params = {'q': title}
referer = search_url + '?' + urllib.urlencode(params)
headers = {'Referer': referer}
headers.update(XHR)
_html = self._http_get(scraper_utils.urljoin(self.base_url, 'av'), headers=headers, method='POST', cache_limit=0)
cookies = {'begin_referer': referer, 'prounder': 1}
html = self._http_get(search_url, params=params, cookies=cookies, cache_limit=8)
if any('jquery.js' in match.attrs['src'] for match in dom_parser2.parse_dom(html, 'script', req='src')):
html = self._http_get(search_url, params=params, cookies=cookies, cache_limit=0)
for _attrs, result in dom_parser2.parse_dom(html, 'div', {'class': 'cell'}):
title_frag = dom_parser2.parse_dom(result, 'div', {'class': 'video_title'})
year_frag = dom_parser2.parse_dom(result, 'div', {'class': 'video_quality'})
if not title_frag: continue
match = dom_parser2.parse_dom(title_frag[0].content, 'a', req='href')
if not match: continue
match_url = match[0].attrs['href']
match_title = match[0].content
try:
match = re.search('\s+(\d{4})\s+', year_frag[0].content)
match_year = match.group(1)
except:
match_year = ''
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings.append(' <setting id="%s-default_url" type="text" visible="false"/>' % (cls.get_name()))
return settings
scraper_utils.set_default_url(Scraper)
| TheWardoctor/Wardoctors-repo | plugin.video.salts/scrapers/xmovies8_scraper.py | Python | apache-2.0 | 5,391 | 0.006678 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
"""The response of a single compartment neuron with leak channels to step current injection.
In this example, we build a single section neuron, with passive channels,
and stimulate it with a step current clamp of 200pA for 100ms starting at t=100ms.
We also create a summary pdf of the simulation.
"""
from morphforge.stdimports import *
from morphforgecontrib.stdimports import StdChlLeak
# Create the morphology for the cell:
morphDict1 = {'root': {'length': 20, 'diam': 20, 'id':'soma'} }
m1 = MorphologyTree.fromDictionary(morphDict1)
# Create the environment:
env = NEURONEnvironment()
# Create the simulation:
sim = env.Simulation()
# Create a cell:
cell = sim.create_cell(name="Cell1", morphology=m1)
# Apply the mechanisms to the cells
lk_chl = env.Channel(StdChlLeak,
name="LkChl",
conductance=qty("0.25:mS/cm2"),
reversalpotential=qty("-51:mV"),
)
cell.apply_channel( lk_chl)
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))
# Create the stimulus and record the injected current:
cc = sim.create_currentclamp(name="Stim1", amp=qty("200:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
# Define what to record:
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.recordall(lk_chl, cell_location=cell.soma)
# run the simulation
results = sim.run()
# Create an output .pdf
SimulationMRedoc.build( sim ).to_pdf(__file__ + '.pdf')
# Display the results:
TagViewer([results], figtitle="The response of a neuron to step current injection", timerange=(95, 200)*units.ms, show=True)
| mikehulluk/morphforge | src/morphforgeexamples/exset2_singlecell_simulations/singlecell_simulation010.py | Python | bsd-2-clause | 3,237 | 0.006179 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template import Library
from ..models import Espacio
register = Library()
@register.inclusion_tag('espacios/_otros_espacios.html', takes_context=True)
def otros_espacios(context):
qs = Espacio.objects.all()
if 'espacio' in context:
obj = context['espacio']
if obj:
qs = qs.exclude(pk=obj.pk)
return {'otros_espacios': qs}
| andensinlimite/metaespacio | metaespacio/espacios/templatetags/espacios.py | Python | agpl-3.0 | 441 | 0 |
import sys
import os
import xbmc
import xbmcgui
import xbmcplugin
import threading
import socket
import urllib
from Queue import Queue
import plugins
import ConfigParser
import logging
import difflib
try: current_dlg_id = xbmcgui.getCurrentWindowDialogId()
except: current_dlg_id = 0
current_win_id = xbmcgui.getCurrentWindowId()
_ = sys.modules[ "__main__" ].__language__
__scriptname__ = sys.modules[ "__main__" ].__scriptname__
__version__ = sys.modules[ "__main__" ].__version__
STATUS_LABEL = 100
LOADING_IMAGE = 110
SUBTITLES_LIST = 120
trans_lang = {'aa' : 'Afar',
'ab' : 'Abkhaz',
'ae' : 'Avestan',
'af' : 'Afrikaans',
'ak' : 'Akan',
'am' : 'Amharic',
'an' : 'Aragonese',
'ar' : 'Arabic',
'as' : 'Assamese',
'av' : 'Avaric',
'ay' : 'Aymara',
'az' : 'Azerbaijani',
'ba' : 'Bashkir',
'be' : 'Belarusian',
'bg' : 'Bulgarian',
'bh' : 'Bihari',
'bi' : 'Bislama',
'bm' : 'Bambara',
'bn' : 'Bengali',
'bo' : 'Tibetan',
'br' : 'Breton',
'bs' : 'Bosnian',
'ca' : 'Catalan',
'ce' : 'Chechen',
'ch' : 'Chamorro',
'co' : 'Corsican',
'cr' : 'Cree',
'cs' : 'Czech',
'cu' : 'Old Church Slavonic',
'cv' : 'Chuvash',
'cy' : 'Welsh',
'da' : 'Danish',
'de' : 'German',
'dv' : 'Divehi',
'dz' : 'Dzongkha',
'ee' : 'Ewe',
'el' : 'Greek',
'en' : 'English',
'eo' : 'Esperanto',
'es' : 'Spanish',
'et' : 'Estonian',
'eu' : 'Basque',
'fa' : 'Persian',
'ff' : 'Fula',
'fi' : 'Finnish',
'fj' : 'Fijian',
'fo' : 'Faroese',
'fr' : 'French',
'fy' : 'Western Frisian',
'ga' : 'Irish',
'gd' : 'Scottish Gaelic',
'gl' : 'Galician',
'gn' : 'Guaraní',
'gu' : 'Gujarati',
'gv' : 'Manx',
'ha' : 'Hausa',
'he' : 'Hebrew',
'hi' : 'Hindi',
'ho' : 'Hiri Motu',
'hr' : 'Croatian',
'ht' : 'Haitian',
'hu' : 'Hungarian',
'hy' : 'Armenian',
'hz' : 'Herero',
'ia' : 'Interlingua',
'id' : 'Indonesian',
'ie' : 'Interlingue',
'ig' : 'Igbo',
'ii' : 'Nuosu',
'ik' : 'Inupiaq',
'io' : 'Ido',
'is' : 'Icelandic',
'it' : 'Italian',
'iu' : 'Inuktitut',
'ja' : 'Japanese (ja)',
'jv' : 'Javanese (jv)',
'ka' : 'Georgian',
'kg' : 'Kongo',
'ki' : 'Kikuyu',
'kj' : 'Kwanyama',
'kk' : 'Kazakh',
'kl' : 'Kalaallisut',
'km' : 'Khmer',
'kn' : 'Kannada',
'ko' : 'Korean',
'kr' : 'Kanuri',
'ks' : 'Kashmiri',
'ku' : 'Kurdish',
'kv' : 'Komi',
'kw' : 'Cornish',
'ky' : 'Kirghiz, Kyrgyz',
'la' : 'Latin',
'lb' : 'Luxembourgish',
'lg' : 'Luganda',
'li' : 'Limburgish',
'ln' : 'Lingala',
'lo' : 'Lao',
'lt' : 'Lithuanian',
'lu' : 'Luba-Katanga',
'lv' : 'Latvian',
'mg' : 'Malagasy',
'mh' : 'Marshallese',
'mi' : 'Maori',
'mk' : 'Macedonian',
'ml' : 'Malayalam',
'mn' : 'Mongolian',
'mr' : 'Marathi',
'ms' : 'Malay',
'mt' : 'Maltese',
'my' : 'Burmese',
'na' : 'Nauru',
'nb' : 'Norwegian',
'nd' : 'North Ndebele',
'ne' : 'Nepali',
'ng' : 'Ndonga',
'nl' : 'Dutch',
'nn' : 'Norwegian Nynorsk',
'no' : 'Norwegian',
'nr' : 'South Ndebele',
'nv' : 'Navajo, Navaho',
'ny' : 'Chichewa; Chewa; Nyanja',
'oc' : 'Occitan',
'oj' : 'Ojibwe, Ojibwa',
'om' : 'Oromo',
'or' : 'Oriya',
'os' : 'Ossetian, Ossetic',
'pa' : 'Panjabi, Punjabi',
'pi' : 'Pali',
'pl' : 'Polish',
'ps' : 'Pashto, Pushto',
'pt' : 'Portuguese',
'pb' : 'Brazilian',
'qu' : 'Quechua',
'rm' : 'Romansh',
'rn' : 'Kirundi',
'ro' : 'Romanian',
'ru' : 'Russian',
'rw' : 'Kinyarwanda',
'sa' : 'Sanskrit',
'sc' : 'Sardinian',
'sd' : 'Sindhi',
'se' : 'Northern Sami',
'sg' : 'Sango',
'si' : 'Sinhala, Sinhalese',
'sk' : 'Slovak',
'sl' : 'Slovene',
'sm' : 'Samoan',
'sn' : 'Shona',
'so' : 'Somali',
'sq' : 'Albanian',
'sr' : 'Serbian',
'ss' : 'Swati',
'st' : 'Southern Sotho',
'su' : 'Sundanese',
'sv' : 'Swedish',
'sw' : 'Swahili',
'ta' : 'Tamil',
'te' : 'Telugu',
'tg' : 'Tajik',
'th' : 'Thai',
'ti' : 'Tigrinya',
'tk' : 'Turkmen',
'tl' : 'Tagalog',
'tn' : 'Tswana',
'to' : 'Tonga',
'tr' : 'Turkish',
'ts' : 'Tsonga',
'tt' : 'Tatar',
'tw' : 'Twi',
'ty' : 'Tahitian',
'ug' : 'Uighur',
'uk' : 'Ukrainian',
'ur' : 'Urdu',
'uz' : 'Uzbek',
've' : 'Venda',
'vi' : 'Vietnamese',
'vo' : 'Volapük',
'wa' : 'Walloon',
'wo' : 'Wolof',
'xh' : 'Xhosa',
'yi' : 'Yiddish',
'yo' : 'Yoruba',
'za' : 'Zhuang, Chuang',
'zh' : 'Chinese',
'zu' : 'Zulu' }
SELECT_ITEM = ( 11, 256, 61453, )
EXIT_SCRIPT = ( 10, 247, 275, 61467, 216, 257, 61448, )
CANCEL_DIALOG = EXIT_SCRIPT + ( 216, 257, 61448, )
GET_EXCEPTION = ( 216, 260, 61448, )
SELECT_BUTTON = ( 229, 259, 261, 61453, )
MOVEMENT_UP = ( 166, 270, 61478, )
MOVEMENT_DOWN = ( 167, 271, 61480, )
DEBUG_MODE = 5
# Log status codes
LOG_INFO, LOG_ERROR, LOG_NOTICE, LOG_DEBUG = range( 1, 5 )
def LOG( status, format, *args ):
if ( DEBUG_MODE >= status ):
xbmc.output( "%s: %s\n" % ( ( "INFO", "ERROR", "NOTICE", "DEBUG", )[ status - 1 ], format % args, ) )
def sort_inner(inner):
if("hash" in inner and inner["hash"] == True):
return 100
return inner["percent"]
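# For example, sort_inner({"hash": True, "percent": 42.0}) returns 100, so
# hash-verified subtitles always sort ahead of fuzzy filename matches, while
# sort_inner({"percent": 42.0}) returns 42.0.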
class GUI( xbmcgui.WindowXMLDialog ):
socket.setdefaulttimeout(10.0) #seconds
def __init__( self, *args, **kwargs ):
pass
def set_filepath( self, path ):
LOG( LOG_INFO, "set_filepath [%s]" , ( path ) )
self.file_original_path = path
self.file_path = path[path.find(os.sep):len(path)]
def set_filehash( self, hash ):
LOG( LOG_INFO, "set_filehash [%s]" , ( hash ) )
self.file_hash = hash
def set_filesize( self, size ):
LOG( LOG_INFO, "set_filesize [%s]" , ( size ) )
self.file_size = size
def set_searchstring( self, search ):
LOG( LOG_INFO, "set_searchstring [%s]" , ( search ) )
self.search_string = search
def set_type( self, type ):
self.file_type = type
def onInit( self ):
LOG( LOG_INFO, "onInit" )
self.setup_all()
if self.file_path:
self.connThread = threading.Thread( target=self.connect, args=() )
self.connThread.start()
def setup_all( self ):
self.setup_variables()
def setup_variables( self ):
self.controlId = -1
self.allow_exception = False
if xbmc.Player().isPlayingVideo():
self.set_filepath( xbmc.Player().getPlayingFile() )
def connect( self ):
self.setup_all()
logging.basicConfig()
self.getControl( LOADING_IMAGE ).setVisible( True )
self.getControl( STATUS_LABEL ).setLabel( "Searching" )
sub_filename = os.path.basename(self.file_original_path)
title = sub_filename[0:sub_filename.rfind(".")]
self.getControl( 180 ).setLabel("[B][UPPERCASE]$LOCALIZE[293]:[/B] " + title + "[/UPPERCASE]");
langs = None
subtitles = []
q = Queue()
self.config = ConfigParser.SafeConfigParser({"lang": "All", "plugins" : "BierDopje,OpenSubtitles", "tvplugins" : "BierDopje,OpenSubtitles", "movieplugins" : "OpenSubtitles" })
basepath = "/data/etc" # os.path.dirname(__file__)
self.config.read(basepath + "/.subtitles")
config_plugins = self.config.get("DEFAULT", "plugins")
if(self.file_type == "tv"):
config_plugins = self.config.get("DEFAULT", "tvplugins")
elif(self.file_type == "movie"):
config_plugins = self.config.get("DEFAULT", "movieplugins")
use_plugins = map(lambda x : x.strip(), config_plugins.split(","))
config_langs = self.config.get("DEFAULT", "lang")
if(config_langs != "All" and config_langs != ""):
use_langs = map(lambda x : x.strip(), config_langs.split(","))
else:
use_langs = None
for name in use_plugins:
filep = self.file_original_path
try :
plugin = getattr(plugins, name)(self.config, '/data/hack/cache')
LOG( LOG_INFO, "Searching on %s ", (name) )
thread = threading.Thread(target=plugin.searchInThread, args=(q, str(filep), use_langs))
thread.start()
except ImportError, (e) :
LOG( LOG_INFO, "Plugin %s is not a valid plugin name. Skipping it.", ( e) )
# Get data from the queue and wait till we have a result
count = 0
for name in use_plugins:
subs = q.get(True)
count = count + 1
self.getControl( STATUS_LABEL ).setLabel( "Searching " + str(count) + "/" + str(len(use_plugins)) )
if subs and len(subs) > 0:
if not use_langs:
subtitles += subs
else:
for sub in subs:
lang_code = sub["lang"]
if(lang_code == "pt-br"):
lang_code = "pb"
if lang_code in use_langs:
subtitles += [sub]
if(len(subtitles) > 0):
self.sublist = subtitles
for item in subtitles:
sub_filename = os.path.basename( self.file_original_path )
sub_filename = sub_filename[0:sub_filename.rfind(".")]
percent = (round(difflib.SequenceMatcher(None, sub_filename, item["release"]).ratio(), 2) * 100)
item["percent"] = percent
subtitles.sort(key=sort_inner,reverse=True)
for item in subtitles:
if(item["lang"] and item["release"]):
if(item["lang"] == "pt-br"):
item["lang"] = "pb"
if(item["lang"] in trans_lang):
language = trans_lang[item["lang"]]
else:
language = item["lang"]
listitem = xbmcgui.ListItem( label=language, label2=item["release"], iconImage="0.0", thumbnailImage="flags/" + item["lang"] + ".png" )
listitem.setProperty( "source", str(item["plugin"].__class__.__name__))
listitem.setProperty( "release", item["release"])
listitem.setProperty( "equals", str(item["percent"]) + "%")
if("hash" in item and item["hash"] == True):
listitem.setProperty( "sync", "true" )
else:
listitem.setProperty( "sync", "false" )
self.getControl( SUBTITLES_LIST ).addItem( listitem )
self.setFocus( self.getControl( SUBTITLES_LIST ) )
self.getControl( SUBTITLES_LIST ).selectItem( 0 )
self.getControl( LOADING_IMAGE ).setVisible( False )
self.getControl( STATUS_LABEL ).setVisible( False )
def download_subtitles(self, pos):
if self.sublist:
item = self.sublist[pos]
ok = xbmcgui.Dialog().yesno( "BoxeeSubs", _( 242 ), ( _( 243 ) % ( item["release"], ) ), "", _( 260 ), _( 259 ) )
if not ok:
self.getControl( STATUS_LABEL ).setLabel( _( 645 ) )
return
else:
local_path = xbmc.translatePath("special://home/subtitles")
dp = xbmcgui.DialogProgress()
dp.create( __scriptname__, _( 633 ), os.path.basename( self.file_path ) )
sub_filename = os.path.basename( self.file_path )
sub_filename = sub_filename[0:sub_filename.rfind(".")] + "." + item["lang"] + ".srt"
item["plugin"].downloadFile(item["link"], os.path.join( local_path, sub_filename ))
dp.close()
xbmc.Player().setSubtitles( os.path.join( local_path, sub_filename ) )
xbmc.showNotification( 652, '', '' )
self.getControl( STATUS_LABEL ).setLabel( _( 652 ) )
self.getControl( STATUS_LABEL ).setLabel( _( 649 ) )
self.exit_script()
def exit_script( self, restart=False ):
self.connThread.join()
self.close()
def onClick( self, controlId ):
if ( self.controlId == SUBTITLES_LIST ):
self.download_subtitles( self.getControl( SUBTITLES_LIST ).getSelectedPosition() )
def onFocus( self, controlId ):
self.controlId = controlId
def onAction( self, action ):
try:
if ( action.getButtonCode() in CANCEL_DIALOG ):
self.exit_script()
except:
self.exit_script()
| vLBrian/boxeehack-cigamit | hack/boxee/scripts/OpenSubtitles/resources/lib/gui.py | Python | mit | 11,411 | 0.063897 |
# -*- coding: utf-8 -*-
"""
http://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html
http://www.insdc.org/documents/feature_table.html
All keys are native strings, as are values, except the origin which is always
a python 3 byte string (not unicode)
"""
import re
import io
import sys
from collections import OrderedDict
from typing import (
List,
Any
)
def _bytes(x):
return x.encode('utf8') if isinstance(x, str) else x
NEWLINE_STR: str = '\r\n' if sys.platform == 'win32' else '\n'
NEWLINE_BYT: bytes = b'\r\n' if sys.platform == 'win32' else b'\n'
def parse(filepath: str, is_ordered: bool = False) -> dict:
"""
is_ordered == True will retain the order of the qualifiers
"""
d = {b'info': {}}
d_info = d[b'info']
# with io.open(filepath, 'r', encoding='utf-8') as fd:
with io.open(filepath, 'rb') as fd:
raw = fd.read()
start, _, origin = raw.partition(b"ORIGIN")
start, _, features = start.partition(b"FEATURES Location/Qualifiers%s" % (NEWLINE_BYT))
parseLocus(start, d_info)
parseDefinition(start, d_info)
parseAccession(start, d_info)
parseVersion(start, d_info)
parseDBLink(start, d_info)
parseKeywords(start, d_info)
parseSource(start, d_info)
parseOrganism(start, d_info)
d_info[b'references'] = parseReference(start)
_, _, comment = start.partition(b"COMMENT ")
parseComment(d_info, comment)
d[b'features'] = parseFeatures(features, is_ordered)
d[b'seq'] = parseOrigin(origin)
return d
# end def
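# Illustrative usage sketch (not part of the original module; the path below
# is hypothetical):
#
#   d = parse('/path/to/example.gb', is_ordered=True)
#   d[b'info'][b'name']           # locus name from the LOCUS line
#   d[b'info'][b'length']         # sequence length as an int
#   d[b'features'][0][b'type']    # e.g. b'source' or b'CDS'
#   d[b'seq']                     # the ORIGIN sequence as a bytes object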
def parseComment(d: dict, comment: bytes):
if comment != b'':
# get rid of ApE empty comment
if comment.startswith(b"%sCOMMENT " % (NEWLINE_BYT)):
comment = comment[13:]
idx_genome_asm_data = -1
genome_asm_data_newline_count = 0
lines = comment.split(NEWLINE_BYT)
lines = [line.strip() for line in lines]
# print(lines)
# handle ##Genome-Assembly-Data-START## edge case
for i, line in enumerate(lines):
if line == b'':
genome_asm_data_newline_count += 1
elif genome_asm_data_newline_count == 2:
idx_genome_asm_data = i
genome_asm_data_newline_count = 0
else:
genome_asm_data_newline_count = 0
# end for
if idx_genome_asm_data < 0:
d[b'comment'] = b" ".join(lines)
else:
d[b'comment'] = [
b" ".join(lines[:idx_genome_asm_data-2]),
lines[idx_genome_asm_data:-1]
]
# end def
re_locus: List[str] = [
"^LOCUS", # field
" +(?P<name>[\w|.]+)", # name
" +(?P<length>[0-9]+) bp", # sequence length
"(?: +(?P<stranded>[a-z]{2})-)?", # opt: ss, ds, ms
" *(?P<molecule_type>[a-z|A-Z|-|]{2,6})", # molecule type
" +(?P<form>[\w]{6,8})?", # linear or circular
" +(?P<gb_division>[a-z|A-Z]{3})?", # Genbank division
" +(?P<mod_date>[0-9]+-[A-Z]+-[0-9]+)", # modification date
".*%s" % (NEWLINE_STR) # match line end
]
RE_LOCUS: bytes = _bytes("".join(re_locus))
re_definition: List[str] = [
"^DEFINITION", # field
" +(?P<definition>(?:.*%s)(?: .*%s)*)" % (NEWLINE_STR, NEWLINE_STR) # look ahead assertion for multiline
]
RE_DEFINITION: bytes = _bytes("".join(re_definition))
re_accession: List[str] = [
"^ACCESSION", # field
" +(?P<accession>[\w|.]*)" # look ahead assertion for multiline
".*", # match line end
NEWLINE_STR
]
RE_ACCESSION: bytes = _bytes("".join(re_accession))
re_version: List[str] = [ "^VERSION", # field
" +(?P<version>[\w|.]+)", # version
" +GI:(?P<GI>[\w|.]+)" # gi field
".*", # match line end
NEWLINE_STR
]
RE_VERSION: bytes = _bytes("".join(re_version))
RE_DBLINK: bytes = b"^DBLINK +(?P<dblink>[\w|:| |.]+)" + NEWLINE_BYT
re_keywords: List[str] = [
"^KEYWORDS",
" +(?P<keywords>[\w|.]*)"
".*",
NEWLINE_STR
]
RE_KEYWORDS: bytes = _bytes("".join(re_keywords))
re_source: List[str] = [
"^SOURCE",
" +(?P<source>.*)",
NEWLINE_STR
]
RE_SOURCE: bytes = _bytes("".join(re_source))
re_organism: List[str] = [
"^ ORGANISM", # field
"(?: +(?P<organism0>(?:.*%s))?" % NEWLINE_STR,
"(?: +(?P<organism1>(?:.*%s)(?: .*%s)*))?)" % (NEWLINE_STR, NEWLINE_STR) # multiline
]
RE_ORGANISM: bytes = _bytes("".join(re_organism))
RE_COMP_LOCUS: '_sre.SRE_Pattern' = re.compile(RE_LOCUS, flags=re.M)
def parseLocus(raw: bytes, d_out: dict):
m = re.match(RE_COMP_LOCUS, raw)
d = m.groupdict()
d['length'] = int(d['length'])
for k, v in d.items():
d_out[_bytes(k)] = v
#end def
RE_COMP_DEFINITION: '_sre.SRE_Pattern' = re.compile(RE_DEFINITION, flags=re.M)
def parseDefinition(raw: bytes, d_out: dict):
m = re.search(RE_COMP_DEFINITION, raw)
if m is None:
d_out[b'definition'] = None
else:
d = m.groupdict()
if d['definition'] is not None:
temp_l = d['definition'].split(NEWLINE_BYT)
temp_l = [x.strip() for x in temp_l]
d_out[b'definition'] = b" ".join(temp_l)[:-1]
else:
d_out[b'definition'] = None
#end def
RE_COMP_ACCESSION: '_sre.SRE_Pattern' = re.compile(RE_ACCESSION, flags=re.M)
def parseAccession(raw: bytes, d_out: dict):
m = re.search(RE_COMP_ACCESSION, raw)
if m is None:
d_out[b'accession'] = None
else:
d = m.groupdict()
d_out[b'accession'] = d['accession']
# end def
RE_COMP_VERSION: '_sre.SRE_Pattern' = re.compile(RE_VERSION, flags=re.M)
def parseVersion(raw: bytes, d_out: dict):
m = re.search(RE_COMP_VERSION, raw)
if m is None:
d_out[b'version'] = None
else:
d = m.groupdict()
d_out[b'version'] = d['version']
d_out[b'GI'] = d['GI']
# end def
RE_COMP_DBLINK: '_sre.SRE_Pattern' = re.compile(RE_DBLINK, flags=re.M)
def parseDBLink(raw: bytes, d_out: dict):
m = re.search(RE_COMP_DBLINK, raw)
if m is None:
d_out[b'dblink'] = None
else:
d = m.groupdict()
d_out[b'dblink'] = d['dblink']
# end def
RE_COMP_KEYWORDS: '_sre.SRE_Pattern' = re.compile(RE_KEYWORDS, flags=re.M)
def parseKeywords(raw: bytes, d_out: dict):
m = re.search(RE_COMP_KEYWORDS, raw)
if m is None:
d_out[b'keywords'] = None
else:
d = m.groupdict()
d_out[b'keywords'] = d['keywords']
# end def
RE_COMP_SOURCE: '_sre.SRE_Pattern' = re.compile(RE_SOURCE, flags=re.M)
def parseSource(raw: bytes, d_out: dict):
m = re.search(RE_COMP_SOURCE, raw)
if m is None:
d_out[b'source'] = None
else:
d = m.groupdict()
d_out[b'source'] = d['source']
# end def
RE_COMP_ORGANISM: '_sre.SRE_Pattern' = re.compile(RE_ORGANISM, flags=re.M)
def parseOrganism(raw: bytes, d_out: dict):
m = re.search(RE_COMP_ORGANISM, raw)
if m is None:
d_out[b'organism'] = [None, None]
else:
d = m.groupdict()
temp_l = d['organism0'].split(NEWLINE_BYT)
temp_l = [x.strip() for x in temp_l]
org0 = b" ".join(temp_l)[:-1]
org1 = None
if d['organism1'] is not None:
temp_l = d['organism1'].split(NEWLINE_BYT)
temp_l = [x.strip() for x in temp_l]
org1 = b" ".join(temp_l)[:-1]
d_out[b'organism'] = [org0, org1]
# end def
"""
REFERENCE 1 (bases 1 to 5028)
AUTHORS Torpey,L.E., Gibbs,P.E., Nelson,J. and Lawrence,C.W.
TITLE Cloning and sequence of REV7, a gene whose function is required for
DNA damage-induced mutagenesis in Saccharomyces cerevisiae
JOURNAL Yeast 10 (11), 1503-1509 (1994)
PUBMED 7871890
"""
re_reference: List[str] = [
"^REFERENCE",
" +(?P<r_index>[0-9]+)(?: +\(bases (?P<start_idx>[0-9]+) to (?P<end_idx>[0-9]+)\)){0,1}",
".*",
NEWLINE_STR,
"^ AUTHORS",
" +(?P<authors>.+)",
NEWLINE_STR,
"^ TITLE", # field
" +(?P<title>(?:.*%s)(?: .*%s)*)" % (NEWLINE_STR, NEWLINE_STR), # multiline
"^ JOURNAL",
" +(?P<journal_info>.+%s(?: {12}.+%s)*)" % (NEWLINE_STR, NEWLINE_STR),
"(?:^ PUBMED +(?P<pubmed>[0-9]+)%s){0,1}" % (NEWLINE_STR)
]
RE_REFERENCE = _bytes("".join(re_reference))
RE_COMP_REF: '_sre.SRE_Pattern' = re.compile(RE_REFERENCE, flags=re.M)
def parseReference(raw: bytes) -> List[dict]:
ref_list = []
for m in re.finditer(RE_COMP_REF, raw):
d_temp = {}
d = m.groupdict()
temp_l = d['title'].split(NEWLINE_BYT)
temp_l = [x.strip() for x in temp_l]
d_temp[b'title'] = b" ".join(temp_l)[:-1]
temp_l = d['journal_info'].split(NEWLINE_BYT)
temp_l = [x.strip() for x in temp_l]
d_temp[b'journal_info'] = b" ".join(temp_l)[:-1]
d_temp[b'r_index'] = int(d['r_index'])
if d['start_idx'] is not None:
d_temp[b'start_idx'] = int(d['start_idx'])
else:
d_temp[b'start_idx'] = None
if d['end_idx'] is not None:
d_temp[b'end_idx'] = int(d['end_idx'])
else:
d_temp[b'end_idx'] = None
d_temp[b'authors'] = d['authors']
d_temp[b'pubmed'] = d['pubmed']
ref_list.append(d_temp)
# end for
return ref_list
#end def
def addMultivalue(d: dict, key: str, val: Any):
if key in d:
old_val = d[key]
if isinstance(old_val, list):
old_val.append(val)
else:
d[key] = [old_val, val]
else:
d[key] = val
# end def
"""
see section 3.4 Location
"""
re_feature: List[str] = [
"^ {5}(?P<feature_key>[\w]+)",
" +(?P<location>.+)",
NEWLINE_STR,
"(?P<qualifiers>(?:^ {21}.*%s)*)" % (NEWLINE_STR)
]
RE_FEATURE: bytes = _bytes("".join(re_feature))
RE_COMP_FEATURE: '_sre.SRE_Pattern' = re.compile(RE_FEATURE, flags=re.M)
# Qualifiers can have tags with /'s in the value so it's tough to escape them
# for now we need to split on " /"
QUOTE_BYTE: int = b'\"'[0]
def parseFeatures(raw: bytes, is_ordered: bool = False) -> List[dict]:
features_list = []
for feature_match in re.finditer(RE_COMP_FEATURE, raw):
feature = feature_match.groupdict()
if 'qualifiers' not in feature:
print(feature)
raise IOError("bad feature")
d = {b'type': feature['feature_key'],
b'location': feature['location'],
# 'partials': (feature['partial5'], feature['partial3']),
b'qualifiers': OrderedDict() if is_ordered else {}
}
qs = d[b'qualifiers']
        # split on " /" instead of "/" so values containing slashes are kept intact
qs_list = feature['qualifiers'].split(b' /')
for qualifier in qs_list[1:]:
# heal line breaks
"""
Need to address the multi value key problem
i.e. more than one value in the list of qualifiers
"""
q_list = qualifier.split(b'=')
key = q_list[0]
yes_val = True
try:
q_list = q_list[1].split(NEWLINE_BYT)
if q_list[-1] == b'':
q_list.pop() # remove ending '' item
except:
q_list = [b'']
yes_val = False
q_list = [x.lstrip() for x in q_list]
is_str = True
if key == b'translation':
temp = b"".join(q_list)
elif key in (b'codon_start', b'transl_table'):
is_str = False
temp = b" ".join(q_list)
else:
temp = b" ".join(q_list)
if yes_val and temp[0] == QUOTE_BYTE and temp[-1] == QUOTE_BYTE:
value_to_add = temp[1:-1] if is_str else int(temp[1:-1])
elif not yes_val:
value_to_add = None
else:
value_to_add = temp if is_str else int(temp)
addMultivalue(qs, key, value_to_add)
features_list.append(d)
# end for
return features_list
#end def
def parseOrigin(raw: bytes) -> bytes:
out_list = []
all_lines = raw.split(NEWLINE_BYT)
start = 1 if all_lines[0].strip() == b'' else 0
for line in all_lines[start:-1]:
temp = line.split()
out_list += temp[1:]
seq = b"".join(out_list)
assert(seq.isalpha())
return seq
#end def
if __name__ == "__main__":
import os.path as opath
path = opath.dirname(opath.dirname(opath.dirname(opath.abspath(__file__))))
fn = opath.join(path, "tests", "test_data", "failed.gb")
def main():
d = parse(fn)
return d
# end def
print(main())
| libnano/libnano | libnano/fileio/gb_reader_b.py | Python | gpl-2.0 | 12,973 | 0.004702 |
#!/usr/bin/env python3
import time, threading, queue
from .util import osc, log
class Driver(object):
""" This is a substitute for a realtime system """
def __init__(self):
self.voices = []
self.grain = 0.01 # hundredths are nailed by Granu, w/o load. ms are ignored.
self.t = 0.0
self.previous_t = 0.0
self.callbacks = []
self.running = True
def start(self, skip=0):
start_t = time.time() - skip
last_cue = -1
while self.running:
self.t = time.time() - start_t
if int(self.t) // 15 != last_cue:
last_cue = int(self.t) // 15
log.info("/////////////// [%s] %d:%f ///////////////" % (last_cue, self.t // 60.0, self.t % 60.0))
self._perform_callbacks()
if not self.running:
break
delta_t = self.t - self.previous_t
for voice in self.voices:
voice.update(delta_t)
self.previous_t = self.t
time.sleep(self.grain)
def stop(self):
self.running = False
for voice in self.voices:
voice.end()
log.info("/////////////// END %d:%f ///////////////" % (self.t // 60.0, self.t % 60.0))
time.sleep(1) # for osc to finish
def callback(self, f, t):
t += self.t
self.callbacks.append((f, t))
    def _perform_callbacks(self):
        # iterate over a copy: removing items from the list being iterated
        # would silently skip the callback that follows each fired one
        for callback in list(self.callbacks):
            f, t = callback
            if t <= self.t:
                f()
                self.callbacks.remove(callback)
class Synth(threading.Thread):
"""Consume notes and send OSC"""
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
self.msp_sender = osc.Sender(5280)
self.queue = queue.Queue()
self.start()
def send(self, address, *params):
self.queue.put((address, params))
def run(self):
while True:
address, params = self.queue.get()
self.msp_sender.send(address, params)
synth = Synth() # player singleton
driver = Driver()
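# Illustrative usage sketch (assumes Voice objects implementing update()/end(),
# which live elsewhere in braid; the OSC address below is a placeholder):
#
#   driver.voices.append(my_voice)
#   driver.callback(lambda: synth.send('/braid/note', 60, 100), 2.0)  # fires at t+2s
#   driver.start()    # blocks; call driver.stop() from another thread to end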
| brianhouse/wavefarm | granu/braid/core.py | Python | gpl-3.0 | 2,215 | 0.005869 |
""" Client-side transfer class for monitoring system
"""
import time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import S_OK
class SiteMapClient:
###########################################################################
def __init__( self, getRPCClient = None ):
self.getRPCClient = getRPCClient
self.lastDataRetrievalTime = 0
self.sitesData = {}
def __getRPCClient( self ):
if self.getRPCClient:
return self.getRPCClient( "Framework/SiteMap" )
return RPCClient( "Framework/SiteMap" )
###########################################################################
def getSitesData( self ):
""" Retrieves a single file and puts it in the output directory
"""
if self.lastDataRetrievalTime - time.time() < 300:
result = self.__getRPCClient().getSitesData()
if 'rpcStub' in result:
del( result[ 'rpcStub' ] )
if not result[ 'OK' ]:
return result
self.sitesData = result[ 'Value' ]
if self.sitesData:
self.lastDataRetrievalTime = time.time()
return S_OK( self.sitesData )
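# Illustrative usage sketch (assumes a reachable Framework/SiteMap service):
#   client = SiteMapClient()
#   result = client.getSitesData()
#   if result['OK']:
#     sitesData = result['Value']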
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Sbalbp/DIRAC | FrameworkSystem/Client/SiteMapClient.py | Python | gpl-3.0 | 1,184 | 0.033784 |
# -*- coding: utf-8 -*-
"""
wakatime.offlinequeue
~~~~~~~~~~~~~~~~~~~~~
Queue for saving heartbeats while offline.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
from time import sleep
from .compat import json
from .constants import DEFAULT_SYNC_OFFLINE_ACTIVITY, HEARTBEATS_PER_REQUEST
from .heartbeat import Heartbeat
try:
import sqlite3
HAS_SQL = True
except ImportError: # pragma: nocover
HAS_SQL = False
log = logging.getLogger('WakaTime')
class Queue(object):
db_file = '.wakatime.db'
table_name = 'heartbeat_2'
args = None
configs = None
def __init__(self, args, configs):
self.args = args
self.configs = configs
def connect(self):
conn = sqlite3.connect(self._get_db_file(), isolation_level=None)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS {0} (
id text,
heartbeat text)
'''.format(self.table_name))
return (conn, c)
def push(self, heartbeat):
if not HAS_SQL:
return
try:
conn, c = self.connect()
data = {
'id': heartbeat.get_id(),
'heartbeat': heartbeat.json(),
}
c.execute('INSERT INTO {0} VALUES (:id,:heartbeat)'.format(self.table_name), data)
conn.commit()
conn.close()
except sqlite3.Error:
log.traceback()
def pop(self):
if not HAS_SQL:
return None
tries = 3
wait = 0.1
try:
conn, c = self.connect()
except sqlite3.Error:
log.traceback(logging.DEBUG)
return None
heartbeat = None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM {0} LIMIT 1'.format(self.table_name))
row = c.fetchone()
if row is not None:
id = row[0]
heartbeat = Heartbeat(json.loads(row[1]), self.args, self.configs, _clone=True)
c.execute('DELETE FROM {0} WHERE id=?'.format(self.table_name), [id])
conn.commit()
loop = False
except sqlite3.Error:
log.traceback(logging.DEBUG)
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error:
log.traceback(logging.DEBUG)
return heartbeat
def push_many(self, heartbeats):
for heartbeat in heartbeats:
self.push(heartbeat)
def pop_many(self, limit=None):
if limit is None:
limit = DEFAULT_SYNC_OFFLINE_ACTIVITY
heartbeats = []
count = 0
while count < limit:
heartbeat = self.pop()
if not heartbeat:
break
heartbeats.append(heartbeat)
count += 1
if count % HEARTBEATS_PER_REQUEST == 0:
yield heartbeats
heartbeats = []
if heartbeats:
yield heartbeats
def _get_db_file(self):
home = '~'
if os.environ.get('WAKATIME_HOME'):
home = os.environ.get('WAKATIME_HOME')
return os.path.join(os.path.expanduser(home), '.wakatime.db')
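# Illustrative usage sketch (the args/configs objects come from wakatime's CLI
# setup and `sender` is hypothetical; shown only to outline the queue API):
#
#   q = Queue(args, configs)
#   q.push(heartbeat)                    # buffer a heartbeat while offline
#   for batch in q.pop_many(limit=100):  # yields lists of Heartbeat objects
#       sender(batch)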
| wakatime/komodo-wakatime | components/wakatime/offlinequeue.py | Python | bsd-3-clause | 3,427 | 0.000875 |
from flask import render_template, redirect, request, url_for, flash
from . import auth
from ..models import User
from .forms import LoginForm, RegistrationForm
from flask_login import login_user, logout_user, login_required, current_user
from .. import db
from ..email import send_email
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
    flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data, password=form.password.data)
db.session.add(user)
flash('You can now login.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
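# Illustrative registration sketch (typically done in the app factory; the
# url_prefix shown here is an assumption):
#   from .auth import auth as auth_blueprint
#   app.register_blueprint(auth_blueprint, url_prefix='/auth')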
| bobbyxuy/flask_web | app/auth/views.py | Python | mit | 1,344 | 0.001488 |
import socket
from .attrtree import AttrTree
from .checks import Checks
config = AttrTree()
# the list of checks
config.install_attr('checks', Checks())
# This is the base granularity (in seconds) for polling
# Each check may then individually be configured to run every N * tick
config.install_attr('base_tick', 60)
# Default "every" check parameter, can be overridden on a per-check basis
config.install_attr('default_every', 1)
# Default "error_every" (how often we retry checks that are in error) parameter
# -1 disables feature (same as regular "every"), can be also be overridden
config.install_attr('default_error_every', -1)
# Verbosity level (one of CRITICAL, ERROR, WARNING, INFO, DEBUG)
config.install_attr('verb_level', 'INFO')
# Email addresses to send to when an alert is triggered
config.install_attr('emails.to', [])
# The From: address
config.install_attr('emails.addr_from',
'Picomon <picomon@%s>' % socket.getfqdn())
# The SMTP host, with optional :port suffix
config.install_attr('emails.smtp_host', 'localhost:25')
# The inactive timeout after which to close the SMTP connection
config.install_attr('emails.smtp_keepalive_timeout', 60)
# Timeout after which to retry sending emails after a failure
config.install_attr('emails.smtp_retry_timeout', 60)
# Interval in seconds between global reports when some checks are in error
# 0 disables reports
config.install_attr('emails.report.every', 0)
# Subject template for state change email notifications
# available substitutions:
# - state ("Problem" or "OK")
# - check (check's name, like "CheckDNSRec6")
# - dest (the target of the check ie. an IP or a Host's 'name'
# parameter)
config.install_attr('emails.subject_tpl',
'[DOMAIN] {state}: {check} on {dest}')
# reports email subject
config.install_attr('emails.report.subject', '[DOMAIN] Picomon error report')
# watchdog error email subject
config.install_attr('emails.watchdog_subject', '[DOMAIN] Picomon stopped')
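# Example of overriding these defaults from a site config (illustrative
# sketch, assuming AttrTree exposes installed attributes for plain
# assignment; the values below are placeholders):
#   config.verb_level = 'DEBUG'
#   config.emails.to = ['admin@example.org']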
| StrasWeb/picomon | picomon/__init__.py | Python | gpl-3.0 | 2,005 | 0 |
__author__ = 'dani882'
# lecture 3.2, slide 6
# Find the cube root of a perfect cube
x = int(raw_input('Enter an integer: '))
ans = 0
while ans**3 < abs(x):
ans = ans + 1
if ans**3 != abs(x):
print(str(x) + ' is not a perfect cube')
else:
if x < 0:
ans = -ans
print('Cube root of ' + str(x) + ' is ' + str(ans)) | dani882/edX---Computer-Science | python/lecture 3.2.py | Python | gpl-2.0 | 336 | 0.002976 |
import marlin | atmb4u/marlin | marlin/__init__.py | Python | bsd-3-clause | 13 | 0.076923 |
# Copyright (C) 2009-2013 Roman Zimbelmann <hut@lepus.uberspace.de>
# This software is distributed under the terms of the GNU GPL version 3.
from inspect import isfunction
from ranger.ext.signals import SignalDispatcher, Signal
from ranger.core.shared import FileManagerAware
from ranger.gui.colorscheme import _colorscheme_name_to_class
import re
import os.path
ALLOWED_SETTINGS = {
'automatically_count_files': bool,
'autosave_bookmarks': bool,
'autoupdate_cumulative_size': bool,
'cd_bookmarks': bool,
'collapse_preview': bool,
'colorscheme': str,
'column_ratios': (tuple, list),
'confirm_on_delete': str,
'dirname_in_tabs': bool,
'display_size_in_main_column': bool,
'display_size_in_status_bar': bool,
'display_tags_in_all_columns': bool,
'draw_borders': bool,
'draw_progress_bar_in_status_bar': bool,
'flushinput': bool,
'hidden_filter': str,
'idle_delay': int,
'max_console_history_size': (int, type(None)),
'max_history_size': (int, type(None)),
'mouse_enabled': bool,
'open_all_images': bool,
'padding_right': bool,
'preview_directories': bool,
'preview_files': bool,
'preview_images': bool,
'preview_max_size': int,
'preview_script': (str, type(None)),
'save_console_history': bool,
'scroll_offset': int,
'shorten_title': int,
'show_cursor': bool, # TODO: not working?
'show_selection_in_titlebar': bool,
'show_hidden_bookmarks': bool,
'show_hidden': bool,
'sort_case_insensitive': bool,
'sort_directories_first': bool,
'sort_reverse': bool,
'sort': str,
'status_bar_on_top': bool,
'tilde_in_titlebar': bool,
'unicode_ellipsis': bool,
'update_title': bool,
'update_tmux_title': bool,
'use_preview_script': bool,
'vcs_aware': bool,
'vcs_backend_bzr': str,
'vcs_backend_git': str,
'vcs_backend_hg': str,
'xterm_alt_key': bool,
}
DEFAULT_VALUES = {
bool: False,
type(None): None,
str: "",
int: 0,
list: [],
tuple: tuple([]),
}
class Settings(SignalDispatcher, FileManagerAware):
def __init__(self):
SignalDispatcher.__init__(self)
self.__dict__['_localsettings'] = dict()
self.__dict__['_localregexes'] = dict()
self.__dict__['_tagsettings'] = dict()
self.__dict__['_settings'] = dict()
for name in ALLOWED_SETTINGS:
self.signal_bind('setopt.'+name,
self._sanitize, priority=1.0)
self.signal_bind('setopt.'+name,
self._raw_set_with_signal, priority=0.2)
def _sanitize(self, signal):
name, value = signal.setting, signal.value
if name == 'column_ratios':
# TODO: cover more cases here
if isinstance(value, tuple):
signal.value = list(value)
if not isinstance(value, list) or len(value) < 2:
signal.value = [1, 1]
else:
signal.value = [int(i) if str(i).isdigit() else 1 \
for i in value]
elif name == 'colorscheme':
_colorscheme_name_to_class(signal)
elif name == 'preview_script':
if isinstance(value, str):
result = os.path.expanduser(value)
if os.path.exists(result):
signal.value = result
else:
signal.value = None
elif name == 'use_preview_script':
if self._settings['preview_script'] is None and value \
and self.fm.ui.is_on:
self.fm.notify("Preview script undefined or not found!",
bad=True)
def set(self, name, value, path=None, tags=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if name not in self._settings:
previous = None
else:
            previous = self._settings[name]
assert self._check_type(name, value)
assert not (tags and path), "Can't set a setting for path and tag " \
"at the same time!"
kws = dict(setting=name, value=value, previous=previous,
path=path, tags=tags, fm=self.fm)
self.signal_emit('setopt', **kws)
self.signal_emit('setopt.'+name, **kws)
def get(self, name, path=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if path:
localpath = path
else:
try:
localpath = self.fm.thisdir.path
            except Exception:
localpath = path
if localpath:
for pattern, regex in self._localregexes.items():
if name in self._localsettings[pattern] and\
regex.search(localpath):
return self._localsettings[pattern][name]
if self._tagsettings and path:
realpath = os.path.realpath(path)
if realpath in self.fm.tags:
tag = self.fm.tags.marker(realpath)
if tag in self._tagsettings and name in self._tagsettings[tag]:
return self._tagsettings[tag][name]
if name in self._settings:
return self._settings[name]
else:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._raw_set(name, value)
self.__setattr__(name, value)
return self._settings[name]
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self.set(name, value, None)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self.get(name, None)
def __iter__(self):
for x in self._settings:
yield x
def types_of(self, name):
try:
typ = ALLOWED_SETTINGS[name]
except KeyError:
return tuple()
else:
if isinstance(typ, tuple):
return typ
else:
return (typ, )
def _check_type(self, name, value):
typ = ALLOWED_SETTINGS[name]
if isfunction(typ):
assert typ(value), \
"Warning: The option `" + name + "' has an incorrect type!"
else:
assert isinstance(value, typ), \
"Warning: The option `" + name + "' has an incorrect type!"\
" Got " + str(type(value)) + ", expected " + str(typ) + "!" +\
" Please check if your commands.py is up to date." if not \
self.fm.ui.is_set_up else ""
return True
__getitem__ = __getattr__
__setitem__ = __setattr__
def _raw_set(self, name, value, path=None, tags=None):
if path:
            if path not in self._localsettings:
try:
regex = re.compile(path)
                except re.error:
# Bad regular expression
return
self._localregexes[path] = regex
self._localsettings[path] = dict()
self._localsettings[path][name] = value
# make sure name is in _settings, so __iter__ runs through
# local settings too.
if not name in self._settings:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._settings[name] = value
elif tags:
for tag in tags:
if tag not in self._tagsettings:
self._tagsettings[tag] = dict()
self._tagsettings[tag][name] = value
else:
self._settings[name] = value
def _raw_set_with_signal(self, signal):
self._raw_set(signal.setting, signal.value, signal.path, signal.tags)
class LocalSettings():
def __init__(self, path, parent):
self.__dict__['_parent'] = parent
self.__dict__['_path'] = path
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self._parent.set(name, value, self._path)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self._parent.get(name, self._path)
def __iter__(self):
for x in self._parent._settings:
yield x
__getitem__ = __getattr__
__setitem__ = __setattr__
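# Illustrative usage sketch (the fm attribute is normally wired up by ranger
# itself; names below are only for demonstration):
#
#   settings = Settings()
#   settings.show_hidden = True                     # emits setopt / setopt.show_hidden
#   settings.set('sort', 'natural', path='^/tmp')   # regex-scoped override
#   settings.get('sort', '/tmp/downloads')          # -> 'natural' (path matches)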
| mullikine/ranger | ranger/container/settings.py | Python | gpl-3.0 | 8,531 | 0.001524 |
# Copyright 2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test behavior of the Librarian during a database outage.
Database outages happen by accident and during fastdowntime deployments."""
__metaclass__ = type
from cStringIO import StringIO
import urllib2
from fixtures import Fixture
from lp.services.librarian.client import LibrarianClient
from lp.services.librarianserver.testing.server import LibrarianServerFixture
from lp.testing import TestCase
from lp.testing.fixture import PGBouncerFixture
from lp.testing.layers import (
BaseLayer,
DatabaseFunctionalLayer,
)
class PGBouncerLibrarianLayer(DatabaseFunctionalLayer):
"""Custom layer for TestLibrarianDBOutage.
We are using a custom layer instead of standard setUp/tearDown to
avoid the lengthy Librarian startup time, and to cope with undoing
changes made to BaseLayer.config_fixture to allow access to the
Librarian we just started up.
"""
pgbouncer_fixture = None
librarian_fixture = None
@classmethod
def setUp(cls):
# Fixture to hold other fixtures.
cls._fixture = Fixture()
cls._fixture.setUp()
cls.pgbouncer_fixture = PGBouncerFixture()
# Install the PGBouncer fixture so we shut it down to
# create database outages.
cls._fixture.useFixture(cls.pgbouncer_fixture)
# Bring up the Librarian, which will be connecting via
# pgbouncer.
cls.librarian_fixture = LibrarianServerFixture(
BaseLayer.config_fixture)
cls._fixture.useFixture(cls.librarian_fixture)
@classmethod
def tearDown(cls):
cls.pgbouncer_fixture = None
cls.librarian_fixture = None
cls._fixture.cleanUp()
@classmethod
def testSetUp(cls):
cls.pgbouncer_fixture.start()
class TestLibrarianDBOutage(TestCase):
layer = PGBouncerLibrarianLayer
def setUp(self):
super(TestLibrarianDBOutage, self).setUp()
self.pgbouncer = PGBouncerLibrarianLayer.pgbouncer_fixture
self.client = LibrarianClient()
# Add a file to the Librarian so we can download it.
self.url = self._makeLibraryFileUrl()
def _makeLibraryFileUrl(self):
data = 'whatever'
return self.client.remoteAddFile(
'foo.txt', len(data), StringIO(data), 'text/plain')
def getErrorCode(self):
# We need to talk to every Librarian thread to ensure all the
# Librarian database connections are in a known state.
# XXX StuartBishop 2011-09-01 bug=840046: 20 might be overkill
# for the test run, but we have no real way of knowing how many
# connections are in use.
num_librarian_threads = 20
codes = set()
for count in range(num_librarian_threads):
try:
urllib2.urlopen(self.url).read()
codes.add(200)
except urllib2.HTTPError as error:
codes.add(error.code)
self.assertTrue(len(codes) == 1, 'Mixed responses: %s' % str(codes))
return codes.pop()
def test_outage(self):
# Everything should be working fine to start with.
self.assertEqual(self.getErrorCode(), 200)
# When the outage kicks in, we start getting 503 responses
# instead of 200 and 404s.
self.pgbouncer.stop()
self.assertEqual(self.getErrorCode(), 503)
# When the outage is over, things are back to normal.
self.pgbouncer.start()
self.assertEqual(self.getErrorCode(), 200)
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/librarianserver/tests/test_db_outage.py | Python | agpl-3.0 | 3,628 | 0 |
from simplequeue.lib.configuration import config
__all__ = ['config']
| geonetix/simplemq | simplequeue/__init__.py | Python | mit | 71 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2014 Didotech srl (<http://www.didotech.com>).
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import sale_order
from . import sale_order_line
from . import sale_shop
from . import sale_monkey
| iw3hxn/LibrERP | sale_order_version/models/__init__.py | Python | agpl-3.0 | 1,120 | 0 |
#!/usr/bin/env python3
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
"""
License header updater.
"""
from __future__ import unicode_literals
import argparse
import os
import sys
import sanity_utils
HEADER = """
This file is part of Shoop.
Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
This source code is licensed under the AGPLv3 license found in the
LICENSE file in the root directory of this source tree.
""".strip()
PY_HEADER = '\n'.join(('# ' + line).strip() for line in HEADER.splitlines())
JS_HEADER = (
'/**\n' +
'\n'.join((' * ' + line).rstrip() for line in HEADER.splitlines()) +
'\n */')
PY_HEADER_LINES = PY_HEADER.encode('utf-8').splitlines()
JS_HEADER_LINES = JS_HEADER.encode('utf-8').splitlines()
def get_adders():
return {
'.py': add_header_to_python_file,
'.js': add_header_to_javascript_file
}
def main():
ap = argparse.ArgumentParser()
ap.add_argument("root", nargs="+", help="Directory roots to recurse through")
ap.add_argument("-w", "--write", help="Actually write changes", action="store_true")
ap.add_argument("-s", "--exit-status", help="Exit with error status when missing headers", action="store_true")
ap.add_argument("-v", "--verbose", help="Log OK files too", action="store_true")
args = ap.parse_args()
adders = get_adders()
paths = find_files(roots=args.root, extensions=set(adders.keys()))
missing = process_files(paths, adders, verbose=args.verbose, write=args.write)
if args.exit_status and missing:
return 1
return 0
def process_files(paths, adders, verbose, write):
    # `default=0` guards against an empty path set, which would raise ValueError
    width = max((len(s) for s in paths), default=0)
missing = set()
for path in sorted(paths):
if os.stat(path).st_size == 0:
if verbose:
print('[+]:%-*s: File is empty' % (width, path))
elif not has_header(path):
missing.add(path)
if write:
adder = adders[os.path.splitext(path)[1]]
adder(path)
print('[!]:%-*s: Modified' % (width, path))
else:
print('[!]:%-*s: Requires license header' % (width, path))
else:
if verbose:
print('[+]:%-*s: File has license header' % (width, path))
return missing
def find_files(roots, extensions):
paths = set()
generated_resources = set()
for root in roots:
for file in sanity_utils.find_files(
root,
generated_resources=generated_resources,
allowed_extensions=extensions,
ignored_dirs=sanity_utils.IGNORED_DIRS + ["migrations"]
):
if not is_file_ignored(file):
paths.add(file)
paths -= generated_resources
return paths
def is_file_ignored(filepath):
filepath = filepath.replace(os.sep, "/")
return (
('vendor' in filepath) or
('doc/_ext/djangodocs.py' in filepath)
)
def has_header(path):
with open(path, 'rb') as fp:
return b"This file is part of Shoop." in fp.read(256)
def add_header_to_python_file(path):
lines = get_lines(path)
if lines:
i = 0
if lines[i].startswith(b'#!'):
i += 1
if i < len(lines) and b'coding' in lines[i]:
i += 1
new_lines = lines[:i] + PY_HEADER_LINES + lines[i:]
write_lines(path, new_lines)
def add_header_to_javascript_file(path):
lines = get_lines(path)
if lines:
new_lines = JS_HEADER_LINES + lines
write_lines(path, new_lines)
def get_lines(path):
with open(path, 'rb') as fp:
contents = fp.read()
if not contents.strip():
return []
return contents.splitlines()
def write_lines(path, new_lines):
with open(path, 'wb') as fp:
for line in new_lines:
fp.write(line + b'\n')
if __name__ == '__main__':
sys.exit(main())
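# Example invocations (illustrative; directory arguments are placeholders):
#   python ensure_license_headers.py shoop -s -v   # report files missing headers
#   python ensure_license_headers.py shoop -w      # write headers in place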
| akx/shoop | _misc/ensure_license_headers.py | Python | agpl-3.0 | 4,075 | 0.001227 |
# Implementation of radix sort (least-significant-digit, base 10) in Python.
def RadixSort(A):
    RADIX = 10
    maxLength = False
    tmp, placement = -1, 1
    while not maxLength:
        maxLength = True
        buckets = [list() for _ in range(RADIX)]
        for i in A:
            # integer division (//) keeps this correct on Python 3 as well
            tmp = i // placement
            buckets[tmp % RADIX].append(i)
            # this check must run per element; testing only the last element
            # could end the loop before every digit has been processed
            if maxLength and tmp > 0:
                maxLength = False
        a = 0
        for b in range(RADIX):
            buck = buckets[b]
            for i in buck:
                A[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return A

A = [534, 246, 933, 127, 277, 321, 454, 565, 220]
print(RadixSort(A))
| applecool/Practice | Python/Sorting/RadixSort.py | Python | mit | 556 | 0.082734 |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, TJ Lane, Osama El-Gabalawy
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
"""
This file contains scripts for calculating scalar (J) Couplings from backbone dihedrals.
"""
##############################################################################
# Imports
##############################################################################
import numpy as np
from mdtraj.geometry import compute_phi
##############################################################################
# Globals
##############################################################################
J3_HN_CB_coefficients = { # See full citations below in docstring references.
"Bax2007": dict(phi0=+60 * np.pi/180., A=3.71, B=-0.59, C=0.08), # From Table 1. in paper
}
J3_HN_CB_uncertainties = {
# Values in [Hz]
"Bax2007": 0.22,
}
J3_HN_C_coefficients = { # See full citations below in docstring references.
"Bax2007": dict(phi0=+180 * np.pi/180., A=4.36, B=-1.08, C=-0.01), # From Table 1. in paper
}
J3_HN_C_uncertainties = {
# Values in [Hz]
"Bax2007": 0.30,
}
J3_HN_HA_coefficients = { # See full citations below in docstring references.
"Ruterjans1999": dict(phi0=-60 * np.pi/180., A=7.90, B=-1.05, C=0.65), # From Table 1. in paper.
"Bax2007": dict(phi0=-60 * np.pi/180., A=8.4, B=-1.36, C=0.33), # From Table 1. in paper
"Bax1997": dict(phi0=-60 * np.pi/180., A=7.09, B=-1.42, C=1.55), # From Table 2. in paper
}
J3_HN_HA_uncertainties = {
# Values in [Hz]
"Ruterjans1999": 0.25,
"Bax2007": 0.36,
"Bax1997": 0.39
}
##############################################################################
# Functions
##############################################################################
def _J3_function(phi, A, B, C, phi0):
    """Return scalar couplings from the Karplus equation
    J(phi) = A * cos^2(phi + phi0) + B * cos(phi + phi0) + C
    for a given choice of Karplus coefficients. USES RADIANS!"""
    return A * np.cos(phi + phi0) ** 2. + B * np.cos(phi + phi0) + C
def compute_J3_HN_HA(traj, model="Bax2007"):
"""Calculate the scalar coupling between HN and H_alpha.
This function does not take into account periodic boundary conditions (it
will give spurious results if the three atoms which make up any angle jump
across a PBC (are not "wholed"))
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to compute J3_HN_HA for
model : string, optional, default="Bax2007"
Which scalar coupling model to use. Must be one of Bax2007, Bax1999,
or Ruterjans1999
Returns
-------
indices : np.ndarray, shape=(n_phi, 4), dtype=int
Atom indices (zero-based) of the phi dihedrals
J : np.ndarray, shape=(n_frames, n_phi)
Scalar couplings (J3_HN_HA, in [Hz]) of this trajectory.
`J[k]` corresponds to the phi dihedral associated with
atoms `indices[k]`
Notes
-----
The coefficients are taken from the references below--please cite them.
References
----------
.. [1] Schmidt, J. M., Blümel, M., Löhr, F., & Rüterjans, H.
"Self-consistent 3J coupling analysis for the joint calibration
of Karplus coefficients and evaluation of torsion angles."
J. Biomol. NMR, 14, 1 1-12 (1999)
.. [2] Vögeli, B., Ying, J., Grishaev, A., & Bax, A.
"Limits on variations in protein backbone dynamics from precise
measurements of scalar couplings."
J. Am. Chem. Soc., 129(30), 9377-9385 (2007)
.. [3] Hu, J. S., & Bax, A.
"Determination of ϕ and ξ1 Angles in Proteins from 13C-13C
Three-Bond J Couplings Measured by Three-Dimensional Heteronuclear NMR.
How Planar Is the Peptide Bond?."
J. Am. Chem. Soc., 119(27), 6360-6368 (1997)
"""
indices, phi = compute_phi(traj)
if model not in J3_HN_HA_coefficients:
raise(KeyError("model must be one of %s" % J3_HN_HA_coefficients.keys()))
J = _J3_function(phi, **J3_HN_HA_coefficients[model])
return indices, J
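# Illustrative usage sketch (the trajectory file name is hypothetical):
#   import mdtraj as md
#   traj = md.load('trajectory.h5')
#   indices, J = compute_J3_HN_HA(traj, model="Bax2007")
#   J.shape   # (n_frames, n_phi), couplings in Hz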
def compute_J3_HN_C(traj, model="Bax2007"):
"""Calculate the scalar coupling between HN and C_prime.
This function does not take into account periodic boundary conditions (it
will give spurious results if the three atoms which make up any angle jump
across a PBC (are not "wholed"))
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to compute J3_HN_C for
model : string, optional, default="Bax2007"
Which scalar coupling model to use. Must be one of Bax2007
Returns
-------
indices : np.ndarray, shape=(n_phi, 4), dtype=int
Atom indices (zero-based) of the phi dihedrals
J : np.ndarray, shape=(n_frames, n_phi)
Scalar couplings (J3_HN_C, in [Hz]) of this trajectory.
`J[k]` corresponds to the phi dihedral associated with
atoms `indices[k]`
Notes
-----
The coefficients are taken from the references below--please cite them.
References
----------
.. [1] Hu, J. S., & Bax, A.
"Determination of ϕ and ξ1 Angles in Proteins from 13C-13C
Three-Bond J Couplings Measured by Three-Dimensional Heteronuclear NMR.
How Planar Is the Peptide Bond?."
J. Am. Chem. Soc., 119(27), 6360-6368 (1997)
"""
indices, phi = compute_phi(traj)
if model not in J3_HN_C_coefficients:
raise(KeyError("model must be one of %s" % J3_HN_C_coefficients.keys()))
J = _J3_function(phi, **J3_HN_C_coefficients[model])
return indices, J
def compute_J3_HN_CB(traj, model="Bax2007"):
"""Calculate the scalar coupling between HN and C_beta.
This function does not take into account periodic boundary conditions (it
will give spurious results if the three atoms which make up any angle jump
across a PBC (are not "wholed"))
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to compute J3_HN_CB for
model : string, optional, default="Bax2007"
Which scalar coupling model to use. Must be one of Bax2007
Returns
-------
indices : np.ndarray, shape=(n_phi, 4), dtype=int
Atom indices (zero-based) of the phi dihedrals
J : np.ndarray, shape=(n_frames, n_phi)
Scalar couplings (J3_HN_CB, in [Hz]) of this trajectory.
`J[k]` corresponds to the phi dihedral associated with
atoms `indices[k]`
Notes
-----
The coefficients are taken from the references below--please cite them.
References
----------
.. [1] Hu, J. S., & Bax, A.
"Determination of ϕ and ξ1 Angles in Proteins from 13C-13C
Three-Bond J Couplings Measured by Three-Dimensional Heteronuclear NMR.
How Planar Is the Peptide Bond?."
J. Am. Chem. Soc., 119(27), 6360-6368 (1997)
"""
indices, phi = compute_phi(traj)
if model not in J3_HN_CB_coefficients:
raise(KeyError("model must be one of %s" % J3_HN_CB_coefficients.keys()))
J = _J3_function(phi, **J3_HN_CB_coefficients[model])
return indices, J
| msultan/mdtraj | mdtraj/nmr/scalar_couplings.py | Python | lgpl-2.1 | 8,005 | 0.001376 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_cnn_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.Navigate(self.url)
action_runner.Wait(15)
class SkiaCnnDesktopPageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaCnnDesktopPageSet, self).__init__(
archive_data_file='data/skia_cnn_desktop.json')
urls_list = [
# go/skia-skps-3-2019
'http://www.cnn.com',
]
for url in urls_list:
self.AddStory(SkiaDesktopPage(url, self)) | HalCanary/skia-hc | tools/skp/page_sets/skia_cnn_desktop.py | Python | bsd-3-clause | 1,145 | 0.00524 |
# @name <%= app_name %>
# @description
# Models for UserController.
import json
from src.models import BaseModel
class <%= endpoint %>Model(BaseModel):
    _parse_class_name = '<%= table %>'
pass | nghiattran/generator-python-parse | generators/endpoint/templates/model_template.py | Python | mit | 201 | 0.014925 |
from __future__ import absolute_import
from .base import WhiteNoise
__version__ = '2.0.3'
__all__ = ['WhiteNoise']
| KnockSoftware/whitenoise | whitenoise/__init__.py | Python | mit | 118 | 0 |
from tastypie.resources import ModelResource, ALL
from .models import Filer, Filing
from .utils.serializer import CIRCustomSerializer
class FilerResource(ModelResource):
class Meta:
queryset = Filer.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filer_id_raw': ALL}
excludes = ['id']
class FilingResource(ModelResource):
class Meta:
queryset = Filing.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filing_id_raw': ALL}
excludes = ['id']
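# Illustrative registration sketch (hypothetical urls.py wiring; this is the
# standard tastypie pattern, not code from this module):
#   from tastypie.api import Api
#   v1_api = Api(api_name='v1')
#   v1_api.register(FilerResource())
#   v1_api.register(FilingResource())
#   # then include v1_api.urls in urlpatterns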
| myersjustinc/django-calaccess-campaign-browser | calaccess_campaign_browser/api.py | Python | mit | 545 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from flask import Blueprint, request
import jimit as ji
from models import UidOpenidMapping
from models import Utils, Rules, User
__author__ = 'James Iter'
__date__ = '16/6/8'
__contact__ = 'james.iter.cn@gmail.com'
__copyright__ = '(c) 2016 by James Iter.'
blueprint = Blueprint(
'user_mgmt',
__name__,
url_prefix='/api/user_mgmt'
)
blueprints = Blueprint(
'users_mgmt',
__name__,
url_prefix='/api/users_mgmt'
)
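# Note (illustrative): the view functions below carry no route decorators;
# the application presumably binds them to these blueprints elsewhere, along
# the lines of:
#   blueprint.add_url_rule('/<_id>', view_func=r_get, methods=['GET'])
#   blueprints.add_url_rule('', view_func=r_get_by_filter, methods=['GET'])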
@Utils.dumps2response
@Utils.superuser
def r_get(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.id = long(user.id)
user.get()
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = user.__dict__
del ret['data']['password']
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_get_by_login_name(login_name=None):
user = User()
args_rules = [
Rules.LOGIN_NAME.value
]
user.login_name = login_name
try:
ji.Check.previewing(args_rules, user.__dict__)
user.get_by('login_name')
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = user.__dict__
del ret['data']['password']
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_enable(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.id = long(user.id)
if user.id == 1:
ret = dict()
ret['state'] = ji.Common.exchange_state(40301)
raise ji.PreviewingError(json.dumps(ret, ensure_ascii=False))
user.get()
except ji.PreviewingError, e:
return json.loads(e.message)
args_rules = [
Rules.ENABLED.value
]
user.enabled = True
try:
ji.Check.previewing(args_rules, user.__dict__)
user.update()
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_disable(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.id = long(user.id)
if user.id == 1:
ret = dict()
ret['state'] = ji.Common.exchange_state(40301)
raise ji.PreviewingError(json.dumps(ret, ensure_ascii=False))
user.get()
except ji.PreviewingError, e:
return json.loads(e.message)
args_rules = [
Rules.ENABLED.value
]
user.enabled = False
try:
ji.Check.previewing(args_rules, user.__dict__)
user.update()
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_delete(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.id = long(user.id)
if user.id == 1:
ret = dict()
ret['state'] = ji.Common.exchange_state(40301)
raise ji.PreviewingError(json.dumps(ret, ensure_ascii=False))
user.delete()
        # delete the openids that depend on this user
UidOpenidMapping.delete_by_filter('uid:in:' + _id)
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_update(_id):
user = User()
args_rules = [
Rules.UID.value
]
if 'login_name' in request.json:
args_rules.append(
Rules.LOGIN_NAME.value
)
if 'mobile_phone' in request.json:
args_rules.append(
Rules.MOBILE_PHONE.value
)
if 'mobile_phone_verified' in request.json:
args_rules.append(
Rules.MOBILE_PHONE_VERIFIED.value
)
if 'email' in request.json:
args_rules.append(
Rules.EMAIL.value
)
if 'email_verified' in request.json:
args_rules.append(
Rules.EMAIL_VERIFIED.value
)
if 'role_id' in request.json:
request.json['role_id'] = request.json['role_id'].__str__()
args_rules.append(
Rules.ROLE_ID_EXT.value
)
if args_rules.__len__() < 2:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
request.json['id'] = _id
try:
ji.Check.previewing(args_rules, request.json)
user.id = int(request.json.get('id'))
user.get()
user.login_name = request.json.get('login_name', user.login_name)
user.mobile_phone = request.json.get('mobile_phone', user.mobile_phone)
user.mobile_phone_verified = request.json.get('mobile_phone_verified', user.mobile_phone_verified)
user.email = request.json.get('email', user.email)
user.email_verified = request.json.get('email_verified', user.email_verified)
user.role_id = int(request.json.get('role_id', user.role_id))
user.update()
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_change_password(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.get()
except ji.PreviewingError, e:
return json.loads(e.message)
args_rules = [
Rules.PASSWORD.value
]
user.password = request.json.get('password')
try:
ji.Check.previewing(args_rules, user.__dict__)
user.password = ji.Security.ji_pbkdf2(user.password)
user.update()
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_get_by_filter():
page = str(request.args.get('page', 1))
page_size = str(request.args.get('page_size', 50))
args_rules = [
Rules.PAGE.value,
Rules.PAGE_SIZE.value
]
try:
ji.Check.previewing(args_rules, {'page': page, 'page_size': page_size})
except ji.PreviewingError, e:
return json.loads(e.message)
page = int(page)
page_size = int(page_size)
    # convert page and page_size into offset and limit
offset = (page - 1) * page_size
    # offset and limit, when given, override page and page_size
offset = str(request.args.get('offset', offset))
limit = str(request.args.get('limit', page_size))
order_by = request.args.get('order_by', 'id')
order = request.args.get('order', 'asc')
filter_str = request.args.get('filter', '')
args_rules = [
Rules.OFFSET.value,
Rules.LIMIT.value,
Rules.ORDER_BY.value,
Rules.ORDER.value
]
try:
ji.Check.previewing(args_rules, {'offset': offset, 'limit': limit, 'order_by': order_by, 'order': order})
offset = int(offset)
limit = int(limit)
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = list()
ret['paging'] = {'total': 0, 'offset': offset, 'limit': limit, 'page': page, 'page_size': page_size,
'next': '', 'prev': '', 'first': '', 'last': ''}
ret['data'], ret['paging']['total'] = User.get_by_filter(offset=offset, limit=limit, order_by=order_by,
order=order, filter_str=filter_str)
host_url = request.host_url.rstrip('/')
other_str = '&filter=' + filter_str + '&order=' + order + '&order_by=' + order_by
last_pagination = (ret['paging']['total'] + page_size - 1) / page_size
if page <= 1:
ret['paging']['prev'] = host_url + blueprints.url_prefix + '?page=1&page_size=' + page_size.__str__() + \
other_str
else:
ret['paging']['prev'] = host_url + blueprints.url_prefix + '?page=' + str(page-1) + '&page_size=' + \
page_size.__str__() + other_str
if page >= last_pagination:
ret['paging']['next'] = host_url + blueprints.url_prefix + '?page=' + last_pagination.__str__() + \
'&page_size=' + page_size.__str__() + other_str
else:
ret['paging']['next'] = host_url + blueprints.url_prefix + '?page=' + str(page+1) + '&page_size=' + \
page_size.__str__() + other_str
ret['paging']['first'] = host_url + blueprints.url_prefix + '?page=1&page_size=' + \
page_size.__str__() + other_str
ret['paging']['last'] = \
host_url + blueprints.url_prefix + '?page=' + last_pagination.__str__() + '&page_size=' + \
page_size.__str__() + other_str
for i in range(ret['data'].__len__()):
del ret['data'][i]['password']
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_update_by_uid_s():
args_rules = [
Rules.UIDS.value
]
if 'mobile_phone_verified' in request.json:
args_rules.append(
Rules.MOBILE_PHONE_VERIFIED.value
)
if 'enabled' in request.json:
args_rules.append(
Rules.ENABLED.value
)
if 'email_verified' in request.json:
args_rules.append(
Rules.EMAIL_VERIFIED.value
)
if 'role_id' in request.json:
args_rules.append(
Rules.ROLE_ID_EXT.value
)
if args_rules.__len__() < 2:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
try:
ji.Check.previewing(args_rules, request.json)
filter_str = 'id:IN:' + request.json.get('ids')
User.update_by_filter(kv=request.json, filter_str=filter_str)
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_delete_by_uid_s():
args_rules = [
Rules.UIDS.value
]
try:
ji.Check.previewing(args_rules, request.json)
uid_s = request.json.get('ids')
if '1' in uid_s.split(','):
ret = dict()
ret['state'] = ji.Common.exchange_state(40301)
raise ji.PreviewingError(json.dumps(ret, ensure_ascii=False))
filter_str = 'id:IN:' + request.json.get('ids')
User.delete_by_filter(filter_str=filter_str)
        # delete the openids that depend on these users
UidOpenidMapping.delete_by_filter('uid:in:' + request.json.get('ids'))
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_content_search():
page = str(request.args.get('page', 1))
page_size = str(request.args.get('page_size', 50))
args_rules = [
Rules.PAGE.value,
Rules.PAGE_SIZE.value
]
try:
ji.Check.previewing(args_rules, {'page': page, 'page_size': page_size})
except ji.PreviewingError, e:
return json.loads(e.message)
page = int(page)
page_size = int(page_size)
    # convert page and page_size into offset and limit
offset = (page - 1) * page_size
    # offset and limit, when given, override page and page_size
offset = str(request.args.get('offset', offset))
limit = str(request.args.get('limit', page_size))
order_by = request.args.get('order_by', 'id')
order = request.args.get('order', 'asc')
keyword = request.args.get('keyword', '')
args_rules = [
Rules.OFFSET.value,
Rules.LIMIT.value,
Rules.ORDER_BY.value,
Rules.ORDER.value,
Rules.KEYWORD.value
]
try:
ji.Check.previewing(args_rules, {'offset': offset, 'limit': limit, 'order_by': order_by, 'order': order,
'keyword': keyword})
offset = int(offset)
limit = int(limit)
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = list()
ret['paging'] = {'total': 0, 'offset': offset, 'limit': limit, 'page': page, 'page_size': page_size}
ret['data'], ret['paging']['total'] = User.content_search(offset=offset, limit=limit, order_by=order_by,
order=order, keyword=keyword)
host_url = request.host_url.rstrip('/')
other_str = '&keyword=' + keyword + '&order=' + order + '&order_by=' + order_by
last_pagination = (ret['paging']['total'] + page_size - 1) / page_size
if page <= 1:
ret['paging']['prev'] = host_url + blueprints.url_prefix + '/_search?page=1&page_size=' + \
page_size.__str__() + other_str
else:
ret['paging']['prev'] = host_url + blueprints.url_prefix + '/_search?page=' + str(page-1) + \
'&page_size=' + page_size.__str__() + other_str
if page >= last_pagination:
ret['paging']['next'] = host_url + blueprints.url_prefix + '/_search?page=' + last_pagination.__str__() + \
'&page_size=' + page_size.__str__() + other_str
else:
ret['paging']['next'] = host_url + blueprints.url_prefix + '/_search?page=' + str(page+1) + \
'&page_size=' + page_size.__str__() + other_str
ret['paging']['first'] = host_url + blueprints.url_prefix + '/_search?page=1&page_size=' + \
page_size.__str__() + other_str
ret['paging']['last'] = \
host_url + blueprints.url_prefix + '/_search?page=' + last_pagination.__str__() + '&page_size=' + \
page_size.__str__() + other_str
for i in range(ret['data'].__len__()):
del ret['data'][i]['password']
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
| jamesiter/jimauth | views/user_mgmt.py | Python | gpl-3.0 | 14,179 | 0.001987 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-03 00:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='course',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='courses/images', verbose_name='Imagem'),
),
migrations.AlterField(
model_name='course',
name='name',
field=models.CharField(max_length=100, unique=True, verbose_name='Nome'),
),
]
| rafaelribeiroo/ensinoDistancia | src/apps/courses/migrations/0002_auto_20171103_0057.py | Python | gpl-3.0 | 685 | 0.00292 |
"""
In order to create a package for pypi, you need to follow several steps.
1. Create a .pypirc in your home directory. It should look like this:
```
[distutils]
index-servers =
pypi
pypitest
[pypi]
username=allennlp
password= Get the password from LastPass.
[pypitest]
repository=https://test.pypi.org/legacy/
username=allennlp
password= Get the password from LastPass.
```
Run chmod 600 ~/.pypirc so only you can read/write it.
2. Change the version in docs/conf.py and setup.py.
3. Commit these changes with the message: "Release: VERSION"
4. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
   Push the tag to git: git push --tags origin master
5. Build both the sources and the wheel. Do not change anything in setup.py between
   creating the wheel and the source distribution (obviously).
   For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory.
   (This will build a wheel for the python version you use to build it - make sure you use python 3.x.)
   For the sources, run: "python setup.py sdist"
   You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp.
6. Check that everything looks correct by uploading the package to the pypi test server:
   twine upload dist/* -r pypitest
   (pypi suggests using twine, as other methods upload files via plaintext.)
   Check that you can install it in a virtualenv by running:
   pip install -i https://testpypi.python.org/pypi allennlp
7. Upload the final version to actual pypi:
   twine upload dist/* -r pypi
8. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
from setuptools import setup, find_packages
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
VERSION = '0.3.1-unreleased'
setup(name='allennlp',
version=VERSION,
description='An open-source NLP research library, built on PyTorch.',
classifiers=[
'Intended Audience :: Science/Research',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='allennlp NLP deep learning machine reading',
url='https://github.com/allenai/allennlp',
author='Allen Institute for Artificial Intelligence',
author_email='allennlp@allenai.org',
license='Apache',
packages=find_packages(),
install_requires=[
'pyhocon==0.3.35',
'typing',
'overrides',
'nltk',
'spacy>=2.0,<2.1',
'numpy',
'tensorboard',
'cffi==1.11.2',
'awscli>=1.11.91',
'flask==0.12.1',
'flask-cors==3.0.3',
'psycopg2',
'argparse',
'requests>=2.18',
'tqdm',
'editdistance',
'jupyter',
'h5py',
'scikit-learn',
'scipy',
'pytz==2017.3'
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
include_package_data=True,
python_requires='>=3.6',
zip_safe=False)
| nafitzgerald/allennlp | setup.py | Python | apache-2.0 | 3,457 | 0.002025 |
import csv, sqlite3
con = sqlite3.connect("toto.db") # change "toto.db" to your database filename (":memory:" for an in-memory DB)
cur = con.cursor()
cur.execute("CREATE TABLE t (col1, col2);") # use your column names here
with open('data.csv','r') as fin: # `with` statement available in 2.5+
# csv.DictReader uses first line in file for column headings by default
dr = csv.DictReader(fin) # comma is default delimiter
to_db = [(i['col1'], i['col2']) for i in dr]
cur.executemany("INSERT INTO t (col1, col2) VALUES (?, ?);", to_db)
con.commit()
con.close()
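# Hedged read-back sketch (not part of the original script): re-open the
# database and confirm the rows were inserted.
con = sqlite3.connect("toto.db")
cur = con.cursor()
for row in cur.execute("SELECT col1, col2 FROM t"):
    print(row)
con.close()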
| fccagou/tools | python/sql/csv-to-sql.py | Python | gpl-2.0 | 537 | 0.013035 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tools for dealing with Arrow type metadata in Python
from pyarrow.lib import (is_boolean_value, # noqa
is_integer_value,
is_float_value)
import pyarrow.lib as lib
_SIGNED_INTEGER_TYPES = {lib.Type_INT8, lib.Type_INT16, lib.Type_INT32,
lib.Type_INT64}
_UNSIGNED_INTEGER_TYPES = {lib.Type_UINT8, lib.Type_UINT16, lib.Type_UINT32,
lib.Type_UINT64}
_INTEGER_TYPES = _SIGNED_INTEGER_TYPES | _UNSIGNED_INTEGER_TYPES
_FLOATING_TYPES = {lib.Type_HALF_FLOAT, lib.Type_FLOAT, lib.Type_DOUBLE}
_DATE_TYPES = {lib.Type_DATE32, lib.Type_DATE64}
_TIME_TYPES = {lib.Type_TIME32, lib.Type_TIME64}
_TEMPORAL_TYPES = {lib.Type_TIMESTAMP} | _TIME_TYPES | _DATE_TYPES
_NESTED_TYPES = {lib.Type_LIST, lib.Type_STRUCT, lib.Type_UNION, lib.Type_MAP}
def is_null(t):
"""
Return True if value is an instance of a null type
"""
return t.id == lib.Type_NA
def is_boolean(t):
"""
Return True if value is an instance of a boolean type
"""
return t.id == lib.Type_BOOL
def is_integer(t):
"""
Return True if value is an instance of any integer type
"""
return t.id in _INTEGER_TYPES
def is_signed_integer(t):
"""
Return True if value is an instance of any signed integer type
"""
return t.id in _SIGNED_INTEGER_TYPES
def is_unsigned_integer(t):
"""
Return True if value is an instance of any unsigned integer type
"""
return t.id in _UNSIGNED_INTEGER_TYPES
def is_int8(t):
"""
Return True if value is an instance of an int8 type
"""
return t.id == lib.Type_INT8
def is_int16(t):
"""
Return True if value is an instance of an int16 type
"""
return t.id == lib.Type_INT16
def is_int32(t):
"""
Return True if value is an instance of an int32 type
"""
return t.id == lib.Type_INT32
def is_int64(t):
"""
Return True if value is an instance of an int64 type
"""
return t.id == lib.Type_INT64
def is_uint8(t):
"""
Return True if value is an instance of an uint8 type
"""
return t.id == lib.Type_UINT8
def is_uint16(t):
"""
Return True if value is an instance of an uint16 type
"""
return t.id == lib.Type_UINT16
def is_uint32(t):
"""
Return True if value is an instance of an uint32 type
"""
return t.id == lib.Type_UINT32
def is_uint64(t):
"""
Return True if value is an instance of an uint64 type
"""
return t.id == lib.Type_UINT64
def is_floating(t):
"""
Return True if value is an instance of a floating point numeric type
"""
return t.id in _FLOATING_TYPES
def is_float16(t):
"""
Return True if value is an instance of an float16 (half-precision) type
"""
return t.id == lib.Type_HALF_FLOAT
def is_float32(t):
"""
Return True if value is an instance of an float32 (single precision) type
"""
return t.id == lib.Type_FLOAT
def is_float64(t):
"""
Return True if value is an instance of an float64 (double precision) type
"""
return t.id == lib.Type_DOUBLE
def is_list(t):
"""
Return True if value is an instance of a list type
"""
return t.id == lib.Type_LIST
def is_struct(t):
"""
Return True if value is an instance of a struct type
"""
return t.id == lib.Type_STRUCT
def is_union(t):
"""
Return True if value is an instance of a union type
"""
return t.id == lib.Type_UNION
def is_nested(t):
"""
Return True if value is an instance of a nested type
"""
return t.id in _NESTED_TYPES
def is_temporal(t):
"""
Return True if value is an instance of a temporal (date, time, timestamp)
type
"""
return t.id in _TEMPORAL_TYPES
def is_timestamp(t):
"""
Return True if value is an instance of a timestamp type
"""
return t.id == lib.Type_TIMESTAMP
def is_time(t):
"""
Return True if value is an instance of a time type
"""
return t.id in _TIME_TYPES
def is_time32(t):
"""
Return True if value is an instance of a time32 type
"""
return t.id == lib.Type_TIME32
def is_time64(t):
"""
Return True if value is an instance of a time64 type
"""
return t.id == lib.Type_TIME64
def is_binary(t):
"""
Return True if value is an instance of a variable-length binary type
"""
return t.id == lib.Type_BINARY
def is_unicode(t):
"""
Alias for is_string
"""
return is_string(t)
def is_string(t):
"""
Return True if value is an instance of string (utf8 unicode) type
"""
return t.id == lib.Type_STRING
def is_fixed_size_binary(t):
"""
Return True if value is an instance of a fixed size binary type
"""
return t.id == lib.Type_FIXED_SIZE_BINARY
def is_date(t):
"""
Return True if value is an instance of a date type
"""
return t.id in _DATE_TYPES
def is_date32(t):
"""
Return True if value is an instance of a date32 (days) type
"""
return t.id == lib.Type_DATE32
def is_date64(t):
"""
Return True if value is an instance of a date64 (milliseconds) type
"""
return t.id == lib.Type_DATE64
def is_map(t):
"""
Return True if value is an instance of a map logical type
"""
return t.id == lib.Type_MAP
def is_decimal(t):
"""
Return True if value is an instance of a decimal type
"""
return t.id == lib.Type_DECIMAL
def is_dictionary(t):
"""
Return True if value is an instance of a dictionary-encoded type
"""
return t.id == lib.Type_DICTIONARY
def is_primitive(t):
"""
Return True if the value is an instance of a primitive type
"""
return lib._is_primitive(t.id)
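# Hedged usage sketch (not part of the original module); assumes pyarrow is
# installed and each predicate receives a DataType instance:
# import pyarrow as pa
# import pyarrow.types as types
# types.is_integer(pa.int32())        # -> True
# types.is_temporal(pa.timestamp('ms'))  # -> True
# types.is_string(pa.float64())       # -> False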
| itaiin/arrow | python/pyarrow/types.py | Python | apache-2.0 | 6,569 | 0 |
"""The Energy integration."""
from __future__ import annotations
from homeassistant.components import frontend
from homeassistant.core import HomeAssistant
from homeassistant.helpers import discovery
from homeassistant.helpers.typing import ConfigType
from . import websocket_api
from .const import DOMAIN
from .data import async_get_manager
async def is_configured(hass: HomeAssistant) -> bool:
"""Return a boolean to indicate if energy is configured."""
manager = await async_get_manager(hass)
if manager.data is None:
return False
return bool(manager.data != manager.default_preferences())
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Energy."""
websocket_api.async_setup(hass)
frontend.async_register_built_in_panel(hass, DOMAIN, DOMAIN, "mdi:lightning-bolt")
hass.async_create_task(
discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config)
)
hass.data[DOMAIN] = {
"cost_sensors": {},
}
return True
| jawilson/home-assistant | homeassistant/components/energy/__init__.py | Python | apache-2.0 | 1,029 | 0.000972 |
from django.forms.models import model_to_dict
from django.db.models import Count
from django.core.paginator import Paginator
import json
from core.apps.tools.common import render_json, dump_and_render_json,\
MeeMJSONEncoder
from core.apps.accounts.models import User
from core.apps.history.models import UserLogEntry
from teacher.apps.collaboration.models import Collaborator
from teacher.apps.collaboration.models import CollaboratorInvitation
from teacher.apps.collaboration.teams.models import Team, TeamMemberShip
from teacher.common import get_file_media_url
# User.objects.annotate(page_count=Count('page')).filter(page_count__gte=2).count()
def default(request):
if request.user.is_authenticated ():
userid = request.user.id
user = User.objects.select_related().get(pk=userid)
model = model_to_dict(user, ['username', 'email'])
invitations_count = CollaboratorInvitation.objects.filter(fromuser=user.id).count()
invitations_recieved = CollaboratorInvitation.objects.filter(usermail=user.email).count()
studentCount = 0
course_count = 0
courses = user.courses.all ()
for course in courses :
studentCount = studentCount + course.students.count ()
course_count = course_count + 1
staff_count = Team.objects.annotate(staff_count=Count('members')).filter(owner=user).values ('staff_count')
staff_count = staff_count[0]['staff_count']
"""
collaborations = user.my_collaborators.select_related().all()
other_collaborations = user.my_collaborators_with_others.select_related().all()
"""
collaborators = Collaborator.objects.filter(source=user).all ()
member_in_teams = TeamMemberShip.objects.filter(member__in=collaborators).select_related('team', 'assigned_tasks').all ()
tasks_count = 0
todos = []
# FIXME
for item in member_in_teams :
tasks_count += item.team.assigned_tasks.count()
for task in item.team.tasks.all() :
task_ = model_to_dict(task, ['id', 'start', 'end', 'title'])
if getattr(task, 'label', False):
task_.update({ 'color' : task.label.color})
else :
task_.update({ 'color' : '#ccc'})
todos.append(task_)
model.update({
'id' : user.id ,
'username' : user.email,
'img_not_found' : '/images/team/houssem.jpg',
'thamb_img_url' : get_file_media_url (user.profile.cover, 'location'),
'studentsCount' : studentCount,
'coursesCount' : course_count ,
'collaboratorsCount' : staff_count,
'tasksCount' : tasks_count,
'invitations_sent_count' : invitations_count,
'invitations_recieved_count' : invitations_recieved,
'progress' : get_profile_progress(user),
});
recents = user.history.all()
paginator = Paginator(recents, 10)
recents_activities = paginator.page(1)
recents_activities_list = []
for item in recents_activities :
item_ = model_to_dict(item, fields=['id', 'action_time', 'object_id'])
item_.update({'model' : item.content_type.model})
recents_activities_list.append(item_) #.push(item_)
model.update({'history' : recents_activities_list})
model.update({'todos' : todos})
return render_json(request, json.dumps(model, encoding="utf-8", cls=MeeMJSONEncoder))
else :
return dump_and_render_json(request, None)
def get_profile_progress(user):
    # TODO: compute real completeness from the profile fields below;
    # a fixed stub value is returned for now.
    # privacy, lang, web sites, emails, location
    return 15
| houssemFat/MeeM-Dev | teacher/apps/dashboard/views.py | Python | mit | 3,848 | 0.013514 |
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2015 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
class core(object):
def __init__(self, zap):
self.zap = zap
def alert(self, id):
"""
Gets the alert with the given ID, the corresponding HTTP message can be obtained with the 'messageId' field and 'message' API method
"""
return next(self.zap._request(self.zap.base + 'core/view/alert/', {'id' : id}).itervalues())
def alerts(self, baseurl='', start='', count=''):
"""
Gets the alerts raised by ZAP, optionally filtering by URL and paginating with 'start' position and 'count' of alerts
"""
return next(self.zap._request(self.zap.base + 'core/view/alerts/', {'baseurl' : baseurl, 'start' : start, 'count' : count}).itervalues())
def number_of_alerts(self, baseurl=''):
"""
Gets the number of alerts, optionally filtering by URL
"""
return next(self.zap._request(self.zap.base + 'core/view/numberOfAlerts/', {'baseurl' : baseurl}).itervalues())
@property
def hosts(self):
"""
Gets the name of the hosts accessed through/by ZAP
"""
return next(self.zap._request(self.zap.base + 'core/view/hosts/').itervalues())
@property
def sites(self):
"""
Gets the sites accessed through/by ZAP (scheme and domain)
"""
return next(self.zap._request(self.zap.base + 'core/view/sites/').itervalues())
@property
def urls(self):
"""
Gets the URLs accessed through/by ZAP
"""
return next(self.zap._request(self.zap.base + 'core/view/urls/').itervalues())
def message(self, id):
"""
Gets the HTTP message with the given ID. Returns the ID, request/response headers and bodies, cookies and note.
"""
return next(self.zap._request(self.zap.base + 'core/view/message/', {'id' : id}).itervalues())
def messages(self, baseurl='', start='', count=''):
"""
Gets the HTTP messages sent by ZAP, request and response, optionally filtered by URL and paginated with 'start' position and 'count' of messages
"""
return next(self.zap._request(self.zap.base + 'core/view/messages/', {'baseurl' : baseurl, 'start' : start, 'count' : count}).itervalues())
def number_of_messages(self, baseurl=''):
"""
Gets the number of messages, optionally filtering by URL
"""
return next(self.zap._request(self.zap.base + 'core/view/numberOfMessages/', {'baseurl' : baseurl}).itervalues())
@property
def version(self):
"""
Gets ZAP version
"""
return next(self.zap._request(self.zap.base + 'core/view/version/').itervalues())
@property
def excluded_from_proxy(self):
"""
Gets the regular expressions, applied to URLs, to exclude from the Proxy
"""
return next(self.zap._request(self.zap.base + 'core/view/excludedFromProxy/').itervalues())
@property
def home_directory(self):
return next(self.zap._request(self.zap.base + 'core/view/homeDirectory/').itervalues())
def stats(self, keyprefix=''):
return next(self.zap._request(self.zap.base + 'core/view/stats/', {'keyPrefix' : keyprefix}).itervalues())
@property
def option_http_state_enabled(self):
return next(self.zap._request(self.zap.base + 'core/view/optionHttpStateEnabled/').itervalues())
@property
def option_use_proxy_chain(self):
return next(self.zap._request(self.zap.base + 'core/view/optionUseProxyChain/').itervalues())
@property
def option_proxy_chain_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainName/').itervalues())
@property
def option_proxy_chain_port(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPort/').itervalues())
@property
def option_proxy_chain_skip_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainSkipName/').itervalues())
@property
def option_use_proxy_chain_auth(self):
return next(self.zap._request(self.zap.base + 'core/view/optionUseProxyChainAuth/').itervalues())
@property
def option_proxy_chain_user_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainUserName/').itervalues())
@property
def option_proxy_chain_realm(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainRealm/').itervalues())
@property
def option_proxy_chain_password(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPassword/').itervalues())
@property
def option_proxy_chain_prompt(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPrompt/').itervalues())
@property
def option_http_state(self):
return next(self.zap._request(self.zap.base + 'core/view/optionHttpState/').itervalues())
@property
def option_timeout_in_secs(self):
return next(self.zap._request(self.zap.base + 'core/view/optionTimeoutInSecs/').itervalues())
@property
def option_single_cookie_request_header(self):
return next(self.zap._request(self.zap.base + 'core/view/optionSingleCookieRequestHeader/').itervalues())
@property
def option_proxy_excluded_domains(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyExcludedDomains/').itervalues())
@property
def option_proxy_excluded_domains_enabled(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyExcludedDomainsEnabled/').itervalues())
@property
def option_default_user_agent(self):
return next(self.zap._request(self.zap.base + 'core/view/optionDefaultUserAgent/').itervalues())
def shutdown(self, apikey=''):
"""
Shuts down ZAP
"""
return next(self.zap._request(self.zap.base + 'core/action/shutdown/', {'apikey' : apikey}).itervalues())
def new_session(self, name='', overwrite='', apikey=''):
"""
Creates a new session, optionally overwriting existing files. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/newSession/', {'name' : name, 'overwrite' : overwrite, 'apikey' : apikey}).itervalues())
def load_session(self, name, apikey=''):
"""
Loads the session with the given name. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/loadSession/', {'name' : name, 'apikey' : apikey}).itervalues())
def save_session(self, name, overwrite='', apikey=''):
"""
Saves the session with the name supplied, optionally overwriting existing files. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/saveSession/', {'name' : name, 'overwrite' : overwrite, 'apikey' : apikey}).itervalues())
def snapshot_session(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/snapshotSession/', {'apikey' : apikey}).itervalues())
def clear_excluded_from_proxy(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/clearExcludedFromProxy/', {'apikey' : apikey}).itervalues())
def exclude_from_proxy(self, regex, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/excludeFromProxy/', {'regex' : regex, 'apikey' : apikey}).itervalues())
def set_home_directory(self, dir, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setHomeDirectory/', {'dir' : dir, 'apikey' : apikey}).itervalues())
def generate_root_ca(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/generateRootCA/', {'apikey' : apikey}).itervalues())
def send_request(self, request, followredirects='', apikey=''):
"""
Sends the HTTP request, optionally following redirections. Returns the request sent and response received and followed redirections, if any.
"""
return next(self.zap._request(self.zap.base + 'core/action/sendRequest/', {'request' : request, 'followRedirects' : followredirects, 'apikey' : apikey}).itervalues())
def delete_all_alerts(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/deleteAllAlerts/', {'apikey' : apikey}).itervalues())
def run_garbage_collection(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/runGarbageCollection/', {'apikey' : apikey}).itervalues())
def clear_stats(self, keyprefix, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/clearStats/', {'keyPrefix' : keyprefix, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_name(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainName/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_realm(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainRealm/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_user_name(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainUserName/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_password(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPassword/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_skip_name(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainSkipName/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_default_user_agent(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionDefaultUserAgent/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_http_state_enabled(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionHttpStateEnabled/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_port(self, integer, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPort/', {'Integer' : integer, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_prompt(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPrompt/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def set_option_timeout_in_secs(self, integer, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionTimeoutInSecs/', {'Integer' : integer, 'apikey' : apikey}).itervalues())
def set_option_use_proxy_chain(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionUseProxyChain/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def set_option_use_proxy_chain_auth(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionUseProxyChainAuth/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def set_option_single_cookie_request_header(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionSingleCookieRequestHeader/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def proxy_pac(self, apikey=''):
return (self.zap._request_other(self.zap.base_other + 'core/other/proxy.pac/', {'apikey' : apikey}))
def rootcert(self, apikey=''):
return (self.zap._request_other(self.zap.base_other + 'core/other/rootcert/', {'apikey' : apikey}))
def setproxy(self, proxy, apikey=''):
return (self.zap._request_other(self.zap.base_other + 'core/other/setproxy/', {'proxy' : proxy, 'apikey' : apikey}))
def xmlreport(self, apikey=''):
"""
Generates a report in XML format
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/xmlreport/', {'apikey' : apikey}))
def htmlreport(self, apikey=''):
"""
Generates a report in HTML format
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/htmlreport/', {'apikey' : apikey}))
def message_har(self, id, apikey=''):
"""
Gets the message with the given ID in HAR format
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/messageHar/', {'id' : id, 'apikey' : apikey}))
def messages_har(self, baseurl='', start='', count='', apikey=''):
"""
Gets the HTTP messages sent through/by ZAP, in HAR format, optionally filtered by URL and paginated with 'start' position and 'count' of messages
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/messagesHar/', {'baseurl' : baseurl, 'start' : start, 'count' : count, 'apikey' : apikey}))
def send_har_request(self, request, followredirects='', apikey=''):
"""
Sends the first HAR request entry, optionally following redirections. Returns, in HAR format, the request sent and response received and followed redirections, if any.
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/sendHarRequest/', {'request' : request, 'followRedirects' : followredirects, 'apikey' : apikey}))
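# Hedged usage sketch (not part of the generated file): this class expects a
# client object exposing `base`, `base_other`, `_request` and `_request_other`;
# with the python-owasp-zap bindings it is usually reached as `zap.core`, e.g.:
# from zapv2 import ZAPv2   # assumption: the bindings are packaged as zapv2
# zap = ZAPv2()
# print(zap.core.version)
# zap.core.new_session(name='scan1', overwrite='true', apikey='changeme')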
| 0xkasun/zaproxy | python/api/src/zapv2/core.py | Python | apache-2.0 | 14,644 | 0.010311 |
'''
Created on Jul 26, 2014
@author: lwoydziak
'''
from mockito.mocking import mock
from pertinosdk import PertinoSdk, QueryBuilder, where
from mockito.mockito import when, verify
from mockito.matchers import any, Matcher
class Contains(Matcher):
def __init__(self, sub):
self.sub = sub
def matches(self, arg):
if not hasattr(arg, 'find'):
return
if not self.sub or len(self.sub) <= 0:
return
for sub in self.sub:
if not arg.find(sub) > -1:
return
return True
def __repr__(self):
return "<Contains: '%s'>" % (str(self.sub))
def setupSdk():
requests = mock()
pertinoSdk = PertinoSdk('a', 'b', requests)
response = mock()
when(requests).get(any(), auth=any()).thenReturn(response)
return pertinoSdk, requests, response
def test_CanRetrieveOrganizationListUnfiltered():
pertinoSdk, requests, response = setupSdk()
json = {"orgs": [{"name": "organization", "id": 1234}]}
when(response).json().thenReturn(json)
assert pertinoSdk.listOrgs() == json["orgs"]
verify(requests).get('http://api.labs.pertino.com:5000/api/v0-alpha/orgs?user_key=993e79924d5b6346fe62a5cf62183bc5', auth=('a', 'b'))
def test_CanRetrieveOrganizationListFiltered():
pertinoSdk, _, response = setupSdk()
json = {"orgs": [{"name": "organization", "id": 1234}]}
when(response).json().thenReturn(json)
closure = mock()
pertinoSdk.listOrgs(closure=closure.function)
verify(closure).function(json["orgs"][0])
def test_CanRetrieveDevicesListUnfiltered():
pertinoSdk, requests, response = setupSdk()
json = {"devices": [{"ipv4Address": "123.456.789.10", "hostName": "host", "id": 1234}]}
when(response).json().thenReturn(json)
assert pertinoSdk.listDevicesIn({"id":1}) == json["devices"]
verify(requests).get('http://api.labs.pertino.com:5000/api/v0-alpha/orgs/1/devices?user_key=993e79924d5b6346fe62a5cf62183bc5', auth=any())
def test_CanRetrieveDevicesListFiltered():
pertinoSdk, _, response = setupSdk()
json = {"devices": [{"ipv4Address": "123.456.789.10", "hostName": "host", "id": 1234}]}
when(response).json().thenReturn(json)
closure = mock()
pertinoSdk.listDevicesIn({"id":1}, closure.function)
verify(closure).function(json["devices"][0])
def test_CanDeleteMachine():
pertinoSdk, requests, response = setupSdk()
when(requests).delete(any(), auth=any()).thenReturn(response)
devices = [{"ipv4Address": "123.456.789.10", "hostName": "host", "id": 1234}]
pertinoSdk.deleteFrom({"id":1}, devices)
verify(requests, times=1).delete('http://api.labs.pertino.com:5000/api/v0-alpha/orgs/1/devices/1234?user_key=993e79924d5b6346fe62a5cf62183bc5', auth=any())
def test_CanBuildClosureToFilterApiResponses():
isQueryBuilder = any(QueryBuilder)
assert isQueryBuilder.matches(where("any"))
closure = where("someField").contains("desired")
testDictionaryMatched = {"someField":"desired"}
assert closure(testDictionaryMatched)
testDictionaryNotMatched = {"someField":"nothing"}
assert not closure(testDictionaryNotMatched)
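# Hedged illustration (not part of the original tests): where(...) builds a
# predicate usable as a listing filter, e.g.
# pertinoSdk.listDevicesIn(org, where('hostName').contains('web'))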
| Pertino/pertino-sdk-python | pertinosdk/tests/pertinosdk_test.py | Python | mit | 3,217 | 0.009636 |
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
from .test_extends import inheritance_templates
class ExceptionsTests(SimpleTestCase):
@setup({'exception01': "{% extends 'nonexistent' %}"})
def test_exception01(self):
"""
Raise exception for invalid template name
"""
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string('exception01')
@setup({'exception02': '{% extends nonexistent %}'})
def test_exception02(self):
"""
Raise exception for invalid variable template name
"""
if self.engine.string_if_invalid:
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string('exception02')
else:
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('exception02')
@setup(
{'exception03': "{% extends 'inheritance01' %}"
"{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
inheritance_templates,
)
def test_exception03(self):
"""
Raise exception for extra {% extends %} tags
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('exception03')
@setup(
{'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
inheritance_templates,
)
def test_exception04(self):
"""
Raise exception for custom tags used in child with {% load %} tag in parent, not in child
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('exception04')
@setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
def test_exception05(self):
"""
Raise exception for block.super used in base template
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('exception05')
| yephper/django | tests/template_tests/syntax_tests/test_exceptions.py | Python | bsd-3-clause | 2,158 | 0.00139 |
#!/usr/env/bin python
# TODO
# 1. fix bug that if a script shares the same name as a material dll to load,
# the material may not be loaded correctly
# 2. fix multiple definitions of c++ -> python conversion for Spectrum
import sys
import os
import math
from libgotham import *
import inspect
def normalize(x):
length = math.sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2])
return (x[0] / length, x[1] / length, x[2] / length)
def cross(a,b):
v = (a[1]*b[2] - a[2]*b[1],
a[2]*b[0] - a[0]*b[2],
a[0]*b[1] - a[1]*b[0])
return v
def mul(A, x):
b0 = A[ 0] * x[0] + A[ 1] * x[1] + A[ 2] * x[2] + A[ 3] * x[3]
b1 = A[ 4] * x[0] + A[ 5] * x[1] + A[ 6] * x[2] + A[ 7] * x[3]
b2 = A[ 8] * x[0] + A[ 9] * x[1] + A[10] * x[2] + A[11] * x[3]
b3 = A[12] * x[0] + A[13] * x[1] + A[14] * x[2] + A[15] * x[3]
return (b0, b1, b2, b3)
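# Hedged example (not in the original): mul() treats A as a row-major 4x4
# matrix flattened to 16 values, so the identity matrix leaves x unchanged:
# mul((1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1), (1, 2, 3, 1)) == (1, 2, 3, 1)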
# define a class named 'PyGotham'
class PyGotham:
# standard shaderpaths
shaderpaths = ['.']
try:
shaderpaths += [os.path.join(os.environ['GOTHAMHOME'], 'shaders')]
except:
print 'Warning: $GOTHAMHOME undefined! Some shaders may not be found.'
# standard texturepaths
texturepaths = ['.']
try:
texturepaths += [os.path.join(os.environ['GOTHAMHOME'], 'textures')]
except:
print 'Warning: $GOTHAMHOME undefined! Some textures may not be found.'
# map texture aliases to texture handles
__textureMap = {}
# cache (shader, parameters) so as not to instantiate redundant shader objects
__shaderCache = {}
def __init__(self):
# by default, the subsystem is plain old Gotham
self.__subsystem = self.__createSubsystem("Gotham")
self.attribute("renderer:subsystem", "Gotham")
# include the directory containing this script
# in Python's search path
dir = os.path.dirname(inspect.getabsfile(mul))
sys.path += [dir]
def __createSubsystem(self, name, copyFrom = None):
result = None
# try to import every file in this directory
# look for the first one with a type of Gotham which matches name
dir = os.path.dirname(inspect.getabsfile(mul))
# try importing each file as a module
for file in os.listdir(dir):
fileBasename = os.path.splitext(file)[0]
try:
module = __import__(fileBasename)
if copyFrom == None:
# call the null constructor
# g = module.name()
exec "result = module." + name + "()"
else:
# call the copy constructor
# g = module.name(copyFrom)
exec "result = module." + name + "(copyFrom)"
del module
except:
pass
# stop at the first thing we were able to create
if result != None:
break;
return result
def pushMatrix(self):
return self.__subsystem.pushMatrix()
def popMatrix(self):
return self.__subsystem.popMatrix()
def translate(self, tx, ty, tz):
return self.__subsystem.translate(tx,ty,tz)
def rotate(self, degrees, rx, ry, rz):
return self.__subsystem.rotate(degrees, rx, ry, rz)
def scale(self, sx, sy, sz):
return self.__subsystem.scale(sx, sy, sz)
def getMatrix(self, m):
return self.__subsystem.getMatrix(m)
def sphere(self, cx, cy, cz, radius):
return self.__subsystem.sphere(cx, cy, cz, radius)
def pushAttributes(self):
return self.__subsystem.pushAttributes()
def popAttributes(self):
return self.__subsystem.popAttributes()
def attribute(self, name, value):
if value == False:
return self.__subsystem.attribute(name, str("false"))
elif value == True:
return self.__subsystem.attribute(name, str("true"))
else:
return self.__subsystem.attribute(name, str(value))
def getAttribute(self, name):
return self.__subsystem.getAttribute(name)
def material(self, name, *parms):
# pack parameters into a dictionary if necessary
parmDict = {}
if len(parms) > 1:
for i in range(0, len(parms), 2):
parmDict[parms[i]] = parms[i+1]
elif len(parms) == 1:
parmDict = parms[0]
# get the parameters and values into a hashable tuple
parmsTuple = tuple(zip(parmDict.keys(), parmDict.values()))
# first look in the cache
shaderHash = (name,parmsTuple).__hash__()
if self.__shaderCache.has_key(shaderHash):
# there's a hit, simply refer to the cached shader
handle = self.__shaderCache[shaderHash]
self.__subsystem.material(handle)
return True
else:
# XXX this is getting ugly
# add shaderpaths to os.path temporarily
oldpath = sys.path
sys.path += self.shaderpaths
#try:
# import the material
module = __import__(name)
# create a new material
m = module.createMaterial()
# set each parameter
for (p, val) in parmsTuple:
try:
setMethod = getattr(m, 'set_' + p)
try:
# first try to set it as if it were a 3-vector
setMethod(val[0], val[1], val[2])
except:
try:
# try a scalar instead
setMethod(val)
except:
print 'Warning: value %s for parameter %s has unknown type; material parameter left undefined.' % (val,p)
except:
print 'Warning: "%s" is not a parameter of material "%s"!' % (p, name)
# bind any dangling texture references
for member in dir(m):
handle = 0
alias = ''
try:
exec 'alias = m.%s.mAlias' % member
exec 'handle = m.%s.mHandle' % member
except:
continue;
if handle == 0 and alias != '':
# create the texture
exec 'm.%s.mHandle = self.texture(alias)' % member
del module
# send the material to the subsystem
materialHandle = self.__subsystem.material(m)
# cache the material
self.__shaderCache[shaderHash] = materialHandle
result = True
#except:
# print "Unable to find material '%s'." % name
# result = False
# restore paths
sys.path = oldpath
return result
def texture(self, *args):
# validate arguments
if len(args) != 1 and len(args) != 3:
raise ValueError, "texture() expects one (filename) or three (width,height,pixels) arguments."
if len(args) == 1:
name = args[0]
# find the file
for dir in self.texturepaths:
#fullpath = os.path.join(dir, name)
fullpath = dir + '/' + name
if os.path.exists(fullpath):
# does this texture exist?
try:
return self.__textureMap[fullpath]
except:
try:
result = self.__subsystem.texture(fullpath)
self.__textureMap[fullpath] = result
return result
except:
print "Warning: unable to load image file '%s'." % fullpath
return 0
else:
print fullpath, 'does not exist'
print "Warning: '%s' not found." % name
# return a reference to the default texture
return 0
if len(args) == 3:
      # convert the pixel data (args[2]) to a vector
      pixels = vector_float()
pixels[:] = args[2]
return self.__subsystem.texture(args[0],args[1],pixels)
def mesh(self, *args):
# validate arguments
if (len(args) != 2 and len(args) != 3) and len(args) != 4:
raise ValueError, "mesh() expects either two (points,indices), three (points,parms,indices), or four (points,parms,indices,normals) arguments."
# convert to vectors
points = args[0]
pointsvec = vector_float()
pointsvec[:] = points
if len(args) == 2:
faces = args[1]
elif len(args) == 3:
faces = args[2]
elif len(args) == 4:
faces = args[2]
# validate faces
if len(faces) == 0:
print 'mesh(): Warning: empty mesh detected.'
return
if (len(faces) % 3) != 0:
raise ValueError, "Triangle list not a multiple of 3!"
i = 0
for v in faces:
if v >= len(points) / 3:
raise ValueError, "Triangle %d refers to non-vertex!" % (i/3)
i += 1
facesvec = vector_uint()
facesvec[:] = faces
if len(args) == 2:
return self.__subsystem.mesh(pointsvec, facesvec)
if len(args) > 2:
parmsvec = vector_float()
parmsvec[:] = args[1]
if len(args) == 3:
return self.__subsystem.mesh(pointsvec, parmsvec, facesvec)
else:
normsvec = vector_float()
normsvec[:] = args[3]
return self.__subsystem.mesh(pointsvec, parmsvec, normsvec, facesvec)
def multMatrix(self, m):
matrix = vector_float()
matrix[:] = m
return self.__subsystem.multMatrix(matrix)
def loadMatrix(self, m):
matrix = vector_float()
matrix[:] = m
return self.__subsystem.loadMatrix(matrix)
def render(self, *args):
if len(args) > 0:
print "render((w,h), spp): Warning: using arguments with this function is deprecated."
print "Please use render() instead."
if len(args) == 1:
PyGotham.attribute(self, "record:width", str(args[0][0]))
PyGotham.attribute(self, "record:height", str(args[0][1]))
if len(args) > 2:
print "Error: too many parameters to render()."
return
elif len(args) == 2:
PyGotham.attribute(self, "record:width", str(args[0][0]))
PyGotham.attribute(self, "record:height", str(args[0][1]))
PyGotham.attribute(self, "renderer:spp", str(args[1]))
# finalize the subsystem
subsys = self.getAttribute("renderer:subsystem")
if subsys != "":
# do we need to change subsystems?
if type(self.__subsystem).__name__ != subsys:
try:
# create a copy using the new subsystem type
self.__subsystem = self.__createSubsystem(subsys, self.__subsystem)
except:
print 'Warning: Could not create subsystem named', subsys
return self.__subsystem.render()
def lookAt(self, eye, center, up):
# see gluLookAt man page: we construct the
# transpose of their matrix and inverse of their
# translation
f = (float(center[0] - eye[0]),
float(center[1] - eye[1]),
float(center[2] - eye[2]))
f = normalize(f)
up = normalize(up)
s = cross(f,up)
u = cross(s,f)
M = vector_float()
M[:] = [s[0], u[0], -f[0], 0.0,
s[1], u[1], -f[1], 0.0,
s[2], u[2], -f[2], 0.0,
0.0, 0.0, 0.0, 1.0]
self.translate(eye[0], eye[1], eye[2])
self.multMatrix(M)
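  # Hedged usage note (not in the original source): lookAt mimics gluLookAt,
  # e.g. g.lookAt((0, 0, 5), (0, 0, 0), (0, 1, 0)) places the camera on the
  # +z axis looking toward the origin with +y up.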
def pointlight(self, position, power, radius = 0.0005):
self.pushAttributes()
self.material('light', 'power', power)
#self.sphere(position[0], position[1], position[2], radius)
self.pushMatrix()
self.translate(position[0], position[1], position[2])
self.scale(2.0 * radius, 2.0 * radius, 2.0 * radius)
self.unitcube()
self.popMatrix()
self.popAttributes()
def camera(self, aspect, fovy, apertureRadius):
# create a small rectangle for the aperture
# centered at the 'eye' point
epsilon = 0.0005
apertureRadius += epsilon
# the aperture starts out as a unit square with normal pointing in the
# -z direction
points = [-0.5, -0.5, 0, -0.5, 0.5, 0, 0.5, 0.5, 0, 0.5, -0.5, 0]
uv = [0,0, 1,0, 1,1, 0,1]
triangles = [0, 1, 3, 1, 2, 3]
self.pushMatrix()
self.scale(apertureRadius/2, apertureRadius/2, apertureRadius/2)
# compute the center of the aperture in world coordinates by multiplying
# (0,0,0) by the current matrix
# assign the perspective material
m = vector_float()
self.getMatrix(m)
c = mul(m, (0,0,0,1))
up = mul(m, (0,1,0,0))
up = (up[0],up[1],up[2])
up = normalize(up)
right = mul(m, (1,0,0,0))
right = (right[0],right[1],right[2])
right = normalize(right)
look = mul(m, (0,0,-1,0))
look = (look[0],look[1],look[2])
look = normalize(look)
self.pushAttributes()
# convert to radians
fovyRadians = fovy * (math.pi/180.0)
# compute the location of the lower-left corner of the viewport,
# in world coordinates
near = 1.0 / math.tan(0.5 * fovyRadians)
#ll = c + near * look - aspect * right - up
ll = (c[0] + near * look[0] - aspect * right[0] - up[0],
c[1] + near * look[1] - aspect * right[1] - up[1],
c[2] + near * look[2] - aspect * right[2] - up[2])
self.material('perspective',
'aspect',aspect,
'lowerLeft', ll)
# name the camera
self.attribute("name", "camera")
self.mesh(points, uv, triangles)
self.popAttributes()
self.popMatrix()
# hint to the viewer after we've popped the attributes
# XXX we really need a way to pass types besides strings
self.attribute("viewer:fovy", str(fovy))
self.attribute("viewer:eyex", str(c[0]))
self.attribute("viewer:eyey", str(c[1]))
self.attribute("viewer:eyez", str(c[2]))
self.attribute("viewer:upx", str(up[0]))
self.attribute("viewer:upy", str(up[1]))
self.attribute("viewer:upz", str(up[2]))
self.attribute("viewer:lookx", str(look[0]))
self.attribute("viewer:looky", str(look[1]))
self.attribute("viewer:lookz", str(look[2]))
def parse(self, lines):
# XXX we can think about passing each line
# to a super-efficient parser in C++
# rather than calling Python's exec
# because it is slow
numLines = len(lines)
# add one to avoid modulo by zero
fivePercent = numLines / 20 + 1
lineNumber = 0
print 'Parsing...'
for line in lines:
if lineNumber % fivePercent == 0:
print 'Progress: ' + str(100 * float(lineNumber)/numLines) + '%\r',
sys.stdout.flush()
# first see if we can parse it quickly in c++
if not self.__subsystem.parseLine(line):
# each line depends on 'g' being defined as some Gotham object
g = self
exec line in globals()
lineNumber += 1
print '\nDone.'
def unitcube(self):
unitSquare = ([-0.5, 0, 0.5,
0.5, 0, 0.5,
0.5, 0, -0.5,
-0.5, 0, -0.5],
[ 0, 0,
1, 0,
1, 1,
0, 1],
[ 0, 1, 3,
1, 2, 3])
# front wall
self.pushMatrix()
self.translate(0, 0, 0.5)
self.rotate(90, 1, 0, 0)
self.mesh(unitSquare[0], unitSquare[1], unitSquare[2])
self.popMatrix()
# left wall
self.pushMatrix()
self.translate(-0.5,0,0)
self.rotate(90, 0, 0, 1)
self.mesh(unitSquare[0], unitSquare[1], unitSquare[2])
self.popMatrix()
# right wall
self.pushMatrix()
self.translate(0.5,0,0)
self.rotate(-90, 0, 0, 1)
self.mesh(unitSquare[0], unitSquare[1], unitSquare[2])
self.popMatrix()
# back wall
self.pushMatrix()
self.translate(0, 0, -0.5)
self.rotate(-90, 1, 0, 0)
self.mesh(unitSquare[0], unitSquare[1], unitSquare[2])
self.popMatrix()
# ceiling
self.pushMatrix()
self.translate(0, 0.5,0)
self.mesh(unitSquare[0], unitSquare[1], unitSquare[2])
self.popMatrix()
# floor
self.pushMatrix()
self.translate(0, -0.5,0)
self.rotate(180, 1, 0, 0)
self.mesh(unitSquare[0], unitSquare[1], unitSquare[2])
self.popMatrix()
def __copyright():
print 'Gotham 0.1'
print '(c) Copyright 2007-2012 Jared Hoberock. All Rights Reserved.'
# print copyright info as soon as this is imported
__copyright()
# wrap up the api in a singleton
# create the 'canonical' Gotham instance
# this is not technically a singleton but
# the idea is to make it work like one
__gGotham = PyGotham()
def __wrapMethod(name, wrapperName):
firstLine = 'def ' + wrapperName + '(*args, **kwargs):\n'
secondLine = ' return __gGotham.' + name + '(*args, **kwargs)\n'
return firstLine + secondLine
# now wrap up the api in gGotham
for member in dir(__gGotham):
# ignore standard stuff beginning '__'
if member[0] != '_' and inspect.ismethod(getattr(__gGotham, member)):
wrapperName = member[0].upper() + member[1:]
exec __wrapMethod(member, wrapperName)
| jaredhoberock/gotham | api/api.py | Python | apache-2.0 | 16,127 | 0.01637 |
import numpy as np
import seekpath
class Band:
def __init__(self,
phonon,
num_qpoints=101):
self._phonon = phonon # Phonopy object
self._num_qpoints = num_qpoints
self._band = []
self._labels = None
self._connected = None
def run(self):
unitcell = self._phonon.unitcell
cell = (unitcell.get_cell(),
unitcell.get_scaled_positions(),
unitcell.get_atomic_numbers())
band_path = seekpath.get_path(cell)
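        # Per seekpath's API: band_path['point_coords'] maps each label to
        # fractional coordinates and band_path['path'] lists
        # (start_label, end_label) pairs, e.g. [('GAMMA', 'X'), ('X', 'U')].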
self._set_band(band_path)
self._set_labels(band_path)
return self._run_band()
def get_band(self):
return self._phonon.get_band_structure()
def plot_band(self, plt, delta_d=0.02):
fig, ax = plt.subplots()
_, distances, frequencies, _ = self._phonon.get_band_structure()
d_shift = 0
d_point = []
special_points = []
unconnected_points = [0]
for d, f, c in zip(distances, frequencies, self._connected):
special_points.append(d[0] + d_shift)
if not c:
d_shift += delta_d
special_points.append(d[0] + d_shift)
unconnected_points.append(special_points[-2])
unconnected_points.append(special_points[-1])
plt.plot(d + d_shift, f, 'r-', linewidth=1)
special_points.append(distances[-1][-1] + d_shift)
unconnected_points.append(special_points[-1])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
plt.ylabel('Frequency (THz)')
plt.xlabel('Wave vector')
plt.xlim(0, special_points[-1])
plt.xticks(special_points, self._labels)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
for d in unconnected_points:
plt.axvline(x=d, linestyle='-', linewidth=1.5, color='k')
x_pairs = np.reshape(unconnected_points, (-1, 2))
x_pairs /= unconnected_points[-1]
ymin, ymax = ax.get_ylim()
for pair in x_pairs:
plt.axhline(y=0, xmin=pair[0], xmax=pair[1],
linestyle=':', linewidth=0.5, color='b')
plt.axhline(y=ymin, xmin=pair[0], xmax=pair[1],
linestyle='-', linewidth=1.5, color='k')
plt.axhline(y=ymax, xmin=pair[0], xmax=pair[1],
linestyle='-', linewidth=1.5, color='k')
fig.tight_layout()
def write_band_yaml(self):
self._phonon.write_yaml_band_structure()
def save_band(self, plt):
plt.savefig("band.png")
def _set_band(self, band_path):
point_coords = band_path['point_coords']
for path in band_path['path']:
self._append_band(point_coords[path[0]], point_coords[path[1]])
def _set_labels(self, band_path):
labels = []
prev_path = None
connected = []
point_coords = band_path['point_coords']
for path in band_path['path']:
if prev_path and prev_path[1] != path[0]:
labels.append(prev_path[1])
connected.append(False)
else:
connected.append(True)
labels.append(path[0])
prev_path = path
labels.append(prev_path[1])
for i, l in enumerate(labels):
            if 'GAMMA' in l:
                labels[i] = "$" + l.replace("GAMMA", r"\Gamma") + "$"
            elif 'SIGMA' in l:
                labels[i] = "$" + l.replace("SIGMA", r"\Sigma") + "$"
            elif 'DELTA' in l:
                labels[i] = "$" + l.replace("DELTA", r"\Delta") + "$"
            else:
                labels[i] = r"$\mathrm{%s}$" % l
self._labels = labels
self._connected = connected
def _append_band(self, q_start, q_end):
band = []
nq = self._num_qpoints
for i in range(nq):
band.append(np.array(q_start) +
(np.array(q_end) - np.array(q_start)) / (nq - 1) * i)
self._band.append(band)
def _run_band(self):
return self._phonon.set_band_structure(self._band)
if __name__ == '__main__':
import os
import sys
import yaml
from phonopy import Phonopy
from phonopy.interface.phonopy_yaml import get_unitcell_from_phonopy_yaml
from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
import matplotlib
def frac2val(string):
if '/' in string:
num, denom = [float(x) for x in string.split('/')]
return num / denom
else:
return float(string)
if len(sys.argv) > 1:
cell = get_unitcell_from_phonopy_yaml(sys.argv[1])
else:
cell = get_unitcell_from_phonopy_yaml("POSCAR-unitcell.yaml")
    phonon_info = yaml.load(open("phonon.yaml"))
phonon = None
if os.path.isfile("phonopy.conf"):
with open("phonopy.conf") as f:
for line in f:
if 'PRIMITIVE_AXIS' in line:
prim_vals = [frac2val(x) for x in line.split()[2:]]
if len(prim_vals) == 9:
primitive_matrix = np.reshape(prim_vals, (3, 3))
phonon = Phonopy(cell,
phonon_info['supercell_matrix'],
primitive_matrix=primitive_matrix)
else:
print("PRIMITIVE_AXIS is something wrong.")
sys.exit(1)
break
if phonon is None:
phonon = Phonopy(cell, phonon_info['supercell_matrix'])
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
if os.path.isfile("BORN"):
with open("BORN") as f:
primitive = phonon.get_primitive()
nac_params = parse_BORN(primitive, filename="BORN")
nac_params['factor'] = 14.399652
phonon.set_nac_params(nac_params)
band = Band(phonon, num_qpoints=101)
if band.run():
band.write_band_yaml()
_, distances, frequencies, _ = band.get_band()
d_end = distances[-1][-1]
f_max = np.max(frequencies)
primitive = phonon.get_primitive()
num_atom = primitive.get_number_of_atoms()
length = num_atom ** (1.0 / 3) * 4.5
figsize_x = d_end * length
margin = 0.7
scale = 0.15
delta_d = d_end / (figsize_x - margin) * scale
matplotlib.use('Agg')
matplotlib.rcParams.update({'figure.figsize': (figsize_x, 3.1),
'font.family': 'serif'})
import matplotlib.pyplot as plt
band.plot_band(plt, delta_d=(delta_d))
band.save_band(plt)
| atztogo/phonondb | phonondb/phonopy/band.py | Python | bsd-3-clause | 7,188 | 0.001113 |
# -*- coding: UTF-8 -*-
from django.core.management.base import BaseCommand
from optparse import make_option
import daemon
import daemon.pidfile
from signal import SIGTSTP, SIGTERM, SIGABRT
import sys, os, subprocess
import time
from jukebox.jukebox_core import api
class Command(BaseCommand):
daemon = None
proc = None
mpg123 = None
option_list = BaseCommand.option_list + (
make_option(
"--start",
action="store_true",
dest="start",
help="Start mpg123 playback"
),
make_option(
"--stop",
action="store_true",
dest="stop",
help="Stop mpg123 playback"
),
)
def handle(self, *args, **options):
# check if mpg123 is available
fin, fout = os.popen4(["which", "mpg123"])
self.mpg123 = fout.read().replace("\n", "")
if not len(self.mpg123):
print "mpg123 is not installed"
return
pidFile = os.path.dirname(
os.path.abspath(__file__)
) + "/../../daemon.pid"
if options["start"]:
if os.path.exists(pidFile):
print "Daemon already running, pid file exists"
return
pid = daemon.pidfile.TimeoutPIDLockFile(
pidFile,
10
)
print "Starting jukebox_mpg123 daemon..."
self.daemon = daemon.DaemonContext(
uid=os.getuid(),
gid=os.getgid(),
pidfile=pid,
working_directory=os.getcwd(),
detach_process=True,
signal_map={
SIGTSTP: self.shutdown,
SIGABRT: self.skipSong
}
)
with self.daemon:
print "Register player"
pid = int(open(pidFile).read())
players_api = api.players()
players_api.add(pid)
self.play()
elif options["stop"]:
if not os.path.exists(pidFile):
print "Daemon not running"
return
print "Stopping daemon..."
pid = int(open(pidFile).read())
os.kill(pid, SIGTSTP)
print "Unregister player " + str(pid)
players_api = api.players()
players_api.remove(pid)
else:
self.print_help("jukebox_mpg123", "help")
def play(self):
songs_api = api.songs()
while 1:
if self.proc is None:
song_instance = songs_api.getNextSong()
if not os.path.exists(song_instance.Filename):
print "File not found: %s" % song_instance.Filename
continue
print "Playing " + song_instance.Filename
self.proc = subprocess.Popen(
[self.mpg123, song_instance.Filename]
)
else:
if not self.proc.poll() is None:
self.proc = None
time.sleep(0.5)
def shutdown(self, signal, action):
if not self.proc is None:
os.kill(self.proc.pid, SIGTERM)
if not self.daemon is None:
self.daemon.close()
sys.exit(0)
def skipSong(self, signal, action):
if not self.proc is None:
os.kill(self.proc.pid, SIGTERM)
| lociii/jukebox_mpg123 | jukebox_mpg123/management/commands/jukebox_mpg123.py | Python | mit | 3,441 | 0.001453 |
# -*- coding:utf-8 -*-
from collections import defaultdict
import numpy
class ThompsonAgent:
def __init__(self, seed=None):
self._succeeds = defaultdict(int)
self._fails = defaultdict(int)
self._np_random = numpy.random.RandomState(seed)
def choose(self, arms, features=None):
return max(arms, key=lambda arm: self._score(arm))
def _score(self, arm):
return self._np_random.beta(
self._succeeds[arm] + 0.5,
self._fails[arm] + 0.5)
def update(self, arm, reward, arms=None, features=None):
if reward > 0:
self._succeeds[arm] += 1
else:
            self._fails[arm] += 1
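# Hedged usage sketch (not part of the original module): a Bernoulli-bandit
# simulation; the arm names and probabilities below are illustrative.
if __name__ == '__main__':
    agent = ThompsonAgent(seed=42)
    arms = ['a', 'b', 'c']
    true_p = {'a': 0.2, 'b': 0.5, 'c': 0.8}  # hypothetical reward rates
    rng = numpy.random.RandomState(0)
    for _ in range(1000):
        arm = agent.choose(arms)
        reward = 1 if rng.random_sample() < true_p[arm] else 0
        agent.update(arm, reward)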
| ohtaman/pynm | pynm/reinforce/bandit/thompson.py | Python | mit | 683 | 0.001464 |
# -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev", "Roman Chernikov"
__date__ = "07 Jan 2016"
PI = 3.1415926535897932384626433832795
PI2 = 6.283185307179586476925286766559
SQRT2PI = PI2**0.5 # =2.5066282746310002
SQ3 = 1.7320508075688772935274463415059
SQ2 = 2**0.5 # =1.4142135623730951
SQPI = PI**0.5 # =1.7724538509055159
SIE0 = 1.602176565e-19
#E0 = 4.803e-10 # [esu]
C = 2.99792458e10 # [cm/sec]
E0 = SIE0 * C / 10
M0 = 9.109383701528e-28 # [g]
SIM0 = 9.109383701528e-31
M0C2 = 0.510998928 # MeV
HPLANCK = 6.626069573e-27 # [erg*sec]
EV2ERG = 1.602176565e-12 # Energy conversion from [eV] to [erg]
K2B = 2 * PI * M0 * C**2 * 0.001 / E0 # =10.710201593926415
# EMC = SIE0 / SIM0 / C[mm]
EMC = 0.5866791802416487
SIHPLANCK = 6.626069573e-34
#SIM0 = M0 * 1e-3
SIC = C * 1e-2
FINE_STR = 1 / 137.03599976
#E2W = PI2 * SIE0 / SIH # w = E2W * E[eV]
E2W = 1519267514747457.9195337718065469
E2WC = 5067.7309392068091
R0 = 2.817940285e-5 # A
AVOGADRO = 6.02214199e23 # atoms/mol
CHeVcm = HPLANCK * C / EV2ERG # {c*h[eV*cm]} = 0.00012398419297617678
CH = CHeVcm * 1e8 # {c*h[eV*A]} = 12398.419297617678
CHBAR = CH / PI2 # {c*h/(2pi)[eV*A]} = 1973.2697177417986
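# Worked example (illustrative): CH is c*h in eV*Angstrom, so a photon's
# wavelength is lambda = CH / E; E = 12398.419 eV gives lambda = 1.0 A, and
# w = E2W * E converts photon energy [eV] to angular frequency [1/s].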
| kklmn/xrt | xrt/backends/raycing/physconsts.py | Python | mit | 1,189 | 0.003364 |
from chimera.core.callback import callback
from chimera.core.exceptions import printException
from chimera.gui.modules.canvas import FITS, FITSCanvas
from chimera.gui.module import ChimeraGUIModule
from chimera.interfaces.camera import CameraStatus
from chimera.util.image import Image
import gtk
import glib
import gdl
import time
import urllib
import os
import threading
import copy
class ImageViewer:
def __init__(self, main):
self.main = main
self.notebook = self.main.builder.get_object("imagesNotebook")
self.notebook.append_page(gtk.Label("No images"))
self.first_image = True
def newImage(self, image):
fits = FITS(image.filename())
canvas = FITSCanvas(fits.frame)
if self.first_image:
self.notebook.remove_page(0)
self.first_image = False
tab_num = self.notebook.append_page(
canvas.window, gtk.Label(os.path.basename(image.filename())))
self.notebook.set_current_page(tab_num)
class CameraController:
def __init__(self, module):
self.module = module
self.camera = None
self.wheel = None
def setCamera(self, camera):
self.camera = camera
@callback(self.module.manager)
def exposeBegin(request):
self.module.view.exposeBegin(request)
@callback(self.module.manager)
def exposeComplete(request, status):
if status == CameraStatus.OK:
self.module.view.exposeComplete(request)
else:
self.module.view.abort()
@callback(self.module.manager)
def readoutBegin(request):
self.module.view.readoutBegin(request)
@callback(self.module.manager)
def readoutComplete(image, status):
if status == CameraStatus.OK:
self.module.view.readoutComplete(image)
else:
self.module.view.abort()
self.camera.exposeBegin += exposeBegin
self.camera.exposeComplete += exposeComplete
self.camera.readoutBegin += readoutBegin
self.camera.readoutComplete += readoutComplete
def getCamera(self):
# create a copy of Proxy to make sure multiple threads don't reuse it
return copy.copy(self.camera)
def setFilterWheel(self, wheel):
self.wheel = wheel
def getWheel(self):
# transfer to current thread and return (a hacky way to reuse Proxies)
self.wheel._transferThread()
return self.wheel
def expose(self):
camera = self.getCamera()
durationSpin = self.module.builder.get_object("durationSpin")
duration = durationSpin.get_value()
framesSpin = self.module.builder.get_object("framesSpin")
frames = framesSpin.get_value()
shutterOpen = self.module.builder.get_object("shutterOpen")
        if shutterOpen.get_active():
shutterState = "OPEN"
else:
shutterState = "CLOSE"
filters = self.module.builder.get_object(
"filtersBox").get_children()[1].get_children()
current = None
for f in filters:
if f.get_active():
current = f
filterName = current.get_label()
self.module.view.begin(duration, frames)
if self.getWheel().getFilter() != filterName:
self.module.view.beginFilterChange(filterName)
self.getWheel().setFilter(filterName)
self.module.view.endFilterChange(filterName)
try:
camera.expose({"exptime": duration,
"frames": frames,
"shutter": shutterState})
except Exception, e:
printException(e)
finally:
self.module.view.end()
def abortExposure(self):
self.getCamera().abortExposure()
self.module.view.abort()
class CameraView:
def __init__(self, module):
self.module = module
self.exposureStatusbar = self.module.builder.get_object(
"exposureStatusbar")
self.exposureLabel = self.module.builder.get_object("exposureLabel")
self.exposureProgress = self.module.builder.get_object(
"exposureProgress")
self.exposureLabel.hide()
self.exposureProgress.hide()
self.exposureProgress.set_pulse_step(0.1)
self.frames = 0
self.exptime = 0
self.currentFrame = 0
self.exposeTimer = None
self.readoutTimer = None
self.filterTimer = None
def begin(self, exptime, frames):
self.frames = frames
self.exptime = exptime
self.currentFrame = 0
def ui():
self.module.builder.get_object(
"abortExposureButton").set_sensitive(True)
self.module.builder.get_object("exposeButton").set_sensitive(False)
self.exposureLabel.set_label(
"<b>%-2d/%-2d</b>" % (self.currentFrame, self.frames))
self.exposureProgress.set_fraction(0.0)
self.exposureProgress.set_text("starting ...")
self.exposureLabel.show()
self.exposureProgress.show()
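        # GTK widgets are not thread-safe, so all UI mutations are funneled
        # through glib.idle_add to run on the main loop.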
glib.idle_add(ui)
def exposeBegin(self, imageRequest):
startTime = time.time()
timeout = startTime + self.exptime
self.currentFrame += 1
def ui():
self.exposureLabel.set_label(
"<b>%-2d/%-2d</b>" % (self.currentFrame, self.frames))
self.exposureProgress.set_fraction(0.0)
def exposeTimer(startTime, timeout):
now = time.time()
if now >= timeout:
return False
counter = now - startTime
self.exposureProgress.set_fraction(counter / self.exptime)
self.exposureProgress.set_text(
"%.2f" % (self.exptime - counter))
return True
self.exposeTimer = glib.timeout_add(
100, exposeTimer, startTime, timeout)
glib.idle_add(ui)
def exposeComplete(self, imageRequest):
def ui():
self.exposureProgress.set_fraction(1.0)
self.exposureProgress.set_text("exposure complete ...")
if self.exposeTimer:
glib.source_remove(self.exposeTimer)
self.exposeTimer = 0
glib.idle_add(ui)
def readoutBegin(self, imageRequest):
def ui():
self.exposureProgress.set_text("reading out and saving ...")
def readoutTimer():
self.exposureProgress.pulse()
return True
self.readoutTimer = glib.timeout_add(100, readoutTimer)
glib.idle_add(ui)
def readoutComplete(self, image):
if self.readoutTimer:
glib.source_remove(self.readoutTimer)
self.readoutTimer = 0
def ui():
self.exposureProgress.set_fraction(1.0)
self.exposureProgress.set_text("readout and save complete ...")
url = image.http()
imageFileName = urllib.urlretrieve(
url, filename=os.path.basename(image.filename()))[0]
imageFile = Image.fromFile(imageFileName)
self.module.imageViewer.newImage(imageFile)
glib.idle_add(ui)
def end(self):
def ui():
self.exposureLabel.hide()
self.exposureProgress.hide()
self.module.builder.get_object(
"abortExposureButton").set_sensitive(False)
self.module.builder.get_object("exposeButton").set_sensitive(True)
glib.idle_add(ui)
def abort(self):
def ui():
self.exposureProgress.set_text("aborted!")
self.module.builder.get_object(
"abortExposureButton").set_sensitive(False)
self.module.builder.get_object("exposeButton").set_sensitive(True)
def abortTimer():
self.exposureLabel.hide()
self.exposureProgress.hide()
return False
glib.timeout_add(2000, abortTimer)
if self.exposeTimer:
glib.source_remove(self.exposeTimer)
self.exposeTimer = 0
if self.readoutTimer:
glib.source_remove(self.readoutTimer)
self.readoutTimer = 0
glib.idle_add(ui)
def beginFilterChange(self, filterName):
def filterTimer():
self.exposureProgress.pulse()
return True
self.filterTimer = glib.timeout_add(50, filterTimer)
def ui():
self.exposureProgress.set_text(
"switching to filter %s ..." % filterName)
glib.idle_add(ui)
def endFilterChange(self, filterName):
if self.filterTimer:
glib.source_remove(self.filterTimer)
self.filterTimer = 0
def ui():
self.exposureProgress.set_fraction(1.0)
self.exposureProgress.set_text("filter switch complete!")
glib.idle_add(ui)
class CameraGUIModule(ChimeraGUIModule):
module_controls = {"camera": "Camera",
"wheel": "FilterWheel"}
def __init__(self, manager):
ChimeraGUIModule.__init__(self, manager)
self.view = None
self.controller = None
def setupGUI(self, objects):
camera = objects.get("camera", None)
wheel = objects.get("wheel", None)
self.builder = gtk.Builder()
self.builder.add_from_file(
os.path.join(os.path.dirname(__file__), "camera.xml"))
self.view = CameraView(self)
self.controller = CameraController(self)
self.imageViewer = ImageViewer(self)
self.controller.setCamera(camera)
self.controller.setFilterWheel(wheel)
# some UI tweaks
self.builder.get_object("durationSpin").set_value(1)
self.builder.get_object("framesSpin").set_value(1)
self.builder.get_object("shutterOpen").set_active(1)
if wheel:
# create filter box
filters = wheel.getFilters()
hbox = gtk.HBox()
first = gtk.RadioButton(None, filters[0])
hbox.pack_start(first)
for filter in filters[1:]:
radio = gtk.RadioButton(first, filter)
hbox.pack_start(radio)
hbox.show_all()
self.builder.get_object("filtersBox").pack_start(hbox)
self.builder.get_object("abortExposureButton").set_sensitive(False)
win = self.builder.get_object("window")
gui = self.builder.get_object("gui")
win.remove(gui)
return [("Camera", gui, gdl.DOCK_LEFT)]
def setupEvents(self):
def camera_expose_action(action):
self.builder.get_object("abortExposureButton").set_sensitive(True)
self.builder.get_object("exposeButton").set_sensitive(False)
threading.Thread(target=self.controller.expose).start()
def camera_abort_action(action):
threading.Thread(target=self.controller.abortExposure).start()
self.builder.connect_signals(
{"camera_expose_action": camera_expose_action,
"camera_abort_action": camera_abort_action})
| ankanaan/chimera | src/chimera/gui/modules/camera.py | Python | gpl-2.0 | 11,323 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyResource(Model):
"""The network resource topology information for the given resource group.
:param name: Name of the resource.
:type name: str
:param id: ID of the resource.
:type id: str
:param location: Resource location.
:type location: str
:param associations: Holds the associations the resource has with other
resources in the resource group.
:type associations:
list[~azure.mgmt.network.v2017_11_01.models.TopologyAssociation]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'associations': {'key': 'associations', 'type': '[TopologyAssociation]'},
}
def __init__(self, *, name: str=None, id: str=None, location: str=None, associations=None, **kwargs) -> None:
super(TopologyResource, self).__init__(**kwargs)
self.name = name
self.id = id
self.location = location
self.associations = associations
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/topology_resource_py3.py | Python | mit | 1,570 | 0.005096 |
"""Tests for the activitylog."""
from datetime import datetime
from nose.tools import eq_
import amo
import amo.tests
from mkt.webapps.models import Webapp
from mkt.users.models import UserProfile
class LogTest(amo.tests.TestCase):
def setUp(self):
u = UserProfile.objects.create(username='foo')
amo.set_user(u)
def test_details(self):
"""
If we get details, verify they are stored as JSON, and we get out what
we put in.
"""
a = Webapp.objects.create(name='kumar is awesome')
magic = dict(title='no', body='way!')
al = amo.log(amo.LOG.DELETE_REVIEW, 1, a, details=magic)
eq_(al.details, magic)
eq_(al._details, '{"body": "way!", "title": "no"}')
def test_created(self):
"""
Verify that we preserve the create date.
"""
al = amo.log(amo.LOG.CUSTOM_TEXT, 'hi', created=datetime(2009, 1, 1))
eq_(al.created, datetime(2009, 1, 1))
| ngokevin/zamboni | apps/amo/tests/test_log.py | Python | bsd-3-clause | 976 | 0 |
import poplib
import email
import time
class MailHelper:
def __init__(self, app):
self.app = app
def get_mail(self, username, password, subject):
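        # Poll the POP3 mailbox up to 5 times, pausing 3 s between rounds,
        # until a message with the requested subject shows up.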
        for i in range(5):
pop = poplib.POP3(self.app.config['james']['host'])
pop.user(username)
pop.pass_(password)
num = pop.stat()[0]
            if num > 0:
for n in range(num):
msglines = pop.retr(n+1)[1]
msgtext = "\n".join(map(lambda x: x.decode('utf-8'), msglines))
msg = email.message_from_string(msgtext)
if msg.get('Subject') == subject:
pop.dele(n+1)
pop.close()
return msg.get_payload()
pop.close()
time.sleep(3)
return None
| Droriel/python_training_mantis | fixture/mail.py | Python | apache-2.0 | 858 | 0.004662 |
# Can't be run at the moment, pending migration to openstack-client
# Copyright (c) 2018 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests.unit.extensions import base as test_extensions_base
from neutronclient.v2_0 import client
OPENSTACK_CLI_ID = "/ccf/classifications"
ASSOCS_PATH = "/ccf/classifications"
NET_ASSOC_ID = "uuid_client_foo"
class OpenstackClientTestCase(test_extensions_base.ExtensionTestCase):
def setUp(self):
super(OpenstackClientTestCase, self).setUp()
self.client = client.Client()
self.client.list_ext = mock.Mock()
self.client.create_ext = mock.Mock()
self.client.show_ext = mock.Mock()
self.client.update_ext = mock.Mock()
self.client.delete_ext = mock.Mock()
print("self.client keys: ", dir(self.client))
def test_client_url_list(self):
self.client.ListIPV4Classification(OPENSTACK_CLI_ID)
self.client.list_ext.assert_called_once_with(mock.ANY, ASSOCS_PATH,
mock.ANY)
def test_client_url_create(self):
self.client.CreateIPV4Classification(OPENSTACK_CLI_ID, {})
self.client.create_ext.assert_called_once_with(ASSOCS_PATH, mock.ANY)
def test_client_url_show(self):
self.client.ShowIPV4Classification(NET_ASSOC_ID, OPENSTACK_CLI_ID)
self.client.show_ext.assert_called_once_with(ASSOCS_PATH,
NET_ASSOC_ID)
def test_client_url_update(self):
self.client.UpdateIPV4Classification(NET_ASSOC_ID,
OPENSTACK_CLI_ID, {})
self.client.update_ext.assert_called_once_with(ASSOCS_PATH,
NET_ASSOC_ID,
mock.ANY)
def test_client_url_delete(self):
self.client.DeleteIPV4Classification(NET_ASSOC_ID, OPENSTACK_CLI_ID)
self.client.delete_ext.assert_called_once_with(ASSOCS_PATH,
NET_ASSOC_ID)
| openstack/neutron-classifier | neutron_classifier/tests/unit/cli/__test_db_classifications.py | Python | apache-2.0 | 2,667 | 0 |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from django.db import models
class ElggCalendarEvents(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
calendar = models.IntegerField()
title = models.CharField(max_length=765, blank=True)
description = models.TextField()
access = models.CharField(max_length=765, blank=True)
location = models.CharField(max_length=150, blank=True)
date_start = models.IntegerField()
date_end = models.IntegerField()
class Meta:
db_table = u'elgg_calendar_events'
class ElggCcAccess(models.Model):
id = models.IntegerField(primary_key=True)
gunid = models.CharField(max_length=765, blank=True)
token = models.BigIntegerField(null=True, blank=True)
chsum = models.CharField(max_length=96, blank=True)
ext = models.CharField(max_length=384, blank=True)
type = models.CharField(max_length=60, blank=True)
parent = models.BigIntegerField(null=True, blank=True)
owner = models.IntegerField(null=True, blank=True)
ts = models.DateTimeField()
class Meta:
db_table = u'elgg_cc_access'
class ElggCcGunid(models.Model):
id = models.IntegerField(primary_key=True)
type = models.CharField(max_length=765, blank=True)
objid = models.IntegerField(null=True, blank=True)
gunid = models.CharField(max_length=765, blank=True)
class Meta:
db_table = u'elgg_cc_gunid'
class ElggCcTransport(models.Model):
id = models.IntegerField(primary_key=True)
trtoken = models.CharField(max_length=48, blank=True)
direction = models.CharField(max_length=384, blank=True)
state = models.CharField(max_length=384, blank=True)
trtype = models.CharField(max_length=384, blank=True)
lock = models.CharField(max_length=3, blank=True)
target = models.CharField(max_length=765, blank=True)
rtrtok = models.CharField(max_length=48, blank=True)
mdtrtok = models.CharField(max_length=48, blank=True)
gunid = models.BigIntegerField(null=True, blank=True)
pdtoken = models.BigIntegerField(null=True, blank=True)
url = models.CharField(max_length=765, blank=True)
localfile = models.CharField(max_length=765, blank=True)
fname = models.CharField(max_length=765, blank=True)
title = models.CharField(max_length=765, blank=True)
expectedsum = models.CharField(max_length=96, blank=True)
realsum = models.CharField(max_length=96, blank=True)
expectedsize = models.IntegerField(null=True, blank=True)
realsize = models.IntegerField(null=True, blank=True)
uid = models.IntegerField(null=True, blank=True)
errmsg = models.CharField(max_length=765, blank=True)
jobpid = models.IntegerField(null=True, blank=True)
start = models.DateTimeField()
starttime = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'elgg_cc_transport'
class ElggCmBaskets(models.Model):
userid = models.IntegerField(unique=True)
baskets = models.TextField()
updated = models.DateTimeField()
migrated = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'elgg_cm_baskets'
class ElggCmContainer(models.Model):
ident = models.IntegerField(primary_key=True)
x_ident = models.IntegerField()
body = models.TextField()
content_list = models.TextField()
container_type = models.CharField(max_length=150)
date_time = models.IntegerField()
target_duration = models.CharField(max_length=30)
duration = models.DecimalField(max_digits=14, decimal_places=4)
sub_type = models.IntegerField()
best_broadcast_segment = models.CharField(max_length=1200)
best_broadcast_daytime = models.CharField(max_length=60)
best_broadcast_weekday = models.CharField(max_length=60)
livesession_license = models.IntegerField()
played = models.IntegerField()
rotation_include = models.IntegerField()
rebroadcast_url = models.CharField(max_length=1536)
class Meta:
db_table = u'elgg_cm_container'
class ElggCmFile(models.Model):
ident = models.IntegerField(primary_key=True)
file = models.CharField(max_length=765)
x_ident = models.IntegerField()
posted = models.IntegerField()
filetype = models.CharField(max_length=240)
class Meta:
db_table = u'elgg_cm_file'
class ElggCmLog(models.Model):
ident = models.IntegerField(primary_key=True)
type = models.CharField(max_length=60)
content_ident = models.IntegerField()
action = models.CharField(max_length=180)
user_ident = models.IntegerField()
timestamp = models.IntegerField()
class Meta:
db_table = u'elgg_cm_log'
class ElggCmMaster(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
editor = models.IntegerField()
lastupdater = models.IntegerField()
type = models.CharField(max_length=60)
title = models.TextField()
intro = models.TextField()
access = models.CharField(max_length=60, blank=True)
access_write = models.CharField(max_length=60)
lastupdate = models.IntegerField()
posted = models.IntegerField()
is_history = models.IntegerField()
index = models.TextField()
status = models.IntegerField()
duration = models.IntegerField()
notes = models.IntegerField()
revnumber = models.IntegerField()
locked = models.IntegerField()
locked_userident = models.IntegerField()
migrated = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'elgg_cm_master'
class ElggCmMedias(models.Model):
id = models.IntegerField(primary_key=True)
x_id = models.IntegerField()
created = models.DateTimeField(null=True, blank=True)
updated = models.DateTimeField(null=True, blank=True)
published = models.DateTimeField(null=True, blank=True)
status = models.CharField(max_length=60, blank=True)
notes = models.TextField(blank=True)
filesize = models.IntegerField(null=True, blank=True)
fileformat = models.CharField(max_length=36, blank=True)
dataformat = models.CharField(max_length=36, blank=True)
channels = models.IntegerField(null=True, blank=True)
sample_rate = models.IntegerField(null=True, blank=True)
bitrate = models.IntegerField(null=True, blank=True)
channelmode = models.CharField(max_length=36, blank=True)
bitrate_mode = models.CharField(max_length=36, blank=True)
lossless = models.IntegerField(null=True, blank=True)
encoder_options = models.CharField(max_length=36, blank=True)
compression_ratio = models.DecimalField(null=True, max_digits=14, decimal_places=4, blank=True)
encoding = models.CharField(max_length=36, blank=True)
path = models.CharField(max_length=3072, blank=True)
sourcepath = models.CharField(max_length=3072, blank=True)
parentdirectory = models.CharField(max_length=750, blank=True)
filename = models.CharField(max_length=750, blank=True)
pipeline_status = models.IntegerField()
has_flac_default = models.IntegerField()
has_mp3_default = models.IntegerField()
has_mp3_64 = models.IntegerField()
has_mp3_128 = models.IntegerField()
has_mp3_320 = models.IntegerField()
has_peakfile = models.IntegerField()
has_peakfile_raw = models.IntegerField()
has_peakfile_mp3 = models.IntegerField()
lock = models.IntegerField()
class Meta:
db_table = u'elgg_cm_medias'
class ElggCmRelations(models.Model):
ident = models.IntegerField(primary_key=True)
c_ident_master = models.IntegerField()
c_ident_slave = models.IntegerField()
relation_type = models.IntegerField()
user_ident = models.IntegerField()
class Meta:
db_table = u'elgg_cm_relations'
class ElggCmText(models.Model):
ident = models.IntegerField(primary_key=True)
body = models.TextField()
x_ident = models.IntegerField()
posted = models.IntegerField()
class Meta:
db_table = u'elgg_cm_text'
class ElggCmWordlist(models.Model):
word_text = models.CharField(max_length=150, primary_key=True)
word_id = models.IntegerField()
word_common = models.IntegerField()
class Meta:
db_table = u'elgg_cm_wordlist'
class ElggCmWordmatch(models.Model):
content_ident = models.IntegerField()
word_id = models.IntegerField()
title_match = models.IntegerField()
class Meta:
db_table = u'elgg_cm_wordmatch'
class ElggComments(models.Model):
ident = models.IntegerField(primary_key=True)
object_id = models.IntegerField()
object_type = models.CharField(max_length=384)
owner = models.IntegerField()
postedname = models.CharField(max_length=384)
body = models.TextField()
posted = models.IntegerField()
class Meta:
db_table = u'elgg_comments'
class ElggContentFlags(models.Model):
ident = models.IntegerField(primary_key=True)
url = models.CharField(max_length=384)
class Meta:
db_table = u'elgg_content_flags'
class ElggDatalists(models.Model):
ident = models.IntegerField(primary_key=True)
name = models.CharField(max_length=96)
value = models.TextField()
class Meta:
db_table = u'elgg_datalists'
class ElggFeedPosts(models.Model):
ident = models.IntegerField(primary_key=True)
posted = models.CharField(max_length=192)
added = models.IntegerField()
feed = models.IntegerField()
title = models.TextField()
body = models.TextField()
url = models.CharField(max_length=765)
class Meta:
db_table = u'elgg_feed_posts'
class ElggFeedSubscriptions(models.Model):
ident = models.IntegerField(primary_key=True)
user_id = models.IntegerField()
feed_id = models.IntegerField()
autopost = models.CharField(max_length=9)
autopost_tag = models.CharField(max_length=384)
class Meta:
db_table = u'elgg_feed_subscriptions'
class ElggFeeds(models.Model):
ident = models.IntegerField(primary_key=True)
url = models.CharField(max_length=384)
feedtype = models.CharField(max_length=48)
name = models.TextField()
tagline = models.CharField(max_length=384)
siteurl = models.CharField(max_length=384)
last_updated = models.IntegerField()
class Meta:
db_table = u'elgg_feeds'
class ElggFileFolders(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
files_owner = models.IntegerField()
parent = models.IntegerField()
name = models.CharField(max_length=384)
access = models.CharField(max_length=60)
handler = models.CharField(max_length=96)
class Meta:
db_table = u'elgg_file_folders'
class ElggFileMetadata(models.Model):
ident = models.IntegerField(primary_key=True)
name = models.CharField(max_length=765)
value = models.TextField()
file_id = models.IntegerField()
class Meta:
db_table = u'elgg_file_metadata'
class ElggFiles(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
files_owner = models.IntegerField()
folder = models.IntegerField()
community = models.IntegerField()
title = models.CharField(max_length=765)
originalname = models.CharField(max_length=765)
description = models.CharField(max_length=765)
location = models.CharField(max_length=765)
access = models.CharField(max_length=60)
size = models.IntegerField()
time_uploaded = models.IntegerField()
handler = models.CharField(max_length=96)
class Meta:
db_table = u'elgg_files'
class ElggFilesIncoming(models.Model):
ident = models.IntegerField(primary_key=True)
installid = models.CharField(max_length=96)
intentiondate = models.IntegerField()
size = models.BigIntegerField()
foldername = models.CharField(max_length=384)
user_id = models.IntegerField()
class Meta:
db_table = u'elgg_files_incoming'
class ElggFriends(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField(unique=True)
friend = models.IntegerField()
status = models.CharField(max_length=12)
class Meta:
db_table = u'elgg_friends'
class ElggFriendsRequests(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField(unique=True)
friend = models.IntegerField(unique=True)
class Meta:
db_table = u'elgg_friends_requests'
class ElggGroupMembership(models.Model):
ident = models.IntegerField(primary_key=True)
user_id = models.IntegerField(unique=True)
group_id = models.IntegerField(unique=True)
class Meta:
db_table = u'elgg_group_membership'
class ElggGroups(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
name = models.CharField(max_length=384)
access = models.CharField(max_length=60)
class Meta:
db_table = u'elgg_groups'
class ElggIcons(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
filename = models.CharField(max_length=384)
description = models.CharField(max_length=765)
class Meta:
db_table = u'elgg_icons'
class ElggInvitations(models.Model):
ident = models.IntegerField(primary_key=True)
name = models.CharField(max_length=384)
email = models.CharField(max_length=384)
code = models.CharField(max_length=384)
owner = models.IntegerField()
added = models.IntegerField()
class Meta:
db_table = u'elgg_invitations'
class ElggIpoolData(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
title = models.TextField()
description = models.TextField()
body = models.TextField()
access = models.CharField(max_length=60, blank=True)
longitude = models.IntegerField()
latitude = models.IntegerField()
posted = models.IntegerField()
class Meta:
db_table = u'elgg_ipool_data'
class ElggLicences(models.Model):
ident = models.IntegerField(primary_key=True)
page_ident = models.IntegerField()
name = models.CharField(max_length=384)
modify_time = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'elgg_licences'
class ElggLicencesAcceptedCmMaster(models.Model):
licences_ident = models.ForeignKey(ElggLicences, db_column='licences_ident')
object_ident = models.IntegerField()
accept_time = models.DateTimeField()
licence_time = models.DateTimeField()
class Meta:
db_table = u'elgg_licences_accepted_cm_master'
class ElggLicencesAcceptedMlMedias(models.Model):
licences_ident = models.ForeignKey(ElggLicences, db_column='licences_ident')
object_ident = models.IntegerField()
accept_time = models.DateTimeField()
licence_time = models.DateTimeField()
class Meta:
db_table = u'elgg_licences_accepted_ml_medias'
class ElggLicencesAcceptedMlRelease(models.Model):
licences_ident = models.ForeignKey(ElggLicences, db_column='licences_ident')
object_ident = models.IntegerField()
accept_time = models.DateTimeField()
licence_time = models.DateTimeField()
class Meta:
db_table = u'elgg_licences_accepted_ml_release'
class ElggLicencesAcceptedUsers(models.Model):
licences_ident = models.ForeignKey(ElggLicences, db_column='licences_ident')
object_ident = models.IntegerField()
accept_time = models.DateTimeField()
licence_time = models.DateTimeField()
class Meta:
db_table = u'elgg_licences_accepted_users'
class ElggLicencesArchived(models.Model):
ident = models.IntegerField(primary_key=True)
page_ident = models.IntegerField()
user_ident = models.IntegerField()
uri = models.CharField(max_length=384)
title = models.CharField(max_length=384)
content = models.TextField()
version_time = models.DateTimeField()
class Meta:
db_table = u'elgg_licences_archived'
class ElggMessages(models.Model):
ident = models.IntegerField(primary_key=True)
title = models.TextField()
body = models.TextField()
from_id = models.IntegerField()
to_id = models.IntegerField()
posted = models.IntegerField()
status = models.CharField(max_length=18)
hidden_from = models.CharField(max_length=3)
hidden_to = models.CharField(max_length=3)
class Meta:
db_table = u'elgg_messages'
class ElggMlf2Banlists(models.Model):
name = models.CharField(max_length=765)
list = models.TextField()
class Meta:
db_table = u'elgg_mlf2_banlists'
class ElggMlf2Categories(models.Model):
id = models.IntegerField(primary_key=True)
order_id = models.IntegerField()
category = models.CharField(max_length=765)
description = models.CharField(max_length=765)
accession = models.IntegerField()
class Meta:
db_table = u'elgg_mlf2_categories'
class ElggMlf2Entries(models.Model):
id = models.IntegerField(unique=True)
pid = models.IntegerField()
tid = models.IntegerField()
uniqid = models.CharField(max_length=765)
time = models.DateTimeField()
last_reply = models.DateTimeField()
edited = models.DateTimeField()
edited_by = models.IntegerField(null=True, blank=True)
user_id = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=765)
subject = models.CharField(max_length=765)
category = models.IntegerField()
email = models.CharField(max_length=765)
hp = models.CharField(max_length=765)
location = models.CharField(max_length=765)
ip = models.CharField(max_length=765)
text = models.TextField()
tags = models.CharField(max_length=765)
show_signature = models.IntegerField(null=True, blank=True)
email_notification = models.IntegerField(null=True, blank=True)
marked = models.IntegerField(null=True, blank=True)
locked = models.IntegerField(null=True, blank=True)
sticky = models.IntegerField(null=True, blank=True)
views = models.IntegerField(null=True, blank=True)
spam = models.IntegerField(null=True, blank=True)
spam_check_status = models.IntegerField(null=True, blank=True)
edit_key = models.CharField(max_length=765)
startpoint = models.FloatField(null=True, blank=True)
endpoint = models.FloatField(null=True, blank=True)
threadtype = models.CharField(max_length=30)
class Meta:
db_table = u'elgg_mlf2_entries'
class ElggMlf2EntriesCache(models.Model):
cache_id = models.IntegerField(primary_key=True)
cache_text = models.TextField()
class Meta:
db_table = u'elgg_mlf2_entries_cache'
class ElggMlf2Logincontrol(models.Model):
time = models.DateTimeField()
ip = models.CharField(max_length=765)
logins = models.IntegerField()
class Meta:
db_table = u'elgg_mlf2_logincontrol'
class ElggMlf2Pages(models.Model):
id = models.IntegerField(primary_key=True)
order_id = models.IntegerField()
title = models.CharField(max_length=765)
content = models.TextField()
menu_linkname = models.CharField(max_length=765)
access = models.IntegerField()
class Meta:
db_table = u'elgg_mlf2_pages'
class ElggMlf2Settings(models.Model):
name = models.CharField(max_length=765)
value = models.CharField(max_length=765)
class Meta:
db_table = u'elgg_mlf2_settings'
class ElggMlf2Smilies(models.Model):
id = models.IntegerField(primary_key=True)
order_id = models.IntegerField()
file = models.CharField(max_length=300)
code_1 = models.CharField(max_length=150)
code_2 = models.CharField(max_length=150)
code_3 = models.CharField(max_length=150)
code_4 = models.CharField(max_length=150)
code_5 = models.CharField(max_length=150)
title = models.CharField(max_length=765)
class Meta:
db_table = u'elgg_mlf2_smilies'
class ElggMlf2Userdata(models.Model):
user_id = models.IntegerField(primary_key=True)
user_type = models.IntegerField()
user_name = models.CharField(max_length=765)
user_real_name = models.CharField(max_length=765)
gender = models.IntegerField()
birthday = models.DateField()
user_pw = models.CharField(max_length=765)
user_email = models.CharField(max_length=765)
email_contact = models.IntegerField(null=True, blank=True)
user_hp = models.CharField(max_length=765)
user_location = models.CharField(max_length=765)
signature = models.CharField(max_length=765)
profile = models.TextField()
logins = models.IntegerField()
last_login = models.DateTimeField()
last_logout = models.DateTimeField()
user_ip = models.CharField(max_length=765)
registered = models.DateTimeField()
thread_order = models.IntegerField()
user_view = models.IntegerField()
sidebar = models.IntegerField()
fold_threads = models.IntegerField()
thread_display = models.IntegerField()
new_posting_notification = models.IntegerField(null=True, blank=True)
new_user_notification = models.IntegerField(null=True, blank=True)
time_difference = models.IntegerField(null=True, blank=True)
user_lock = models.IntegerField(null=True, blank=True)
auto_login_code = models.CharField(max_length=765)
pwf_code = models.CharField(max_length=765)
activate_code = models.CharField(max_length=765)
class Meta:
db_table = u'elgg_mlf2_userdata'
class ElggMlf2UserdataCache(models.Model):
cache_id = models.IntegerField(primary_key=True)
cache_signature = models.TextField()
cache_profile = models.TextField()
class Meta:
db_table = u'elgg_mlf2_userdata_cache'
class ElggMlf2Useronline(models.Model):
ip = models.CharField(max_length=45)
time = models.IntegerField()
user_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'elgg_mlf2_useronline'
class ElggPages(models.Model):
ident = models.IntegerField(primary_key=True)
name = models.CharField(max_length=384, unique=True)
uri = models.CharField(max_length=384)
parent = models.IntegerField()
weight = models.IntegerField()
title = models.TextField()
content = models.TextField()
owner = models.IntegerField()
access = models.CharField(max_length=60)
class Meta:
db_table = u'elgg_pages'
class ElggPasswordRequests(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
code = models.CharField(max_length=384)
class Meta:
db_table = u'elgg_password_requests'
class ElggPreprodRelations(models.Model):
ident = models.IntegerField(primary_key=True)
c_ident_master = models.IntegerField()
c_ident_slave = models.IntegerField()
relation_type = models.IntegerField()
class Meta:
db_table = u'elgg_preprod_relations'
class ElggPreprodRsegment(models.Model):
ident = models.IntegerField(primary_key=True)
body = models.TextField()
x_ident = models.IntegerField()
posted = models.IntegerField()
class Meta:
db_table = u'elgg_preprod_rsegment'
class ElggPreprodRtransmission(models.Model):
ident = models.IntegerField(primary_key=True)
body = models.TextField()
x_ident = models.IntegerField()
posted = models.IntegerField()
class Meta:
db_table = u'elgg_preprod_rtransmission'
class ElggProfileData(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
access = models.CharField(max_length=60)
name = models.CharField(max_length=765)
value = models.TextField()
class Meta:
db_table = u'elgg_profile_data'
class ElggRiver(models.Model):
ident = models.IntegerField(primary_key=True)
userid = models.IntegerField()
object_id = models.IntegerField()
object_owner = models.IntegerField()
object_type = models.CharField(max_length=384)
access = models.CharField(max_length=384)
string = models.TextField()
ts = models.IntegerField()
class Meta:
db_table = u'elgg_river'
class ElggRoleCommunity(models.Model):
ident = models.IntegerField(primary_key=True)
role = models.CharField(max_length=150)
communities = models.CharField(max_length=1500)
class Meta:
db_table = u'elgg_role_community'
class ElggRolePermissions(models.Model):
ident = models.IntegerField(primary_key=True)
role = models.CharField(max_length=150)
permissions = models.CharField(max_length=1500)
class Meta:
db_table = u'elgg_role_permissions'
class ElggRoleProfessions(models.Model):
ident = models.IntegerField(primary_key=True)
role = models.CharField(max_length=150)
professions = models.CharField(max_length=1500)
class Meta:
db_table = u'elgg_role_professions'
class ElggSchedule(models.Model):
ident = models.IntegerField(primary_key=True)
starttime = models.IntegerField()
duration = models.IntegerField()
source = models.CharField(max_length=60)
source_ident = models.IntegerField()
added = models.IntegerField()
last_edit = models.IntegerField()
endtime = models.IntegerField()
user_ident = models.IntegerField()
status = models.IntegerField()
played = models.IntegerField()
class Meta:
db_table = u'elgg_schedule'
class ElggScheduleLog(models.Model):
ident = models.IntegerField(primary_key=True)
logtag = models.CharField(max_length=300)
data1 = models.TextField()
data2 = models.TextField()
s_ident = models.IntegerField()
sesskey = models.CharField(max_length=120)
user_ident = models.IntegerField()
day = models.IntegerField()
activitydate = models.IntegerField()
river_ident = models.IntegerField()
pl_ident = models.IntegerField()
class Meta:
db_table = u'elgg_schedule_log'
class ElggScheduleUdata(models.Model):
ident = models.IntegerField(primary_key=True)
userident = models.IntegerField()
day = models.CharField(max_length=36)
dtype = models.CharField(max_length=150)
class Meta:
db_table = u'elgg_schedule_udata'
class ElggStreamTokens(models.Model):
id = models.IntegerField(primary_key=True)
token = models.CharField(max_length=96)
class Meta:
db_table = u'elgg_stream_tokens'
class ElggTags(models.Model):
ident = models.IntegerField(primary_key=True)
tag = models.CharField(max_length=384)
tagtype = models.CharField(max_length=60)
ref = models.IntegerField()
access = models.CharField(max_length=60)
owner = models.IntegerField()
class Meta:
db_table = u'elgg_tags'
class ElggTemplateElements(models.Model):
ident = models.IntegerField(primary_key=True)
name = models.CharField(max_length=384)
content = models.TextField()
template_id = models.IntegerField()
class Meta:
db_table = u'elgg_template_elements'
class ElggTemplates(models.Model):
ident = models.IntegerField(primary_key=True)
name = models.CharField(max_length=384)
owner = models.IntegerField()
public = models.CharField(max_length=9)
shortname = models.CharField(max_length=384)
class Meta:
db_table = u'elgg_templates'
class ElggTimetabledigrisData(models.Model):
elgg_timetabledigris_ident = models.IntegerField(primary_key=True)
elgg_timetabledigris_txt = models.TextField()
class Meta:
db_table = u'elgg_timetabledigris_data'
class ElggToptags(models.Model):
ident = models.IntegerField(primary_key=True)
tag = models.TextField()
tagident = models.IntegerField(null=True, blank=True)
type = models.TextField()
class Meta:
db_table = u'elgg_toptags'
class ElggUserFlags(models.Model):
ident = models.IntegerField(primary_key=True)
user_id = models.IntegerField()
flag = models.CharField(max_length=192)
value = models.CharField(max_length=192)
class Meta:
db_table = u'elgg_user_flags'
class ElggUsers(models.Model):
ident = models.IntegerField(primary_key=True)
username = models.CharField(max_length=384)
password = models.CharField(max_length=96)
email = models.CharField(max_length=384)
name = models.CharField(max_length=384)
icon = models.IntegerField()
active = models.CharField(max_length=9)
alias = models.CharField(max_length=384)
code = models.CharField(max_length=96)
icon_quota = models.IntegerField()
file_quota = models.IntegerField()
template_id = models.IntegerField()
owner = models.IntegerField()
user_type = models.CharField(max_length=384)
moderation = models.CharField(max_length=12)
last_action = models.IntegerField()
template_name = models.CharField(max_length=384)
join_date = models.IntegerField(null=True, blank=True)
reg_ip = models.CharField(max_length=45, blank=True)
fb_id = models.CharField(max_length=72, unique=True, blank=True)
updated = models.DateTimeField()
migrated = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'elgg_users'
class ElggUsersAlias(models.Model):
ident = models.IntegerField(primary_key=True)
installid = models.CharField(max_length=96)
username = models.CharField(max_length=96)
firstname = models.CharField(max_length=192)
lastname = models.CharField(max_length=192)
email = models.CharField(max_length=384)
user_id = models.IntegerField()
class Meta:
db_table = u'elgg_users_alias'
class ElggWatchlist(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
object_id = models.IntegerField()
object_type = models.CharField(max_length=384)
class Meta:
db_table = u'elgg_watchlist'
class ElggWeblogComments(models.Model):
ident = models.IntegerField(primary_key=True)
post_id = models.IntegerField()
owner = models.IntegerField()
postedname = models.CharField(max_length=384)
body = models.TextField()
posted = models.IntegerField()
class Meta:
db_table = u'elgg_weblog_comments'
class ElggWeblogPosts(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
weblog = models.IntegerField()
icon = models.IntegerField()
access = models.CharField(max_length=60)
posted = models.IntegerField()
title = models.TextField()
body = models.TextField()
class Meta:
db_table = u'elgg_weblog_posts'
class ElggWeblogWatchlist(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
weblog_post = models.IntegerField()
class Meta:
db_table = u'elgg_weblog_watchlist'
class ElggWidgetData(models.Model):
ident = models.IntegerField(primary_key=True)
widget = models.IntegerField()
name = models.CharField(max_length=384)
value = models.TextField()
class Meta:
db_table = u'elgg_widget_data'
class ElggWidgets(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
type = models.CharField(max_length=384)
location = models.CharField(max_length=384)
location_id = models.IntegerField()
wcolumn = models.IntegerField()
display_order = models.IntegerField()
access = models.CharField(max_length=384)
class Meta:
db_table = u'elgg_widgets'
class ElggXblog(models.Model):
ident = models.IntegerField(primary_key=True)
x_modul = models.CharField(max_length=60, blank=True)
x_ident = models.IntegerField()
blog_ident = models.IntegerField()
class Meta:
db_table = u'elgg_xblog'
| hzlf/openbroadcast | website/legacy/obp_legacy/models_legacy.py | Python | gpl-3.0 | 31,942 | 0.005134 |
# Copyright (c) 2018 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2018 Tomas Machalek <tomas.machalek@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import json
import logging
import urllib
from plugins.default_token_connect.backends.cache import cached
from plugins.default_token_connect.backends import HTTPBackend
class TreqBackend(HTTPBackend):
"""
Treq args:
jazyk1:cs
jazyk2:en
hledejKde[]:ACQUIS
hledejKde[]:CORE
hledejKde[]:EUROPARL
hledejKde[]:PRESSEUROP
hledejKde[]:SUBTITLES
hledejKde[]:SYNDICATE
hledejCo:obnova
searchGo:
viceslovne:
lemma:
"""
DEFAULT_MAX_RESULT_LINES = 10
AVAIL_GROUPS = None
AVAIL_LANG_MAPPINGS = None
def __init__(self, conf, ident):
super(TreqBackend, self).__init__(conf, ident)
self._conf = conf
self.AVAIL_GROUPS = conf.get('availGroups', {})
self.AVAIL_LANG_MAPPINGS = conf.get('availTranslations', {})
@staticmethod
def _lang_from_corpname(corpname):
return corpname.split('_')[-1]
def _find_second_lang(self, corpora):
"""
Find a first language+corpus with available translations
for the primary language (= corpora[0]).
"""
primary_lang = self._lang_from_corpname(corpora[0])
translations = self.AVAIL_LANG_MAPPINGS.get(primary_lang, [])
for cn in corpora[1:]:
lang = self._lang_from_corpname(cn)
if lang in translations:
return cn, lang
return None, None
def enabled_for_corpora(self, corpora):
corp1 = corpora[0]
corp2 = corpora[1] if len(corpora) > 1 else None
if corp2 is None:
return False
lang1 = self._lang_from_corpname(corp1)
lang2 = self._lang_from_corpname(corp2)
return lang1 in self.AVAIL_LANG_MAPPINGS and lang2 in self.AVAIL_LANG_MAPPINGS[lang1]
@staticmethod
def mk_api_args(lang1, lang2, groups, lemma):
multiw_flag = '1' if ' ' in lemma else '0'
lemma_flag = '0' if ' ' in lemma else '1'
groups = ','.join(groups)
return [('left', lang1), ('right', lang2), ('viceslovne', multiw_flag), ('regularni', '0'),
('lemma', lemma_flag), ('aJeA', '1'), ('hledejKde', groups), ('hledejCo', lemma),
('order', 'percDesc')]
@staticmethod
def mk_page_args(lang1, lang2, groups, lemma):
multiw_flag = '1' if ' ' in lemma else '0'
lemma_flag = '0' if ' ' in lemma else '1'
return [('jazyk1', lang1), ('jazyk2', lang2), ('viceslovne', multiw_flag), ('regularni', '0'),
('lemma', lemma_flag), ('caseInsen', '1'), ('hledejCo', lemma)] + [('hledejKde[]', g) for g in groups]
def mk_api_path(self, args):
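        # E.g. [('left', 'cs'), ('hledejCo', 'pes')] becomes
        # '/api.php?api=true&left=cs&hledejCo=pes' (values are URL-quoted).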
args = ['{0}={1}'.format(k, urllib.quote(v.encode('utf-8'))) for k, v in args]
return '/api.php?api=true&' + '&'.join(args)
def find_lang_common_groups(self, lang1, lang2):
g1 = set(self.AVAIL_GROUPS.get(lang1, []))
g2 = set(self.AVAIL_GROUPS.get(lang2, []))
return g1.intersection(g2)
def mk_server_addr(self):
if self._conf.get('ssl', False):
return ('https://' + self._conf['server']).encode('utf-8')
return ('http://' + self._conf['server']).encode('utf-8')
@cached
def fetch(self, corpora, token_id, num_tokens, query_args, lang):
"""
"""
primary_lang = self._lang_from_corpname(corpora[0])
translat_corp, translat_lang = self._find_second_lang(corpora)
treq_link = None
if translat_corp and translat_lang:
common_groups = self.find_lang_common_groups(primary_lang, translat_lang)
args = dict(lang1=self.enc_val(primary_lang), lang2=self.enc_val(translat_lang),
groups=[self.enc_val(s) for s in common_groups],
**query_args)
t_args = self.mk_page_args(**args)
treq_link = (self.mk_server_addr() + '/index.php', t_args)
ta_args = self.mk_api_args(lang1=args['lang1'], lang2=args['lang2'], groups=args['groups'],
lemma=args['lemma'])
connection = self.create_connection()
try:
logging.getLogger(__name__).debug(u'Treq request args: {0}'.format(ta_args))
connection.request('GET', self.mk_api_path(ta_args))
data, status = self.process_response(connection)
data = json.loads(data)
max_items = self._conf.get('maxResultItems', self.DEFAULT_MAX_RESULT_LINES)
data['lines'] = data['lines'][:max_items]
except ValueError:
logging.getLogger(__name__).error(u'Failed to parse response: {0}'.format(data))
data = dict(sum=0, lines=[])
finally:
connection.close()
else:
data = dict(sum=0, lines=[])
return json.dumps(dict(treq_link=treq_link,
sum=data.get('sum', 0),
translations=data.get('lines', []),
primary_corp=corpora[0],
translat_corp=translat_corp)), True
| tomachalek/kontext | lib/plugins/default_token_connect/backends/treq.py | Python | gpl-2.0 | 6,004 | 0.002165 |
import sys
from Bio import SeqIO
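# Usage: python Standardize.py <input.fasta> <output.fasta>
# Strips '-' gap characters from every record in a FASTA file.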
input_file = sys.argv[1]
output_file = sys.argv[2]
def Ungap(seq):
    seq.seq = seq.seq.ungap('-')
return seq
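# A generator expression keeps memory use flat: records are ungapped lazily
# as SeqIO.write consumes them, so large FASTA files are never loaded whole.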
output_gen = (Ungap(seq) for seq in SeqIO.parse(input_file, 'fasta'))
SeqIO.write(output_gen, output_file, 'fasta')
| wonder041/MegaPipeline | Standardize.py | Python | mit | 264 | 0.015152 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-31 14:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('exoral', '0004_merge_20170327_0002'),
('exoral', '0003_auto_20170322_1453'),
]
operations = [
]
| fachschaft-medizin-rostock/django-fsmedhro | exoral/migrations/0005_merge_20170331_1617.py | Python | mit | 337 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils import timezone
class Migration(migrations.Migration):
dependencies = [
('videos', '0014_add_enabled_and_notes'),
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(unique=True)),
('enabled', models.BooleanField(default=True)),
('working', models.BooleanField(default=False)),
('lastRetrieved', models.DateTimeField(default=timezone.make_aware(datetime.datetime(1970,1,1)))),
('notes', models.CharField(max_length=1024, null=True, blank=True)),
],
),
]
| palfrey/kitling | frontend/videos/migrations/0015_add_channel.py | Python | agpl-3.0 | 907 | 0.005513 |
from __future__ import print_function
import shutil
import os.path
import tempfile
import cProfile
import pstats
import nineml
from nineml.utils.comprehensive_example import (
instances_of_all_types, v1_safe_docs)
from nineml.serialization import ext_to_format, format_to_serializer
format_to_ext = dict((v, k) for k, v in ext_to_format.items()) # @UndefinedVariable @IgnorePep8
print_serialized = False
printable = ('xml', 'json', 'yaml')
_tmp_dir = tempfile.mkdtemp()
def function():
for version in (1.0, 2.0):
if version == 1.0:
docs = v1_safe_docs
else:
docs = list(instances_of_all_types['NineML'].values())
for format in format_to_serializer: # @ReservedAssignment
try:
ext = format_to_ext[format]
except KeyError:
continue # ones that can't be written to file (e.g. dict)
for i, document in enumerate(docs):
doc = document.clone()
url = os.path.join(
_tmp_dir, 'test{}v{}{}'.format(i, version, ext))
nineml.write(url, doc, format=format, version=version,
indent=2)
if print_serialized and format in printable:
with open(url) as f:
print(f.read())
reread_doc = nineml.read(url, reload=True) # @UnusedVariable
shutil.rmtree(_tmp_dir)
out_file = os.path.join(os.getcwd(), 'serial_profile.out')
cProfile.run('function()', out_file)
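# Load the profile dump back and print hotspots, most expensive cumulative
# time first.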
p = pstats.Stats(out_file)
p.sort_stats('cumtime').print_stats()
| INCF/lib9ML | test/serialization_profile.py | Python | bsd-3-clause | 1,617 | 0.001237 |
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
from __future__ import unicode_literals
# ------------------------------------------------------------------------------
# Name: Proxy checker
# Purpose: Just a couple of functions to check various proxy configuration
#
# Author: Julien Moura (@geojulien)
#
# Python: 2.7.x with arcpy
# Created: 10/04/2015
# Updated: 10/04/2015
#
# Licence: GPL 3
# -----------------------------------------------------------------------------
###############################################################################
########### Libraries #############
###################################
# Standard library
import urllib2
import socket
import sys
import string
import os
# 3rd party libraries
import arcpy
###############################################################################
############ Functions ############
###################################
# execfile("parameters.py")
def is_bad_proxy(pip):
"""
    Check whether proxy ``pip`` (an ``ip:port`` or full proxy URL string)
    can fetch http://www.example.com. Returns False when the proxy works,
    the HTTP status code on an HTTPError, and True on any other error.
"""
try:
proxy_handler = urllib2.ProxyHandler({'http': pip})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib2.install_opener(opener)
        req = urllib2.Request('http://www.example.com')  # change the URL to test here
        sock = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print 'Error code: ', e.code
return e.code
except Exception, detail:
print "ERROR:", detail
return True
return False
def main():
"""
    Check each proxy configuration in proxyList and report whether it
    works. Expects prox, port, protocole, user and password to be defined
    beforehand, e.g. via the commented-out execfile("parameters.py") above.
"""
socket.setdefaulttimeout(120)
# two sample proxy IPs
proxyList = ['10.0.4.2:3128', '{0}:{1}'.format(prox, port),
'{0}://{1}:{2}@{3}:{4}'.format(protocole, user, password, prox, port)]
for currentProxy in proxyList:
if is_bad_proxy(currentProxy):
print "Bad Proxy %s" % (currentProxy)
arcpy.AddMessage("Bad Proxy")
else:
print "%s is working" % (currentProxy)
arcpy.AddMessage("is working")
###############################################################################
###### Stand alone program ########
###################################
if __name__ == '__main__':
""" standalone execution for testing """
pass
| Guts/isogeo2sig | StandAlone/modules/proxy_checker.py | Python | gpl-3.0 | 2,322 | 0.004737 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('search', '0003_auto_20150321_1848'),
]
operations = [
migrations.AddField(
model_name='searchquery',
name='scope',
field=models.IntegerField(default=0),
preserve_default=True,
),
]
| IllegalCactus/argument-workbench | querytool/search/migrations/0004_searchquery_scope.py | Python | gpl-3.0 | 437 | 0 |
#!/usr/bin/env python
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import roslib
roslib.load_manifest('velma_planners')
import rospy
import sensor_msgs.msg
import tf
from tf import *
from tf.transformations import *
import tf_conversions.posemath as pm
from tf2_msgs.msg import *
import PyKDL
import math
import numpy as np
class TfPub:
def __init__(self):
pass
def spin(self):
self.br = tf.TransformBroadcaster()
rospy.sleep(1.0)
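        # Broadcast a fixed transform in a loop: the kinect frame sits
        # 1.65 m above "world", pitched 30 degrees about the Y axis.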
while not rospy.is_shutdown():
rot = PyKDL.Rotation.RotY( 30.0 / 180.0 * math.pi )
q = rot.GetQuaternion()
self.br.sendTransform([0, 0, 1.65], [q[0], q[1], q[2], q[3]], rospy.Time.now(), "head_kinect_link", "world")
if __name__ == '__main__':
rospy.init_node('tf_pub')
v = TfPub()
v.spin()
| dseredyn/velma_planners | scripts/tf_pub.py | Python | gpl-2.0 | 2,492 | 0.008026 |
from console.main.command_handler.commands.command import Command
class SimpleCommand(Command):
pass
| lubokkanev/cloud-system | console/main/command_handler/commands/simple_command.py | Python | gpl-2.0 | 107 | 0 |
from models import *
from forms import CategoryImageForm
from django.contrib import admin
class CategoryImageInline(admin.TabularInline):
model = CategoryImage
form = CategoryImageForm
class CategoryOptions(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',)}
list_display = ['name', 'slug', 'parent', 'sortorder', 'published']
inlines = [
CategoryImageInline,
]
fieldsets = (
(None, {
'fields': ('name', 'slug', 'parent', 'sortorder', 'published',)
}),
('Meta options', {
'classes': ('collapse',),
'fields': ('meta_keywords', 'meta_description', )
}),
)
class CategoryImageAdmin(admin.ModelAdmin):
model = CategoryImage
form = CategoryImageForm
admin.site.register(CategoryImage, CategoryImageAdmin)
admin.site.register(Category, CategoryOptions) | howiworkdaily/scofield-project | scofield/category/admin.py | Python | bsd-3-clause | 898 | 0.010022 |
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument
from . import variable
class Integer(variable.Variable):
resource_name = "integer"
default = argument.Integer()
min = argument.Integer()
max = argument.Integer()
class Set(variable.Set):
resource = Integer
def to_lines(self, value):
return [str(value)]
class Get(variable.Get):
resource = Integer
def from_lines(self, value):
return int(value[0])
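# Registering VariableAsString as an adapter presumably lets an Integer
# variable be passed where a plain string argument is expected.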
argument.Integer.register_adapter(Integer, variable.VariableAsString)
| yaybu/touchdown | touchdown/config/integer.py | Python | apache-2.0 | 1,092 | 0 |
#!/usr/bin/env python
import unittest
from test_helpers import *
contents = load_input_contents('../gdb_stacktraces/rhbz-803600')
threads_expected = 2
frames_expected = 227
expected_short_text = '''Thread no. 1 (5 frames)
#0 validate_row at gtktreeview.c:5699
#1 validate_visible_area at gtktreeview.c:5898
#2 gtk_tree_view_bin_expose at gtktreeview.c:4253
#3 gtk_tree_view_expose at gtktreeview.c:4955
#4 _gtk_marshal_BOOLEAN__BOXED at gtkmarshalers.c:84
'''
expected_short_text_955617 = '''Thread no. 1 (3 frames)
#10 xf86CursorSetCursor at xf86Cursor.c:333
#11 xf86CursorEnableDisableFBAccess at xf86Cursor.c:233
#12 ??
'''
class TestGdbStacktrace(BindingsTestCase):
def setUp(self):
self.trace = satyr.GdbStacktrace(contents)
def test_correct_thread_count(self):
self.assertEqual(len(self.trace.threads), threads_expected)
def test_correct_frame_count(self):
self.assertEqual(frame_count(self.trace), frames_expected)
def test_dup(self):
dup = self.trace.dup()
self.assertNotEqual(id(dup.threads), id(self.trace.threads))
self.assertTrue(all(map(lambda t1, t2: t1.equals(t2), dup.threads, self.trace.threads)))
dup.threads = dup.threads[:5]
dup2 = dup.dup()
self.assertNotEqual(id(dup.threads), id(dup2.threads))
self.assertTrue(all(map(lambda t1, t2: t1.equals(t2), dup.threads, dup2.threads)))
def test_prepare_linked_list(self):
dup = self.trace.dup()
dup.threads = dup.threads[:5]
dup.normalize()
self.assertTrue(len(dup.threads) <= 5)
def test_normalize(self):
dup = self.trace.dup()
dup.normalize()
self.assertNotEqual(frame_count(dup), frame_count(self.trace))
def test_str(self):
out = str(self.trace)
self.assertTrue(('Stacktrace with %d threads' % threads_expected) in out)
def test_to_short_text(self):
self.assertEqual(self.trace.to_short_text(5), expected_short_text)
def test_bthash(self):
self.assertEqual(self.trace.get_bthash(), 'd0fcdc87161ccb093f7efeff12218321d8fd5298')
def test_crash_thread(self):
self.assertTrue(self.trace.crash_thread is self.trace.threads[1])
def test_hash(self):
self.assertHashable(self.trace)
def test_short_text_normalization(self):
contents = load_input_contents('../gdb_stacktraces/rhbz-955617')
trace = satyr.GdbStacktrace(contents)
self.assertEqual(trace.to_short_text(5), expected_short_text_955617)
class TestGdbThread(BindingsTestCase):
def setUp(self):
self.thread = satyr.GdbStacktrace(contents).threads[0]
def test_getset(self):
self.assertGetSetCorrect(self.thread, 'number', 2, 9000)
def test_equals(self):
self.assertTrue(self.thread.equals(self.thread))
dup = self.thread.dup()
self.assertTrue(self.thread.equals(dup))
dup.number = 9000
self.assertFalse(self.thread.equals(dup))
def test_duphash(self):
expected_plain = 'Thread\n write\n virNetSocketWriteWire\n virNetSocketWrite\n'
self.assertEqual(self.thread.get_duphash(flags=satyr.DUPHASH_NOHASH, frames=3), expected_plain)
self.assertEqual(self.thread.get_duphash(), '01d2a92281954a81dee9098dc4f8056ef5a5a5e1')
def test_hash(self):
self.assertHashable(self.thread)
class TestGdbSharedlib(BindingsTestCase):
def setUp(self):
self.shlib = satyr.GdbStacktrace(contents).libs[0]
def test_getset(self):
self.assertGetSetCorrect(self.shlib, 'start_address', 0x3ecd63c680, 10)
self.assertGetSetCorrect(self.shlib, 'end_address', 0x3ecd71f0f8, 20)
self.assertGetSetCorrect(self.shlib, 'symbols', satyr.SYMS_OK, satyr.SYMS_WRONG)
self.assertGetSetCorrect(self.shlib, 'soname', '/usr/lib64/libpython2.6.so.1.0', '/dev/null')
def test_hash(self):
self.assertHashable(self.shlib)
class TestGdbFrame(BindingsTestCase):
def setUp(self):
self.frame = satyr.GdbStacktrace(contents).threads[0].frames[0]
def test_str(self):
out = str(self.frame)
self.assertTrue('0x0000003ec220e48d' in out)
self.assertTrue('write' in out)
self.assertTrue('Frame #0' in out)
def test_dup(self):
dup = self.frame.dup()
self.assertEqual(dup.function_name,
self.frame.function_name)
dup.function_name = 'other'
self.assertNotEqual(dup.function_name,
self.frame.function_name)
def test_cmp(self):
dup = self.frame.dup()
self.assertTrue(dup.equals(dup))
self.assertTrue(dup.equals(self.frame))
dup.function_name = 'another'
self.assertFalse(dup.equals(self.frame))
def test_getset(self):
self.assertGetSetCorrect(self.frame, 'function_name', 'write', 'foo bar')
self.assertGetSetCorrect(self.frame, 'function_type', None, 'Maybe Integer')
self.assertGetSetCorrect(self.frame, 'number', 0, 42)
self.assertGetSetCorrect(self.frame, 'source_file', '../sysdeps/unix/syscall-template.S', 'ok.c')
self.assertGetSetCorrect(self.frame, 'source_line', 82, 1337)
self.assertGetSetCorrect(self.frame, 'signal_handler_called', False, True)
self.assertGetSetCorrect(self.frame, 'address', 0x3ec220e48d, 0x666)
self.assertGetSetCorrect(self.frame, 'address', 0x666, 4398046511104)
## 2^66, this is expected to fail
#self.assertGetSetCorrect(self.frame, 'address', 4398046511104, 73786976294838206464L)
self.assertGetSetCorrect(self.frame, 'library_name', None, 'sowhat.so')
def test_hash(self):
self.assertHashable(self.frame)
if __name__ == '__main__':
unittest.main()
| airtimemedia/satyr | tests/python/gdb.py | Python | gpl-2.0 | 5,764 | 0.00399 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
def sector_validation(apps, schema_editor):
""" Remove sector from RSR validation set """
ProjectEditorValidation = apps.get_model('rsr', 'ProjectEditorValidation')
sector_validators = ['rsr_sector', 'rsr_sector.sector_code', 'rsr_sector.vocabulary']
for v in sector_validators:
validation = ProjectEditorValidation.objects.filter(validation_set_id=1, validation__exact=v)
if validation:
validation.delete()
def undo_sector_validation(apps, schema_editor):
""" Remove sector from RSR validation set """
ProjectEditorValidation = apps.get_model('rsr', 'ProjectEditorValidation')
sector_validators = ['rsr_sector', 'rsr_sector.sector_code', 'rsr_sector.vocabulary']
for v in sector_validators:
ProjectEditorValidation.objects.get_or_create(validation=v, action=1, validation_set_id=1)
class Migration(migrations.Migration):
dependencies = [
('rsr', '0078_auto_20160613_1428'),
]
operations = [
migrations.RunPython(sector_validation, undo_sector_validation),
]
| akvo/akvo-rsr | akvo/rsr/migrations/0079_auto_20160620_1418.py | Python | agpl-3.0 | 1,137 | 0.004398 |
import time
import pymemcache.client
import pytest
from limits import RateLimitItemPerMinute, RateLimitItemPerSecond
from limits.storage import MemcachedStorage, storage_from_string
from limits.strategies import (
FixedWindowElasticExpiryRateLimiter,
FixedWindowRateLimiter,
)
from tests.utils import fixed_start
@pytest.mark.memcached
@pytest.mark.flaky
class TestMemcachedStorage:
@pytest.fixture(autouse=True)
def setup(self, memcached, memcached_cluster):
self.storage_url = "memcached://localhost:22122"
def test_init_options(self, mocker):
constructor = mocker.spy(pymemcache.client, "PooledClient")
assert storage_from_string(self.storage_url, connect_timeout=1).check()
assert constructor.call_args[1]["connect_timeout"] == 1
@fixed_start
def test_fixed_window(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWindowRateLimiter(storage)
per_min = RateLimitItemPerSecond(10)
start = time.time()
count = 0
while time.time() - start < 0.5 and count < 10:
assert limiter.hit(per_min)
count += 1
assert not limiter.hit(per_min)
while time.time() - start <= 1:
time.sleep(0.1)
assert limiter.hit(per_min)
@fixed_start
def test_fixed_window_cluster(self):
storage = MemcachedStorage("memcached://localhost:22122,localhost:22123")
limiter = FixedWindowRateLimiter(storage)
per_min = RateLimitItemPerSecond(10)
start = time.time()
count = 0
while time.time() - start < 0.5 and count < 10:
assert limiter.hit(per_min)
count += 1
assert not limiter.hit(per_min)
while time.time() - start <= 1:
time.sleep(0.1)
assert limiter.hit(per_min)
@fixed_start
def test_fixed_window_with_elastic_expiry(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWindowElasticExpiryRateLimiter(storage)
per_sec = RateLimitItemPerSecond(2, 2)
assert limiter.hit(per_sec)
time.sleep(1)
assert limiter.hit(per_sec)
assert not limiter.test(per_sec)
time.sleep(1)
assert not limiter.test(per_sec)
time.sleep(1)
assert limiter.test(per_sec)
@fixed_start
def test_fixed_window_with_elastic_expiry_cluster(self):
storage = MemcachedStorage("memcached://localhost:22122,localhost:22123")
limiter = FixedWindowElasticExpiryRateLimiter(storage)
per_sec = RateLimitItemPerSecond(2, 2)
assert limiter.hit(per_sec)
time.sleep(1)
assert limiter.hit(per_sec)
assert not limiter.test(per_sec)
time.sleep(1)
assert not limiter.test(per_sec)
time.sleep(1)
assert limiter.test(per_sec)
def test_clear(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWindowRateLimiter(storage)
per_min = RateLimitItemPerMinute(1)
limiter.hit(per_min)
assert not limiter.hit(per_min)
limiter.clear(per_min)
assert limiter.hit(per_min)
| alisaifee/limits | tests/storage/test_memcached.py | Python | mit | 3,218 | 0.000622 |
import unittest
from conans.test.utils.tools import TestServer, TestClient
from conans.model.ref import ConanFileReference
import os
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.paths import CONANFILE, CONANFILE_TXT
from conans.util.files import load
generator = """
from conans.model import Generator
from conans.paths import BUILD_INFO
from conans import ConanFile, CMake
class MyCustom_Generator(Generator):
@property
def filename(self):
return "customfile.gen"
@property
def content(self):
return "My custom generator content"
class MyCustomGeneratorPackage(ConanFile):
name = "MyCustomGen"
version = "0.2"
"""
consumer = """
[requires]
Hello0/0.1@lasote/stable
MyCustomGen/0.2@lasote/stable
[generators]
MyCustom_Generator
"""
generator_multi = """
from conans.model import Generator
from conans.paths import BUILD_INFO
from conans import ConanFile, CMake
class MyCustomMultiGenerator(Generator):
@property
def filename(self):
return "customfile.gen"
@property
def content(self):
return {"file1.gen": "CustomContent1",
"file2.gen": "CustomContent2"}
class NoMatterTheName(ConanFile):
name = "MyCustomGen"
version = "0.2"
"""
consumer_multi = """
[requires]
MyCustomGen/0.2@lasote/stable
[generators]
MyCustomMultiGenerator
"""
class CustomGeneratorTest(unittest.TestCase):
def setUp(self):
test_server = TestServer()
self.servers = {"default": test_server}
def reuse_test(self):
conan_reference = ConanFileReference.loads("Hello0/0.1@lasote/stable")
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
client.save(files)
client.run("export . lasote/stable")
client.run("upload %s" % str(conan_reference))
gen_reference = ConanFileReference.loads("MyCustomGen/0.2@lasote/stable")
files = {CONANFILE: generator}
client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
client.save(files)
client.run("export . lasote/stable")
client.run("upload %s" % str(gen_reference))
# Test local, no retrieval
files = {CONANFILE_TXT: consumer}
client.save(files, clean_first=True)
client.run("install . --build")
generated = load(os.path.join(client.current_folder, "customfile.gen"))
self.assertEqual(generated, "My custom generator content")
# Test retrieval from remote
client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
files = {CONANFILE_TXT: consumer}
client.save(files)
client.run("install . --build")
generated = load(os.path.join(client.current_folder, "customfile.gen"))
self.assertEqual(generated, "My custom generator content")
def multifile_test(self):
gen_reference = ConanFileReference.loads("MyCustomGen/0.2@lasote/stable")
client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
files = {CONANFILE: generator_multi}
client.save(files)
client.run("export . lasote/stable")
client.run("upload %s" % str(gen_reference))
# Test local, no retrieval
files = {CONANFILE_TXT: consumer_multi}
client.save(files, clean_first=True)
client.run("install . --build")
self.assertIn("Generator MyCustomMultiGenerator is multifile. "
"Property 'filename' not used",
client.user_io.out)
for i in (1, 2):
generated = load(os.path.join(client.current_folder, "file%d.gen" % i))
self.assertEqual(generated, "CustomContent%d" % i)
| birsoyo/conan | conans/test/generators/custom_generator_test.py | Python | mit | 3,848 | 0.001819 |
# Copyright (c) 2017 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from dragonflow.controller.common import constants
from dragonflow.tests.unit import test_app_base
class TestChassisSNATApp(test_app_base.DFAppTestBase):
apps_list = ["chassis_snat"]
external_host_ip = '172.24.4.100'
def setUp(self):
cfg.CONF.set_override('external_host_ip',
self.external_host_ip,
group='df')
super(TestChassisSNATApp, self).setUp()
self.SNAT_app = self.open_flow_app.dispatcher.apps['chassis_snat']
self.SNAT_app.external_ofport = 99
def test_switch_features_handler(self):
ev = mock.Mock()
ev.msg.datapath.ofproto.OFP_VERSION = 0x04
open_flow_app = self.controller.switch_backend.open_flow_app
open_flow_app.switch_features_handler(ev)
self.SNAT_app.add_flow_go_to_table.assert_has_calls(
[mock.call(
constants.L3_LOOKUP_TABLE,
constants.PRIORITY_LOW,
constants.EGRESS_NAT_TABLE,
match=mock.ANY)])
self.SNAT_app.mod_flow.assert_has_calls(
[mock.call(
inst=mock.ANY,
table_id=constants.INGRESS_CLASSIFICATION_DISPATCH_TABLE,
priority=constants.PRIORITY_DEFAULT,
match=mock.ANY),
mock.call(
inst=mock.ANY,
table_id=constants.INGRESS_NAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY),
mock.call(
inst=mock.ANY,
table_id=constants.EGRESS_NAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY),
mock.call(
actions=mock.ANY,
table_id=constants.EGRESS_SNAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY)])
def test_add_local_port(self):
self.controller.update(test_app_base.fake_local_port1)
self.SNAT_app.mod_flow.assert_has_calls(
[mock.call(
inst=mock.ANY,
table_id=constants.INGRESS_SNAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY)])
def test_remove_local_port(self):
self.controller.update(test_app_base.fake_local_port1)
self.SNAT_app.mod_flow.reset_mock()
self.controller.delete(test_app_base.fake_local_port1)
self.SNAT_app.mod_flow.assert_has_calls(
[mock.call(
command=mock.ANY,
table_id=constants.INGRESS_SNAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY)])
| openstack/dragonflow | dragonflow/tests/unit/test_chassis_snat_app.py | Python | apache-2.0 | 3,471 | 0 |
"""
You are playing the following Bulls and Cows game with your friend: You write down a number and ask your friend to guess what the number is. Each time your friend makes a guess, you provide a hint that indicates how many digits in said guess match your secret number exactly in both digit and position (called "bulls") and how many digits match the secret number but are located in the wrong position (called "cows"). Your friend will use successive guesses and hints to eventually derive the secret number.
For example:
Secret number: "1807"
Friend's guess: "7810"
Hint: 1 bull and 3 cows. (The bull is 8, the cows are 0, 1 and 7.)
Write a function to return a hint according to the secret number and friend's guess, use A to indicate the bulls and B to indicate the cows. In the above example, your function should return "1A3B".
Please note that both secret number and friend's guess may contain duplicate digits, for example:
Secret number: "1123"
Friend's guess: "0111"
In this case, the 1st 1 in friend's guess is a bull, the 2nd or 3rd 1 is a cow, and your function should return "1A1B".
You may assume that the secret number and your friend's guess only contain digits, and their lengths are always equal.
"""
class Solution(object):
def getHint(self, secret, guess):
"""
:type secret: str
:type guess: str
:rtype: str
"""
bulls = [c for i, c in enumerate(secret) if guess[i] == c]
total = sum(min(secret.count(c), guess.count(c)) for c in '0123456789')
return '%dA%dB' % (len(bulls), total - len(bulls))
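# A quick sanity check (a minimal sketch, using the two examples from the
# problem statement above):
if __name__ == '__main__':
    assert Solution().getHint("1807", "7810") == "1A3B"
    assert Solution().getHint("1123", "0111") == "1A1B"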
| dichen001/Go4Jobs | JackChen/hash/299. Bulls and Cows.py | Python | gpl-3.0 | 1,587 | 0.003781 |
import re
import math as maths
from typing import Iterable, Dict
from base64 import b64encode
from flask import Response, render_template, make_response, current_app
from peewee import SelectQuery
from htmlmin.minify import html_minify
from .models import Entry
PAGE_GROUP_SIZE = 5
ENTRIES_PER_PAGE = 10
class Paginator:
query: SelectQuery
current_page: int
def __init__(self, query: SelectQuery, current_page: int) -> None:
self.query = query
self.current_page = current_page
def get_entries(self) -> Iterable[Entry]:
return self.query.paginate(self.current_page, ENTRIES_PER_PAGE) \
.iterator()
def get_total_pages(self) -> int:
return maths.ceil(self.query.count() / ENTRIES_PER_PAGE)
def has_previous_page(self) -> bool:
return self.current_page != 1
def has_next_page(self) -> bool:
return self.current_page != self.get_total_pages()
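    # A worked example of the window logic below (a sketch, assuming
    # PAGE_GROUP_SIZE = 5): with 20 total pages, current_page=1 yields
    # pages 1..5, current_page=10 yields 8..12, and current_page=20
    # yields 16..20 -- the window is always clamped to the valid range.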
def page_number_group(self) -> Iterable[int]:
padding = PAGE_GROUP_SIZE // 2
start_page = self.current_page - padding
end_page = self.current_page + padding
total_pages = self.get_total_pages()
if start_page < 1 and end_page > total_pages:
start_page = 1
end_page = total_pages
else:
if start_page < 1:
difference = 1 - start_page
start_page += difference
end_page += difference
if end_page > total_pages:
difference = end_page - total_pages
end_page -= difference
start_page -= difference
if start_page < 1:
start_page = 1
return range(start_page, end_page + 1)
_template_cache: Dict[str, str] = {}
def _get_js() -> str:
if 'js' not in _template_cache:
with current_app.open_resource('static/bundle.js') as file:
_template_cache['js'] = file.read().decode('utf-8')
return _template_cache['js']
def _get_css() -> str:
if 'css' not in _template_cache:
with current_app.open_resource('static/bundle.css') as file:
_template_cache['css'] = re.sub(
pattern=r'^.+?\*\/',
repl='',
string=file.read().decode('utf-8'),
count=1,
flags=re.DOTALL
)
return _template_cache['css']
def _get_favicon_url() -> str:
if 'favicon_url' not in _template_cache:
with current_app.open_resource('static/favicon.png') as file:
favicon_bytes = file.read()
encoded = b64encode(favicon_bytes).decode('utf-8')
_template_cache['favicon_url'] = f'data:image/png;base64,{encoded}'
return _template_cache['favicon_url']
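# template_response renders a page with the JS/CSS bundles and the favicon
# inlined (each read once and cached in _template_cache), then minifies the
# resulting HTML before building the response.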
def template_response(*args, status_code: int = 200, **kwargs) -> Response:
html = render_template(
*args,
**kwargs,
js=_get_js(),
css=_get_css(),
favicon_url=_get_favicon_url()
)
html = html_minify(html)
return make_response(html, status_code)
| elcr/muhblog | muhblog/utils.py | Python | mit | 3,071 | 0 |
# -*- encoding: utf-8 -*-
from contextlib import contextmanager
from datetime import datetime
import sys
from traceback import format_exception
import colorama
from .conf import settings
def color(color_):
"""Utility for ability to disabling colored output."""
if settings.no_colors:
return ''
else:
return color_
def exception(title, exc_info):
sys.stderr.write(
u'{warn}[WARN] {title}:{reset}\n{trace}'
u'{warn}----------------------------{reset}\n\n'.format(
warn=color(colorama.Back.RED + colorama.Fore.WHITE
+ colorama.Style.BRIGHT),
reset=color(colorama.Style.RESET_ALL),
title=title,
trace=''.join(format_exception(*exc_info))))
def rule_failed(rule, exc_info):
exception('Rule {}'.format(rule.name), exc_info)
def failed(msg):
sys.stderr.write('{red}{msg}{reset}\n'.format(
msg=msg,
red=color(colorama.Fore.RED),
reset=color(colorama.Style.RESET_ALL)))
def show_corrected_command(corrected_command):
sys.stderr.write('{bold}{script}{reset}{side_effect}\n'.format(
script=corrected_command.script,
side_effect=' (+side effect)' if corrected_command.side_effect else '',
bold=color(colorama.Style.BRIGHT),
reset=color(colorama.Style.RESET_ALL)))
def confirm_text(corrected_command):
sys.stderr.write(
('{clear}{bold}{script}{reset}{side_effect} '
'[{green}enter{reset}/{blue}↑{reset}/{blue}↓{reset}'
'/{red}ctrl+c{reset}]').format(
script=corrected_command.script,
side_effect=' (+side effect)' if corrected_command.side_effect else '',
clear='\033[1K\r',
bold=color(colorama.Style.BRIGHT),
green=color(colorama.Fore.GREEN),
red=color(colorama.Fore.RED),
reset=color(colorama.Style.RESET_ALL),
blue=color(colorama.Fore.BLUE)))
def debug(msg):
if settings.debug:
sys.stderr.write(u'{blue}{bold}DEBUG:{reset} {msg}\n'.format(
msg=msg,
reset=color(colorama.Style.RESET_ALL),
blue=color(colorama.Fore.BLUE),
bold=color(colorama.Style.BRIGHT)))
@contextmanager
def debug_time(msg):
started = datetime.now()
try:
yield
finally:
debug(u'{} took: {}'.format(msg, datetime.now() - started))
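# Example usage of debug_time (a sketch; `run_rules()` stands in for any
# callable being timed):
#
#     with debug_time(u'Total'):
#         run_rules()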
def how_to_configure_alias(configuration_details):
print("Seems like {bold}fuck{reset} alias isn't configured!".format(
bold=color(colorama.Style.BRIGHT),
reset=color(colorama.Style.RESET_ALL)))
if configuration_details:
content, path = configuration_details
print(
"Please put {bold}{content}{reset} in your "
"{bold}{path}{reset}.".format(
bold=color(colorama.Style.BRIGHT),
reset=color(colorama.Style.RESET_ALL),
path=path,
content=content))
print('More details - https://github.com/nvbn/thefuck#manual-installation')
| redreamality/thefuck | thefuck/logs.py | Python | mit | 3,065 | 0.000327 |
import os
import sys
import gzip
import time
import numpy as np
import pickle as pkl
from scipy.sparse import hstack
from sklearn.metrics import roc_auc_score
from models import MTC
if len(sys.argv) != 7:
print('Usage: python', sys.argv[0],
'WORK_DIR DATASET C P N_SEED TRAIN_DEV(Y/N)')
sys.exit(0)
else:
work_dir = sys.argv[1]
dataset = sys.argv[2]
C = float(sys.argv[3])
p = float(sys.argv[4])
n_seed = int(sys.argv[5])
trndev = sys.argv[6]
assert trndev in ['Y', 'N']
data_dir = os.path.join(work_dir, 'data/%s/setting2' % dataset)
if trndev == 'N':
fxtrain = os.path.join(data_dir, 'X_train_pop_%d.pkl.gz' % n_seed)
fytrain = os.path.join(data_dir, 'Y_train.pkl.gz')
fytrndev = os.path.join(data_dir, 'Y_trndev.pkl.gz')
fydev = os.path.join(data_dir, 'PU_dev_%d.pkl.gz' % n_seed)
fcliques = os.path.join(data_dir, 'cliques_trndev.pkl.gz')
fprefix = 'pop-%g-%g-%g' % (n_seed, C, p)
else:
fxtrain = os.path.join(data_dir, 'X_trndev_pop_%d.pkl.gz' % n_seed)
fytrain = os.path.join(data_dir, 'Y_trndev.pkl.gz')
fytrndev = os.path.join(data_dir, 'Y.pkl.gz')
fydev = os.path.join(data_dir, 'PU_test_%d.pkl.gz' % n_seed)
fcliques = os.path.join(data_dir, 'cliques_all.pkl.gz')
fprefix = 'trndev-pop-%g-%g-%g' % (n_seed, C, p)
fmodel = os.path.join(data_dir, '%s.pkl.gz' % fprefix)
fnpy = os.path.join(data_dir, '%s.npy' % fprefix)
X_train = pkl.load(gzip.open(fxtrain, 'rb'))
Y_train = pkl.load(gzip.open(fytrain, 'rb'))
Y_train_dev = pkl.load(gzip.open(fytrndev, 'rb'))
PU_dev = pkl.load(gzip.open(fydev, 'rb'))
cliques = pkl.load(gzip.open(fcliques, 'rb'))
print('N_SEED: %g, C: %g, p: %g' % (n_seed, C, p))
print(X_train.shape, Y_train.shape)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
if os.path.exists(fmodel):
print('evaluating ...')
clf = pkl.load(gzip.open(fmodel, 'rb')) # for evaluation
else:
print('training ...')
Y = hstack([Y_train, PU_dev]).tocsc().astype(np.bool)
clf = MTC(X_train, Y, C=C, p=p, user_playlist_indices=cliques, label_feature=False)
clf.fit(njobs=1, verbose=2, fnpy=fnpy)
if clf.trained is True:
# pkl.dump(clf, gzip.open(fmodel, 'wb'))
Y_dev = Y_train_dev[:, -PU_dev.shape[1]:]
offset = Y_train_dev.shape[1] - PU_dev.shape[1]
rps = []
aucs = []
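    # For each dev playlist: mask out the seed (already-observed) entries,
    # score the remaining songs with the user+playlist weight vector, then
    # record R-precision (mean label among the top-npos predictions) and
    # AUC over the held-out entries.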
for j in range(Y_dev.shape[1]):
y1 = Y_dev[:, j].toarray().reshape(-1)
y2 = PU_dev[:, j].toarray().reshape(-1)
indices = np.where(0 == y2)[0]
y_true = y1[indices]
npos = y_true.sum()
assert npos > 0
assert npos + y2.sum() == y1.sum()
k = offset + j
u = clf.pl2u[k]
wk = clf.V[u, :] + clf.W[k, :] + clf.mu
X = X_train
y_pred = np.dot(X, wk)[indices]
sortix = np.argsort(-y_pred)
y_ = y_true[sortix]
rps.append(np.mean(y_[:npos]))
aucs.append(roc_auc_score(y_true, y_pred))
clf.metric_score = (np.mean(aucs), np.mean(rps), len(rps), Y_dev.shape[1])
pkl.dump(clf, gzip.open(fmodel, 'wb'))
print('\n%.5f, %.5f, %d / %d' % clf.metric_score)
| cdawei/digbeta | dchen/music/src/PLA_pop.py | Python | gpl-3.0 | 3,102 | 0.000322 |
#!/usr/bin/env python
# Copyright (c) 2016 The aionotify project
# This code is distributed under the two-clause BSD License.
import codecs
import os
import re
import sys
from setuptools import setup
root_dir = os.path.abspath(os.path.dirname(__file__))
def get_version(package_name):
version_re = re.compile(r"^__version__ = [\"']([\w_.-]+)[\"']$")
package_components = package_name.split('.')
init_path = os.path.join(root_dir, *(package_components + ['__init__.py']))
with codecs.open(init_path, 'r', 'utf-8') as f:
for line in f:
match = version_re.match(line[:-1])
if match:
return match.groups()[0]
return '0.1.0'
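# get_version('aionotify') reads aionotify/__init__.py and returns the value
# of its __version__ assignment, falling back to '0.1.0' if none is found.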
PACKAGE = 'aionotify'
setup(
name=PACKAGE,
version=get_version(PACKAGE),
description="Asyncio-powered inotify library",
author="Raphaël Barrois",
author_email="raphael.barrois+%s@polytechnique.org" % PACKAGE,
url='https://github.com/rbarrois/%s' % PACKAGE,
keywords=['asyncio', 'inotify'],
packages=[PACKAGE],
license='BSD',
setup_requires=[
],
tests_require=[
'asynctest',
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Filesystems",
],
test_suite='tests',
)
| rbarrois/aionotify | setup.py | Python | bsd-2-clause | 1,579 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the quantize_graph graph rewriting API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import quantize_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class QuantizeGraphTest(test_util.TensorFlowTestCase):
# We have a lot of other tests that test the details of the rewrite, here we
  # just test the specific features of the quantize_graph API.
def _RunTestOverAllRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverTrainingRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.experimental_create_training_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverEvalRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverExperimentalRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverExperimentalRewritesWithScope(self, test_fn, scope):
def with_absent_scope(fn):
def fn_with_absent_scope(*args):
fn(*args, scope=scope)
return fn_with_absent_scope
rewrite_fns = [
with_absent_scope(
quantize_graph.experimental_create_training_graph),
with_absent_scope(
quantize_graph.experimental_create_eval_graph),
]
for fn in rewrite_fns:
test_fn(fn)
def testRewrite(self):
self._RunTestOverAllRewrites(self._TestRewrite)
def _TestRewrite(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._ConvLayer()
orig_variable_names = set(
[v.name for v in graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
rewrite_fn(graph)
q_variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
def testDefaultGraph(self):
    self._RunTestOverAllRewrites(self._TestDefaultGraph)
def _TestDefaultGraph(self, rewrite_fn):
# Tests that the default graph is correctly used when no args are provided
# to rewrite_fn.
with ops.Graph().as_default() as g:
self._ConvLayer()
orig_variable_names = set(
[v.name for v in g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
rewrite_fn()
q_variables = g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
def testWithPreActivationBypass(self):
self._RunTestOverAllRewrites(self._TestWithPreActivationBypass)
def _TestWithPreActivationBypass(self, rewrite_fn):
# Tests that the default graph is correctly used when no args are provided
# to rewrite_fn.
with ops.Graph().as_default() as g:
self._ConvLayer(pre_activation_bypass=True, scope='scope1')
rewrite_fn()
op_names = [op.name for op in g.get_operations()]
self.assertTrue(
any('scope1/add_quant/' in name for name in op_names))
def testWithPostActivationBypass(self):
self._RunTestOverAllRewrites(self._TestWithPostActivationBypass)
def _TestWithPostActivationBypass(self, rewrite_fn):
# Tests that the default graph is correctly used when no args are provided
# to rewrite_fn.
with ops.Graph().as_default() as g:
self._ConvLayer(post_activation_bypass=True, scope='scope1')
rewrite_fn()
op_names = [op.name for op in g.get_operations()]
self.assertTrue(any(
'scope1/post_activation_bypass_quant/' in name for name in op_names))
def testQuantDelay(self):
self._RunTestOverTrainingRewrites(self._TestQuantDelay)
def _TestQuantDelay(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
quant_delay = 100
rewrite_fn(quant_delay=quant_delay)
quant_delay_found = False
for op in g.get_operations():
# Check to see if the quant_delay is correctly set.
if 'activate_quant' in op.name and op.type == 'Const':
quant_delay_found = True
const_value = str(op.get_attr('value'))
self.assertTrue(('int64_val: %i' % quant_delay) in const_value)
self.assertTrue(quant_delay_found)
def testWeightBits(self):
self._RunTestOverExperimentalRewrites(self._TestWeightBits)
def _TestWeightBits(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
weight_bits = 4
rewrite_fn(weight_bits=weight_bits)
weights_quant_found = False
for op in g.get_operations():
# Check to see if FakeQuant operations for weights have the right bits
# set.
if 'weights_quant' in op.name and op.type == 'FakeQuantWithMinMaxVars':
weights_quant_found = True
self.assertEqual(op.get_attr('num_bits'), weight_bits)
self.assertTrue(weights_quant_found)
def testActivationBits(self):
self._RunTestOverExperimentalRewrites(self._TestActivationBits)
def _TestActivationBits(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
activation_bits = 4
rewrite_fn(activation_bits=activation_bits)
act_quant_found = False
for op in g.get_operations():
# Check to see if FakeQuant operations for activations have the right bits
# set.
act_quant_names = ['act_quant', 'conv_quant', 'add_quant']
if any(s in op.name
for s in act_quant_names) and op.type == 'FakeQuantWithMinMaxVars':
act_quant_found = True
self.assertEqual(op.get_attr('num_bits'), activation_bits)
self.assertTrue(act_quant_found)
def testTrainingQuantization(self):
self._RunTestOverTrainingRewrites(self._TestTrainingQuantization)
def _TestTrainingQuantization(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
# Ensure that FakeQuant and variable update nodes were found.
quant_found = False
assign_min_last_found = False
assign_min_ema_found = False
assign_max_last_found = False
assign_max_ema_found = False
for op in g.get_operations():
# Check that FakeQuant operations were added.
if op.type == 'FakeQuantWithMinMaxVars':
quant_found = True
# Check that update operations for the added min max variables exist in
# the graph.
if 'AssignMinLast' in op.name:
assign_min_last_found = True
elif 'AssignMinEma' in op.name:
assign_min_ema_found = True
elif 'AssignMaxLast' in op.name:
assign_max_last_found = True
elif 'AssignMaxEma' in op.name:
assign_max_ema_found = True
self.assertTrue(assign_min_last_found)
self.assertTrue(assign_min_ema_found)
self.assertTrue(assign_max_last_found)
self.assertTrue(assign_max_ema_found)
self.assertTrue(quant_found)
def testEvalQuantization(self):
self._RunTestOverEvalRewrites(self._TestEvalQuantization)
def _TestEvalQuantization(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
# Ensure that FakeQuant and variable update nodes were found.
quant_found = False
for op in g.get_operations():
# Check that FakeQuant operations were added.
if op.type == 'FakeQuantWithMinMaxVars':
quant_found = True
# Check that update operations for the added min max variables don't
# exist in the graph.
update_names = [
'AssignMinLast', 'AssignMinEma', 'AssignMaxLast', 'AssignMaxEma'
]
self.assertFalse(any(s in op.name for s in update_names))
self.assertTrue(quant_found)
def testIdempotent(self):
self._RunTestOverAllRewrites(self._TestIdempotent)
def _TestIdempotent(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
graph_def_before = str(g.as_graph_def())
# Ensuring that calling the rewrite again doesn't add more nodes.
rewrite_fn()
graph_def_after = str(g.as_graph_def())
self.assertEqual(graph_def_before, graph_def_after)
def testRewriteWithScope(self):
self._RunTestOverExperimentalRewritesWithScope(
self._TestRewriteWithScope, 'scope1')
def _TestRewriteWithScope(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
scope1_output = self._ConvLayer(scope='scope1')
self._ConvLayer(input_tensor=scope1_output, scope='scope2')
rewrite_fn(graph)
op_names = [op.name for op in graph.get_operations()]
# The weights and activation of scope1 is quantized, but not scope2.
self.assertTrue(
any('scope1/Conv/act_quant' in name for name in op_names))
self.assertTrue(
any('scope1/Conv/weights_quant' in name for name in op_names))
self.assertFalse(
any('scope2/Conv/act_quant' in name for name in op_names))
self.assertFalse(
any('scope2/Conv/weights_quant' in name for name in op_names))
def testRewriteWithNonMatchingScope(self):
self._RunTestOverExperimentalRewritesWithScope(
self._TestRewriteWithNonMatchingScope, 'NonExistingScope')
def _TestRewriteWithNonMatchingScope(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._ConvLayer()
op_names_before_rewrite = set([op.name for op in graph.get_operations()])
rewrite_fn(graph)
op_names_after_rewrite = set([op.name for op in graph.get_operations()])
# No ops should be inserted or removed.
self.assertEqual(op_names_before_rewrite, op_names_after_rewrite)
def _ConvLayer(
self, input_tensor=None, scope='test', pre_activation_bypass=False,
post_activation_bypass=False):
"""Add a basic convolution layer to the default graph."""
batch_size, height, width, depth = 5, 128, 128, 3
if input_tensor is None:
input_tensor = array_ops.zeros((batch_size, height, width, depth))
weight_init = init_ops.truncated_normal_initializer
with ops.name_scope(scope):
output = layers.conv2d(
input_tensor,
depth, [5, 5],
padding='SAME',
weights_initializer=weight_init(0.09),
activation_fn=None)
if pre_activation_bypass:
output += input_tensor
output = nn_ops.relu6(output)
if post_activation_bypass:
output += input_tensor
return output
if __name__ == '__main__':
googletest.main()
| allenlavoie/tensorflow | tensorflow/contrib/quantize/python/quantize_graph_test.py | Python | apache-2.0 | 11,911 | 0.009151 |
''' Present a scatter plot with linked histograms on both axes.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve selection_histogram.py
at your command prompt. Then navigate to the URL
http://localhost:5006/selection_histogram
in your browser.
'''
import numpy as np
from bokeh.models import BoxSelectTool, LassoSelectTool, Paragraph
from bokeh.plotting import figure, hplot, vplot, curdoc
# create three normal population samples with different parameters
x1 = np.random.normal(loc=5.0, size=400) * 100
y1 = np.random.normal(loc=10.0, size=400) * 10
x2 = np.random.normal(loc=5.0, size=800) * 50
y2 = np.random.normal(loc=5.0, size=800) * 10
x3 = np.random.normal(loc=55.0, size=200) * 10
y3 = np.random.normal(loc=4.0, size=200) * 10
x = np.concatenate((x1, x2, x3))
y = np.concatenate((y1, y2, y3))
TOOLS="pan,wheel_zoom,box_select,lasso_select"
# create the scatter plot
p = figure(tools=TOOLS, plot_width=600, plot_height=600, title=None, min_border=10, min_border_left=50)
r = p.scatter(x, y, size=3, color="#3A5785", alpha=0.6)
p.select(BoxSelectTool).select_every_mousemove = False
p.select(LassoSelectTool).select_every_mousemove = False
# create the horizontal histogram
hhist, hedges = np.histogram(x, bins=20)
hzeros = np.zeros(len(hedges)-1)
hmax = max(hhist)*1.1
LINE_ARGS = dict(color="#3A5785", line_color=None)
ph = figure(toolbar_location=None, plot_width=p.plot_width, plot_height=200, x_range=p.x_range,
y_range=(-hmax, hmax), title=None, min_border=10, min_border_left=50)
ph.xgrid.grid_line_color = None
ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color="white", line_color="#3A5785")
hh1 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.5, **LINE_ARGS)
hh2 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.1, **LINE_ARGS)
# create the vertical histogram
vhist, vedges = np.histogram(y, bins=20)
vzeros = np.zeros(len(vedges)-1)
vmax = max(vhist)*1.1
th = 42 # need to adjust for toolbar height, unfortunately
pv = figure(toolbar_location=None, plot_width=200, plot_height=p.plot_height+th-10, x_range=(-vmax, vmax),
y_range=p.y_range, title=None, min_border=10, min_border_top=th)
pv.ygrid.grid_line_color = None
pv.xaxis.major_label_orientation = -3.14/2
pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vhist, color="white", line_color="#3A5785")
vh1 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.5, **LINE_ARGS)
vh2 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.1, **LINE_ARGS)
pv.min_border_top = 80
pv.min_border_left = 0
ph.min_border_top = 10
ph.min_border_right = 10
p.min_border_right = 10
layout = vplot(hplot(p, pv), hplot(ph, Paragraph(width=200)), width=800, height=800)
curdoc().add_root(layout)
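# Linked-selection callback: whenever the scatter selection changes, recompute
# the marginal histograms for the selected points (drawn solid above the axis)
# and for the unselected remainder (drawn faintly below it via negated tops).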
def update(attr, old, new):
inds = np.array(new['1d']['indices'])
if len(inds) == 0 or len(inds) == len(x):
hhist1, hhist2 = hzeros, hzeros
vhist1, vhist2 = vzeros, vzeros
else:
neg_inds = np.ones_like(x, dtype=np.bool)
neg_inds[inds] = False
hhist1, _ = np.histogram(x[inds], bins=hedges)
vhist1, _ = np.histogram(y[inds], bins=vedges)
hhist2, _ = np.histogram(x[neg_inds], bins=hedges)
vhist2, _ = np.histogram(y[neg_inds], bins=vedges)
hh1.data_source.data["top"] = hhist1
hh2.data_source.data["top"] = -hhist2
vh1.data_source.data["right"] = vhist1
vh2.data_source.data["right"] = -vhist2
r.data_source.on_change('selected', update)
| justacec/bokeh | examples/app/selection_histogram.py | Python | bsd-3-clause | 3,598 | 0.005281 |
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.tests import utils
from cinderclient.tests.v2 import fakes
cs = fakes.FakeClient()
class VolumeBackupsTest(utils.TestCase):
def test_create(self):
cs.backups.create('2b695faf-b963-40c8-8464-274008fbcef4')
cs.assert_called('POST', '/backups')
def test_get(self):
backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
cs.backups.get(backup_id)
cs.assert_called('GET', '/backups/%s' % backup_id)
def test_list(self):
cs.backups.list()
cs.assert_called('GET', '/backups/detail')
def test_delete(self):
b = cs.backups.list()[0]
b.delete()
cs.assert_called('DELETE',
'/backups/76a17945-3c6f-435c-975b-b5685db10b62')
cs.backups.delete('76a17945-3c6f-435c-975b-b5685db10b62')
cs.assert_called('DELETE',
'/backups/76a17945-3c6f-435c-975b-b5685db10b62')
cs.backups.delete(b)
cs.assert_called('DELETE',
'/backups/76a17945-3c6f-435c-975b-b5685db10b62')
def test_restore(self):
backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
cs.restores.restore(backup_id)
cs.assert_called('POST', '/backups/%s/restore' % backup_id)
def test_record_export(self):
backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
cs.backups.export_record(backup_id)
cs.assert_called('GET',
'/backups/%s/export_record' % backup_id)
def test_record_import(self):
backup_service = 'fake-backup-service'
backup_url = 'fake-backup-url'
expected_body = {'backup-record': {'backup_service': backup_service,
'backup_url': backup_url}}
cs.backups.import_record(backup_service, backup_url)
cs.assert_called('POST', '/backups/import_record', expected_body)
| swamireddy/python-cinderclient | cinderclient/tests/v2/test_volume_backups.py | Python | apache-2.0 | 2,562 | 0 |
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" Connectivity nodes are points where terminals of conducting equipment are connected together with zero impedance.
"""
# <<< imports
# @generated
from cpsm.core.identified_object import IdentifiedObject
from cpsm.core.connectivity_node_container import ConnectivityNodeContainer
from google.appengine.ext import db
# >>> imports
class ConnectivityNode(IdentifiedObject):
""" Connectivity nodes are points where terminals of conducting equipment are connected together with zero impedance.
"""
# <<< connectivity_node.attributes
# @generated
# >>> connectivity_node.attributes
# <<< connectivity_node.references
# @generated
# Virtual property. Terminals interconnect with zero impedance at a node. Measurements on a node apply to all of its terminals.
pass # terminals
# Container of this connectivity node.
member_of_equipment_container = db.ReferenceProperty(ConnectivityNodeContainer,
collection_name="connectivity_nodes")
# >>> connectivity_node.references
# <<< connectivity_node.operations
# @generated
# >>> connectivity_node.operations
# EOF -------------------------------------------------------------------------
| rwl/openpowersystem | cpsm/topology/connectivity_node.py | Python | agpl-3.0 | 2,078 | 0.006256 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import six
from shuup.apps.provides import get_provide_objects
from shuup.utils.importing import cached_load, load
_registry = []
def register(module_class):
if isinstance(module_class, six.string_types):
module_class = load(module_class, "Admin Module")
_registry.append(module_class())
def discover():
for obj in get_provide_objects("admin_module"):
register(obj)
def get_admin_modules():
"""
:rtype: list[shuup.admin.base.AdminModule]
"""
if not _registry:
discover()
return iter(_registry)
def get_modules():
"""
:rtype: list[shuup.admin.base.AdminModule]
"""
get_modules_spec = cached_load("SHUUP_GET_ADMIN_MODULES_SPEC")
return get_modules_spec()
def get_module_urls():
for module in get_modules(): # pragma: no branch
for url in module.get_urls(): # pragma: no branch
yield url
@contextlib.contextmanager
def replace_modules(new_module_classes):
"""
Context manager to temporarily replace all modules with something else.
Test utility, mostly.
>>> def some_test():
... with replace_modules(["foo.bar:QuuxModule"]):
... pass # do stuff
:param new_module_classes: Iterable of module classes, like you'd pass to `register`
"""
old_registry = _registry[:]
_registry[:] = []
for cls in new_module_classes:
register(cls)
try:
yield
finally:
_registry[:] = old_registry
| suutari-ai/shoop | shuup/admin/module_registry.py | Python | agpl-3.0 | 1,750 | 0.000571 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from document_renderer import DocumentRenderer
from server_instance import ServerInstance
from test_file_system import TestFileSystem
from test_data.canned_data import CANNED_TEST_FILE_SYSTEM_DATA
class DocumentRendererUnittest(unittest.TestCase):
def setUp(self):
self._renderer = ServerInstance.ForTest(
TestFileSystem(CANNED_TEST_FILE_SYSTEM_DATA)).document_renderer
def testNothingToSubstitute(self):
document = 'hello world'
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(document, text)
self.assertEqual(['Expected a title'], warnings)
def testTitles(self):
document = '<h1>title</h1> then $(title) then another $(title)'
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(document, text)
self.assertEqual(['Found unexpected title "title"'], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual('<h1>title</h1> then title then another $(title)', text)
self.assertEqual([], warnings)
def testTocs(self):
document = ('here is a toc $(table_of_contents) '
'and another $(table_of_contents)')
expected_document = ('here is a toc <table-of-contents> and another '
'$(table_of_contents)')
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual(['Expected a title'], warnings)
def testRefs(self):
# The references in this and subsequent tests won't actually be resolved
document = 'A ref $(ref:baz.baz_e1) here, $(ref:foo.foo_t3 ref title) there'
expected_document = ('A ref <a href=#type-baz_e1>baz.baz_e1</a> '
'here, <a href=#type-foo_t3>ref title</a> '
'there')
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual(['Expected a title'], warnings)
def testTitleAndToc(self):
document = '<h1>title</h1> $(title) and $(table_of_contents)'
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual('<h1>title</h1> $(title) and <table-of-contents>', text)
self.assertEqual(['Found unexpected title "title"'], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual('<h1>title</h1> title and <table-of-contents>', text)
self.assertEqual([], warnings)
def testRefInTitle(self):
document = '<h1>$(ref:baz.baz_e1 title)</h1> A $(title) was here'
expected_document_no_title = ('<h1><a href=#type-baz_e1>'
'title</a></h1> A $(title) was here')
expected_document = ('<h1><a href=#type-baz_e1>title</a></h1>'
' A title was here')
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document_no_title, text)
self.assertEqual([('Found unexpected title "title"')], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
def testRefSplitAcrossLines(self):
document = 'Hello, $(ref:baz.baz_e1 world). A $(ref:foo.foo_t3\n link)'
expected_document = ('Hello, <a href=#type-baz_e1>world</a>. A <a href='
'#type-foo_t3>link</a>')
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual(['Expected a title'], warnings)
def testInvalidRef(self):
# DocumentRenderer attempts to detect unclosed $(ref:...) tags by limiting
# how far it looks ahead. Lorem Ipsum should be long enough to trigger that.
_LOREM_IPSUM = (
'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do '
'eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim '
'ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut '
'aliquip ex ea commodo consequat. Duis aute irure dolor in '
'reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla '
'pariatur. Excepteur sint occaecat cupidatat non proident, sunt in '
'culpa qui officia deserunt mollit anim id est laborum.')
document = ('An invalid $(ref:foo.foo_t3 a title ' + _LOREM_IPSUM +
'$(ref:baz.baz_e1) here')
expected_document = ('An invalid $(ref:foo.foo_t3 a title ' + _LOREM_IPSUM +
'<a href=#type-baz_e1>baz.baz_e1</a> here')
path = 'apps/some/path/to/document_api.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual(['Expected a title'], warnings)
if __name__ == '__main__':
unittest.main()
| chromium2014/src | chrome/common/extensions/docs/server2/document_renderer_test.py | Python | bsd-3-clause | 6,119 | 0.002125 |
"""
dwm package setup
"""
from __future__ import print_function
from setuptools import setup, find_packages
__version__ = '1.1.0'
def readme():
""" open readme for long_description """
try:
with open('README.md') as fle:
return fle.read()
except IOError:
return ''
setup(
name='dwm',
version=__version__,
url='https://github.com/rh-marketingops/dwm',
license='GNU General Public License',
author='Jeremiah Coleman',
tests_require=['nose', 'mongomock>=3.5.0'],
install_requires=['pymongo>=3.2.2', 'tqdm>=4.8.4'],
author_email='colemanja91@gmail.com',
description='Best practices for marketing data quality management',
long_description=readme(),
packages=find_packages(),
include_package_data=True,
platforms='any',
test_suite='nose.collector',
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
keywords='marketing automation data quality cleanse washing cleaning'
)
| rh-marketingops/dwm | setup.py | Python | gpl-3.0 | 1,421 | 0.001407 |
import os
import logging
from jsub.util import safe_mkdir
from jsub.util import safe_rmdir
class Submit(object):
def __init__(self, manager, task_id, sub_ids=None, dry_run=False, resubmit=False):
self.__manager = manager
self.__task = self.__manager.load_task(task_id)
self.__sub_ids = sub_ids
self.__dry_run = dry_run
self.__resubmit = resubmit
self.__logger = logging.getLogger('JSUB')
if self.__sub_ids==None:
self.__sub_ids=range(len(self.__task.data['jobvar']))
self.__initialize_manager()
def __initialize_manager(self):
self.__config_mgr = self.__manager.load_config_manager()
self.__backend_mgr = self.__manager.load_backend_manager()
self.__bootstrap_mgr = self.__manager.load_bootstrap_manager()
self.__navigator_mgr = self.__manager.load_navigator_manager()
self.__context_mgr = self.__manager.load_context_manager()
self.__action_mgr = self.__manager.load_action_manager()
self.__launcher_mgr = self.__manager.load_launcher_manager()
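	# handle() lays out a fresh run directory for the task,
	#   <run_root>/main/{input,context,action,navigator,bootstrap},
	# builds the launcher under <run_root>, then submits to the backend.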
def handle(self):
run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
main_root = os.path.join(run_root, 'main')
safe_rmdir(main_root)
safe_mkdir(main_root)
self.__create_input(main_root)
self.__create_context(main_root)
self.__create_action(main_root)
self.__create_navigator(main_root)
self.__create_bootstrap(main_root)
launcher_param = self.__create_launcher(run_root)
self.__submit(launcher_param)
def __create_input(self, main_root):
content = self.__manager.load_content()
input_dir = os.path.join(main_root,'input')
try:
content.get(self.__task.data['id'], 'input', os.path.join(main_root, 'input'))
except:
safe_mkdir(input_dir)
def __create_context(self, main_root):
context_dir = os.path.join(main_root, 'context')
safe_mkdir(context_dir)
action_default = {}
for unit, param in self.__task.data['workflow'].items():
action_default[unit] = self.__action_mgr.default_config(param['type'])
navigators = self.__config_mgr.navigator()
context_format = self.__navigator_mgr.context_format(navigators)
self.__context_mgr.create_context_file(self.__task.data, action_default, context_format, context_dir)
def __create_action(self, main_root):
action_dir = os.path.join(main_root, 'action')
safe_mkdir(action_dir)
actions = set()
for unit, param in self.__task.data['workflow'].items():
actions.add(param['type'])
self.__action_mgr.create_actions(actions, action_dir)
def __create_navigator(self, main_root):
navigator_dir = os.path.join(main_root, 'navigator')
safe_mkdir(navigator_dir)
navigators = self.__config_mgr.navigator()
self.__navigator_mgr.create_navigators(navigators, navigator_dir)
def __create_bootstrap(self, main_root):
bootstrap_dir = os.path.join(main_root, 'bootstrap')
safe_mkdir(bootstrap_dir)
bootstrap = self.__config_mgr.bootstrap()
self.__bootstrap_mgr.create_bootstrap(bootstrap, bootstrap_dir)
def __create_launcher(self, run_root):
launcher = self.__task.data['backend']['launcher']
return self.__launcher_mgr.create_launcher(launcher, run_root)
def __submit(self, launcher_param):
if self.__dry_run:
return
if self.__resubmit==False:
if self.__task.data.get('backend_job_ids') or self.__task.data.get('backend_task_id'):
self.__logger.info('This task has already been submitted to backend, rerun the command with "-r" option if you wish to delete current jobs and resubmit the task.')
return
else:
self.__logger.info('Removing submitted jobs on backend before resubmission.')
task_id = self.__task.data.get('backend_task_id')
			# remove previously generated files in the job folder
job_ids = self.__task.data.get('backend_job_ids')
run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
job_root=os.path.join(run_root,'subjobs')
safe_rmdir(job_root)
if task_id:
self.__backend_mgr.delete_task(self.__task.data['backend'],backend_task_id = task_id)
elif job_ids:
self.__backend_mgr.delete_jobs(self.__task.data['backend'],backend_job_ids = job_ids)
result = self.__backend_mgr.submit(self.__task.data['backend'], self.__task.data['id'], launcher_param, sub_ids = self.__sub_ids)
        if not isinstance(result, dict):
            result = {}
        if 'backend_job_ids' in result:
            njobs = len(result['backend_job_ids'])
        else:
            njobs = len(result)
        if njobs > 0:
            self.__logger.info('%d jobs successfully submitted to the backend.' % njobs)
        self.__task.data.setdefault('backend_job_ids', {})
        backend_job_ids = result.get('backend_job_ids', {})
        backend_task_id = result.get('backend_task_id', 0)
        self.__task.data['backend_job_ids'].update(backend_job_ids)
        self.__task.data['backend_task_id'] = backend_task_id
self.__task.data['status'] = 'Submitted'
task_pool = self.__manager.load_task_pool()
task_pool.save(self.__task)
self.__logger.debug(result)
| jsubpy/jsub | jsub/operation/submit.py | Python | mit | 4,925 | 0.031675 |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import base64
import datetime
import hashlib
from httplib import HTTPException
import json
import logging
import os
import re
from types import NoneType
import types
from google.appengine.api import images, urlfetch, search
from google.appengine.api.images import composite, TOP_LEFT, BOTTOM_LEFT
from google.appengine.api.urlfetch_errors import DeadlineExceededError
from google.appengine.ext import db, deferred
import facebook
from mcfw.cache import invalidate_cache
from mcfw.properties import azzert
from mcfw.rpc import returns, arguments
from mcfw.utils import chunks
from rogerthat.bizz.friends import INVITE_ID, INVITE_FACEBOOK_FRIEND, invite, breakFriendShip, makeFriends, userCode
from rogerthat.bizz.job import run_job
from rogerthat.bizz.messaging import sendMessage
from rogerthat.bizz.session import drop_sessions_of_user
from rogerthat.bizz.system import get_identity, identity_update_response_handler
from rogerthat.bizz.user import reactivate_user_profile
from rogerthat.capi.system import identityUpdate
from rogerthat.consts import MC_DASHBOARD
from rogerthat.dal import parent_key, put_and_invalidate_cache
from rogerthat.dal.app import get_app_name_by_id, get_app_by_user, get_default_app
from rogerthat.dal.broadcast import get_broadcast_settings_flow_cache_keys_of_user
from rogerthat.dal.friend import get_friends_map
from rogerthat.dal.profile import get_avatar_by_id, get_existing_profiles_via_facebook_ids, \
get_existing_user_profiles, get_user_profile, get_profile_infos, get_profile_info, get_service_profile, \
is_trial_service, \
get_user_profiles, get_service_or_user_profile, get_deactivated_user_profile
from rogerthat.dal.service import get_default_service_identity_not_cached, get_all_service_friend_keys_query, \
get_service_identities_query, get_all_archived_service_friend_keys_query, get_friend_serviceidentity_connection, \
get_default_service_identity
from rogerthat.models import FacebookUserProfile, Avatar, ProfilePointer, ShortURL, ProfileDiscoveryResult, \
FacebookProfilePointer, FacebookDiscoveryInvite, Message, ServiceProfile, UserProfile, ServiceIdentity, ProfileInfo, \
App, \
Profile, SearchConfig, FriendServiceIdentityConnectionArchive, \
UserData, UserDataArchive, ActivationLog, ProfileHashIndex
from rogerthat.rpc import users
from rogerthat.rpc.models import Mobile
from rogerthat.rpc.rpc import logError, SKIP_ACCOUNTS
from rogerthat.rpc.service import BusinessException
from rogerthat.to.friends import FacebookRogerthatProfileMatchTO
from rogerthat.to.messaging import ButtonTO, UserMemberTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.to.system import IdentityUpdateRequestTO
from rogerthat.translations import localize, DEFAULT_LANGUAGE
from rogerthat.utils import now, urlencode, is_clean_app_user_email, get_epoch_from_datetime
from rogerthat.utils.app import get_app_id_from_app_user, create_app_user, get_human_user_from_app_user, \
get_app_user_tuple, create_app_user_by_email
from rogerthat.utils.channel import send_message
from rogerthat.utils.oauth import LinkedInClient
from rogerthat.utils.service import create_service_identity_user, remove_slash_default
from rogerthat.utils.transactions import on_trans_committed, run_in_transaction
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
CURRENT_DIR = os.path.dirname(__file__)
UNKNOWN_AVATAR_PATH = os.path.join(CURRENT_DIR, 'unknown_avatar.png')
NUNTIUZ_AVATAR_PATH = os.path.join(CURRENT_DIR, 'nuntiuz.png')
USER_INDEX = "USER_INDEX"
class FailedToBuildFacebookProfileException(BusinessException):
pass
def get_unknown_avatar():
f = open(UNKNOWN_AVATAR_PATH, "rb")
try:
return f.read()
finally:
f.close()
def get_nuntiuz_avatar():
f = open(NUNTIUZ_AVATAR_PATH, "rb")
try:
return f.read()
finally:
f.close()
UNKNOWN_AVATAR = get_unknown_avatar()
NUNTIUZ_AVATAR = get_nuntiuz_avatar()
@returns(NoneType)
@arguments(app_user=users.User)
def schedule_re_index(app_user):
# Does NOT have to be transactional, running it over and over does not harm
deferred.defer(_re_index, app_user)
def create_user_index_document(index, app_user_email, fields):
email_encoded = 'base64:' + base64.b64encode(app_user_email)
doc = search.Document(doc_id=email_encoded, fields=fields)
return index.put(doc)[0]
def delete_user_index_document(index, app_user_email):
email_encoded = 'base64:' + base64.b64encode(app_user_email)
return index.delete(email_encoded)[0]
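# Note: the 'base64:' doc_id prefix keeps the id valid for the search API even
# when the app user email contains characters that are not allowed in doc ids,
# and makes these documents distinguishable from legacy ones that used the raw
# email as doc_id (those are cleaned up in _re_index below).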
def _re_index(app_user):
def trans():
user_profile = get_profile_info(app_user, False)
fm = get_friends_map(app_user)
return user_profile, fm
user_profile, fm = db.run_in_transaction(trans)
app_user_email = app_user.email()
# delete old indexed app user if the doc_id is app_user_email (not encoded)
user_index = search.Index(name=USER_INDEX)
try:
if user_index.get(app_user_email):
user_index.delete(app_user_email)
except search.InvalidRequest:
pass
if not user_profile:
logging.info("Tried to index a user who is deactivated")
delete_user_index_document(user_index, app_user_email)
return
if user_profile.isServiceIdentity:
logging.error("Tried to index a service into the USER_INDEX")
return
connections = StringIO()
for f in fm.friends:
email = f.email().encode('utf8').replace('"', '')
connections.write('@@%s@@' % email)
if '/' in email:
connections.write('@@%s@@' % email.split('/')[0])
human_user, app_id = get_app_user_tuple(app_user)
fields = [
search.TextField(name='email', value=human_user.email()),
search.TextField(name='name', value=user_profile.name),
search.TextField(name='language', value=user_profile.language),
search.TextField(name='connections', value=connections.getvalue()),
search.TextField(name='app_id', value=app_id)
]
if user_profile.profileData:
data = json.loads(user_profile.profileData)
for key, value in data.iteritems():
fields.append(search.TextField(name='pd_%s' % key.replace(' ', '_'), value=value))
create_user_index_document(user_index, app_user_email, fields)
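# Illustrative sketch of the resulting search document (hypothetical values):
#   email='john@example.com', name='John', language='en', app_id='rogerthat',
#   connections='@@friend@example.com/rogerthat@@@@friend@example.com@@',
#   pd_Phone_number='+3212345678'
# The '@@...@@' wrapping makes exact-match queries on connections possible.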
@returns([UserDetailsTO])
@arguments(name_or_email_term=unicode, app_id=unicode)
def search_users_via_name_or_email(name_or_email_term, app_id=None):
logging.info("Looking for users with term '%s'." % name_or_email_term)
if len(name_or_email_term) < 3:
logging.info("Search term is to short. Bye bye.")
return []
name_or_email_term = name_or_email_term.replace('"', '')
if app_id:
query = search.Query(query_string='email:"%s" OR name:"%s" app_id:%s' % (name_or_email_term, name_or_email_term, app_id),
options=search.QueryOptions(returned_fields=['email', 'name', 'language', 'app_id'], limit=10))
else:
query = search.Query(query_string='email:"%s" OR name:"%s"' % (name_or_email_term, name_or_email_term),
options=search.QueryOptions(returned_fields=['email', 'name', 'language', 'app_id'], limit=10))
search_result = search.Index(name=USER_INDEX).search(query)
return [UserDetailsTO.create(email=doc.fields[0].value,
name=doc.fields[1].value,
language=doc.fields[2].value,
app_id=doc.fields[3].value,
avatar_url=None)
for doc in search_result.results]
@returns([UserDetailsTO])
@arguments(connection=unicode, name_or_email_term=unicode, app_id=unicode, include_avatar=bool)
def search_users_via_friend_connection_and_name_or_email(connection, name_or_email_term, app_id=None, include_avatar=False):
"""Search for users in the USER_INDEX.
connection: The account of the connection (human or service).
    In case of a service, searching across identities is possible by omitting the slash and everything after it.
name_or_email_term: A fragment of the name or email of the user you are looking for."""
if len(name_or_email_term) < 3:
return []
connection = connection.encode('utf8').replace('"', '')
name_or_email_term = name_or_email_term.replace('"', '')
if app_id:
query = search.Query(query_string='connections:"@@%s@@" AND (email:"%s" OR name:"%s") app_id:%s' % (connection, name_or_email_term, name_or_email_term, app_id),
options=search.QueryOptions(returned_fields=['email', 'name', 'language', 'app_id'], limit=10))
else:
query = search.Query(query_string='connections:"@@%s@@" AND (email:"%s" OR name:"%s")' % (connection, name_or_email_term, name_or_email_term),
options=search.QueryOptions(returned_fields=['email', 'name', 'language', 'app_id'], limit=10))
search_result = search.Index(name=USER_INDEX).search(query)
avatar_urls = dict()
if include_avatar:
for p in get_user_profiles([create_app_user_by_email(d.fields[0].value, d.fields[3].value)
for d in search_result.results]):
avatar_urls[p.user] = p.avatarUrl
    def create_user_detail(doc):
        if include_avatar:
            avatar_url = avatar_urls.get(create_app_user_by_email(doc.fields[0].value, doc.fields[3].value))
        else:
            avatar_url = None
        return UserDetailsTO.create(email=doc.fields[0].value,
                                    name=doc.fields[1].value,
                                    language=doc.fields[2].value,
                                    avatar_url=avatar_url,
                                    app_id=doc.fields[3].value)
return [create_user_detail(d) for d in search_result.results]
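# Hedged usage sketch (hypothetical accounts, not part of this module): a
# service can match users across all of its identities by passing its account
# without the slash suffix as `connection`:
def _example_connection_search():  # illustrative only
    return search_users_via_friend_connection_and_name_or_email(
        u'service@example.com', u'ann', app_id=u'rogerthat', include_avatar=True)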
@returns(UserProfile)
@arguments(email=unicode, language=unicode, name=unicode)
def get_profile_for_google_user(email, language, name):
user = users.User(email)
user_profile = get_user_profile(user)
if not user_profile:
user_profile = UserProfile(parent=parent_key(user), key_name=user.email())
user_profile.name = name if name else user.email()
user_profile.language = language
user_profile.version = 1
put_and_invalidate_cache(user_profile, ProfilePointer.create(user))
update_friends(user_profile, [u"name", u"language"])
return user_profile
@returns(Avatar)
@arguments(app_user=users.User, fb_id=unicode, profile_or_key=(Profile, db.Key), avatar_or_key=(Avatar, db.Key),
retry_count=int)
def _get_and_save_facebook_avatar(app_user, fb_id, profile_or_key, avatar_or_key, retry_count=0):
if retry_count == 5:
logging.debug("Reached max retry count. Giving up trying to get the facebook avatar for %s.", app_user)
return None
avatar = db.get(avatar_or_key) if isinstance(avatar_or_key, db.Key) else avatar_or_key
if avatar.picture:
logging.debug("In the mean time, there already is an avatar set for %s. Stop retrying...", app_user)
return avatar
profile_or_key_is_key = isinstance(profile_or_key, db.Key)
try:
url = 'https://graph.facebook.com/%s/picture' % fb_id
response = urlfetch.fetch(url, deadline=60)
if response.status_code == 404:
logging.warn('Facebook avatar not found. Giving up trying to get the facebook avatar for %s', app_user)
return None
if response.status_code != 200:
            logging.warn('Received code %s from facebook while fetching avatar. Retrying... \n%s', response.status_code,
                         response.content)
profile_key = profile_or_key if profile_or_key_is_key else profile_or_key.key()
deferred.defer(_get_and_save_facebook_avatar, app_user, fb_id, profile_key, avatar.key(), retry_count + 1,
_countdown=5)
return None
image = response.content
def update_avatar_and_profile(profile_or_key_is_key):
avatar = db.get(avatar_or_key) if isinstance(avatar_or_key, db.Key) else avatar_or_key
            if avatar.picture:
                logging.debug("In the mean time, there already is an avatar set for %s. Stopping...", app_user)
                return None
avatar.picture = image
avatar.put()
profile = db.get(profile_or_key) if profile_or_key_is_key else profile_or_key
_calculateAndSetAvatarHash(profile, image)
if profile_or_key_is_key:
profile.put()
return avatar
if profile_or_key_is_key:
xg_on = db.create_transaction_options(xg=True)
avatar = db.run_in_transaction_options(xg_on, update_avatar_and_profile, profile_or_key_is_key)
else:
avatar = update_avatar_and_profile(profile_or_key_is_key)
except Exception as e:
avatar.put() # put empty to get avatar id.
        if isinstance(e, DeadlineExceededError) or (isinstance(e, HTTPException) and e.message and 'deadline' in e.message.lower()):
logging.debug("Timeout while retrieving facebook avatar for %s. Retrying...", app_user)
profile_key = profile_or_key if profile_or_key_is_key else profile_or_key.key()
deferred.defer(_get_and_save_facebook_avatar, app_user, fb_id, profile_key, avatar.key(), retry_count + 1,
_countdown=5)
else:
logging.exception("Failed to retrieve facebook avatar for %s.", app_user)
return avatar
@returns(UserProfile)
@arguments(access_token=unicode, app_user=users.User, update=bool, language=unicode, app_id=unicode)
def get_profile_for_facebook_user(access_token, app_user, update=False, language=DEFAULT_LANGUAGE, app_id=App.APP_ID_ROGERTHAT):
gapi = facebook.GraphAPI(access_token)
fields = ["id", "first_name", "last_name", "name", "verified", "locale", "gender", "email", "birthday", "link"]
fb_profile = gapi.get_object("me", fields=','.join(fields))
logging.debug("/me graph response: %s", fb_profile)
if not app_user:
if "email" in fb_profile:
app_user = create_app_user(users.User(fb_profile["email"]), app_id)
else:
raise FailedToBuildFacebookProfileException(
localize(language, 'There is no e-mail address configured in your facebook account. Please use the e-mail based login.'))
# TODO we should validate app.user_regex
# TODO we should check if email is not used for a service account
couple_facebook_id_with_profile(app_user, access_token)
profile = get_user_profile(app_user)
if not profile or update:
if not profile:
profile = FacebookUserProfile(parent=parent_key(app_user), key_name=app_user.email())
profile.app_id = app_id
avatar = Avatar(user=app_user)
else:
avatar = get_avatar_by_id(profile.avatarId)
if not avatar:
avatar = Avatar(user=app_user)
if fb_profile.get("name"):
profile.name = fb_profile["name"]
else:
profile.name = get_human_user_from_app_user(app_user).email().replace("@", " at ")
if profile.birthdate is None and fb_profile.get("birthday"):
birthday = fb_profile["birthday"].split("/")
profile.birthdate = get_epoch_from_datetime(
datetime.date(int(birthday[2]), int(birthday[0]), int(birthday[1])))
if profile.gender is None and fb_profile.get("gender"):
gender = fb_profile["gender"]
if gender == "male":
profile.gender = UserProfile.GENDER_MALE
elif gender == "female":
profile.gender = UserProfile.GENDER_FEMALE
else:
profile.gender = UserProfile.GENDER_CUSTOM
avatar = _get_and_save_facebook_avatar(app_user, fb_profile["id"], profile, avatar)
profile.avatarId = avatar.key().id()
profile.profile_url = fb_profile.get("link")
profile.access_token = access_token
profile.version = 1
put_and_invalidate_cache(profile, ProfilePointer.create(app_user))
update_friends(profile, [u"name", u"avatar"])
update_mobiles(app_user, profile)
return profile
@returns(NoneType)
@arguments(app_user=users.User, access_token=unicode)
def couple_facebook_id_with_profile(app_user, access_token):
deferred.defer(_couple_facebook_id_with_profile, app_user, access_token)
def _couple_facebook_id_with_profile(app_user, access_token):
try:
gapi = facebook.GraphAPI(access_token)
fb_profile = gapi.get_object("me")
except facebook.GraphAPIError, e:
if e.type == "OAuthException":
# throwing a BusinessException(PermanentTaskFailure) will make sure the task won't retry and keep failing
raise BusinessException("Giving up because we caught an OAuthException: %s" % e)
else:
raise e
FacebookProfilePointer(key_name=fb_profile["id"], user=app_user).put()
_discover_registered_friends_via_facebook_profile(app_user, access_token)
def _discover_registered_friends_via_facebook_profile(app_user, access_token):
facebook_friends = get_friend_list_from_facebook(access_token)
friend_ids = list({f['id'] for f in facebook_friends})
matches = get_existing_profiles_via_facebook_ids(friend_ids, get_app_id_from_app_user(app_user))
invites_sent = db.get([db.Key.from_path(FacebookDiscoveryInvite.kind(), rtId.email(), parent=parent_key(app_user))
for _, rtId in matches])
new_invites = list()
    for match, invite_sent in zip(matches, invites_sent):
        fb_friend_user = match[1]
        if invite_sent:
logging.debug('%s and %s are already coupled in the past',
app_user.email(), fb_friend_user.email())
else:
logging.info('Creating friend connection between %s and %s because they\'re friends on facebook',
app_user.email(), fb_friend_user.email())
new_invites.append(FacebookDiscoveryInvite(key_name=fb_friend_user.email(), parent=parent_key(app_user)))
deferred.defer(makeFriends, app_user, fb_friend_user, fb_friend_user, servicetag=None, origin=None,
notify_invitee=False, notify_invitor=False, _countdown=30)
if new_invites:
db.put_async(new_invites)
def _send_message_to_inform_user_about_a_new_join(new_user, fb_friend_user):
def trans():
key_name = fb_friend_user.email()
parent = parent_key(new_user)
invite = FacebookDiscoveryInvite.get_by_key_name(key_name, parent)
if invite:
return
db.put_async(FacebookDiscoveryInvite(key_name=key_name, parent=parent))
friend_map = get_friends_map(new_user)
if fb_friend_user in friend_map.friends:
return
deferred.defer(_send_message_to_inform_user_about_a_new_join_step_2,
fb_friend_user, new_user, _transactional=True)
db.run_in_transaction(trans)
def _send_message_to_inform_user_about_a_new_join_step_2(fb_friend_user, new_user):
new_user_profile, fb_friend_profile = get_profile_infos(
[new_user, fb_friend_user], expected_types=[UserProfile, UserProfile])
azzert(new_user_profile.app_id == fb_friend_profile.app_id)
app_name = get_app_name_by_id(new_user_profile.app_id)
to_language = fb_friend_profile.language if fb_friend_profile else DEFAULT_LANGUAGE
message_text = localize(
to_language, "%(name)s just joined %(app_name)s, and we found you in his facebook friends list!", name=new_user_profile.name, app_name=app_name)
button = ButtonTO()
button.id = INVITE_ID
button.caption = localize(
to_language, "Invite %(name)s to connect on %(app_name)s", name=new_user_profile.name, app_name=app_name)
button.action = None
button.ui_flags = 0
def trans():
message = sendMessage(MC_DASHBOARD, [UserMemberTO(fb_friend_user)], Message.FLAG_ALLOW_DISMISS, 0, None,
message_text, [button], None, get_app_by_user(fb_friend_user).core_branding_hash,
INVITE_FACEBOOK_FRIEND, is_mfr=False)
message.invitor = fb_friend_user
message.invitee = new_user
message.put()
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(xg_on, trans)
@returns(NoneType)
@arguments(message=Message)
def ack_facebook_invite(message):
azzert(message.tag == INVITE_FACEBOOK_FRIEND)
memberStatus = message.memberStatusses[message.members.index(message.invitor)]
if not memberStatus.dismissed and message.buttons[memberStatus.button_index].id == INVITE_ID:
profile = get_user_profile(message.invitee)
if profile:
invite(message.invitor, message.invitee.email(), profile.name, None, profile.language, None, None,
get_app_id_from_app_user(message.invitor))
else:
logging.info('Invitee\'s profile doesn\'t exist anymore: %s', message.invitee)
@returns(NoneType)
@arguments(linkedin_client=LinkedInClient, token=unicode, secret=unicode, user=users.User)
def get_profile_for_linkedin_user(linkedin_client, token, secret, user):
profile_url = "http://api.linkedin.com/v1/people/~:(id,first-name,last-name,picture-url)"
response = linkedin_client.make_request(profile_url, token=token, secret=secret, headers={"x-li-format": "json"})
if response.status_code != 200:
raise Exception("Could not get connections from linkedin")
logging.info(response.content)
profile = json.loads(response.content)
url = profile.get('pictureUrl', None)
name = "%s %s" % (profile['firstName'], profile['lastName'])
if url:
avatar = urlfetch.fetch(url, deadline=10)
if avatar.status_code == 200:
avatar = avatar.content
else:
avatar = None
else:
avatar = None
pd = ProfileDiscoveryResult(parent=parent_key(user), type=ProfileDiscoveryResult.TYPE_LINKEDIN,
account=str(profile['id']), name=name, data=json.dumps(profile), avatar=avatar,
timestamp=now())
pd.put()
deferred.defer(update_profile_from_profile_discovery, user, pd, _transactional=db.is_in_transaction())
@returns(int)
@arguments(user_code=unicode)
def create_short_url(user_code):
su = ShortURL()
su.full = "/q/i" + user_code
su.put()
return su.key().id()
@returns([ShortURL])
@arguments(app_id=unicode, amount=(int, long))
def generate_unassigned_short_urls(app_id, amount):
@db.non_transactional
def allocate_ids():
return db.allocate_ids(db.Key.from_path(ShortURL.kind(), 1), amount) # (start, end)
result = list()
id_range = allocate_ids()
for short_url_id in xrange(id_range[0], id_range[1] + 1):
user_code = userCode(users.User("%s@%s" % (short_url_id, app_id)))
result.append(ShortURL(key=db.Key.from_path(ShortURL.kind(), short_url_id), full="/q/i" + user_code))
for c in chunks(result, 200):
db.put(c)
return result
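# Hedged usage sketch (hypothetical app id): pre-allocate a batch of loyalty
# short URLs; each allocated ShortURL id doubles as the local part of the
# synthetic '<id>@<app_id>' user used to derive the user code.
def _example_generate_short_urls():  # illustrative only
    return generate_unassigned_short_urls(u'rogerthat', 100)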
def _validate_name(name):
    if name is None:
        raise ValueError("Name cannot be null")
    if not name:
        raise ValueError("Name cannot be empty")
    name = name.strip().replace('@', ' at ')
    if len(name) > 50:
        raise ValueError("Name cannot be longer than 50 characters. name: '%s' length: %s" % (name, len(name)))
    return name
def _create_new_avatar(user, add_trial_overlay, image=None):
avatar = Avatar(user=user)
image = UNKNOWN_AVATAR if not image else base64.b64decode(image)
if add_trial_overlay:
image = add_trial_service_overlay(image)
avatar.picture = db.Blob(image)
avatar.put()
return avatar, image
@returns(UserProfile)
@arguments(app_user=users.User, name=unicode, language=unicode, ysaaa=bool, owncloud_password=unicode, image=unicode,
tos_version=(int, long, NoneType), consent_push_notifications_shown=bool)
def create_user_profile(app_user, name, language=None, ysaaa=False, owncloud_password=None, image=None,
tos_version=None, consent_push_notifications_shown=False):
# type: (users.User, unicode, unicode, bool, unicode, unicode, int, bool) -> UserProfile
name = _validate_name(name)
def trans_create(avatar_image):
azzert(not get_user_profile(app_user, cached=False))
avatar, image = _create_new_avatar(app_user, False, avatar_image)
user_profile = UserProfile(parent=parent_key(app_user), key_name=app_user.email())
user_profile.name = name
user_profile.language = language
user_profile.avatarId = avatar.key().id()
user_profile.app_id = get_app_id_from_app_user(app_user)
user_profile.owncloud_password = owncloud_password
if tos_version:
user_profile.tos_version = tos_version
if consent_push_notifications_shown:
user_profile.consent_push_notifications_shown = True
_calculateAndSetAvatarHash(user_profile, image)
put_and_invalidate_cache(user_profile, ProfilePointer.create(app_user), ProfileHashIndex.create(app_user))
return user_profile
user_profile = run_in_transaction(trans_create, True, image)
if not ysaaa:
schedule_re_index(app_user)
return user_profile
@returns(tuple)
@arguments(service_user=users.User, url=unicode, email=unicode)
def put_loyalty_user(service_user, url, email):
su = None # ShortURL
m = re.match("(HTTPS?://)(.*)/(M|S)/(.*)", url.upper())
if m:
from rogerthat.pages.shortner import get_short_url_by_code
code = m.group(4)
su = get_short_url_by_code(code)
if su and not su.full.startswith("/q/i"):
su = None
if su:
si = None
else:
        logging.debug('Create new unassigned short url, because the provided loyalty user URL is unknown (%s)', url)
si = get_default_service_identity(service_user)
su = generate_unassigned_short_urls(si.app_id, 1)[0]
url = su.full
user_code = su.full[4:]
pp = ProfilePointer.get(user_code)
if pp:
app_user = pp.user
else:
service_profile = get_service_profile(service_user)
si = si or get_default_service_identity(service_user)
app_id = si.app_id
app_user = put_loyalty_user_profile(email.strip(), app_id, user_code, su.key().id(),
service_profile.defaultLanguage)
return url, app_user
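# Illustrative (hypothetical URL): 'https://x.example.com/M/ab12' is matched by
# the pattern above with m.group(4) == 'AB12' (the URL is upper-cased first);
# the code is then resolved to a ShortURL whose .full must start with '/q/i'.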
@returns(users.User)
@arguments(email=unicode, app_id=unicode, user_code=unicode, short_url_id=(int, long), language=unicode)
def put_loyalty_user_profile(email, app_id, user_code, short_url_id, language):
app_user = create_app_user(users.User(email), app_id)
name = _validate_name(email)
def trans_create():
rogerthat_profile = get_service_or_user_profile(users.User(email))
if rogerthat_profile and isinstance(rogerthat_profile, ServiceProfile):
from rogerthat.bizz.service import AppFailedToCreateUserProfileWithExistingServiceException
raise AppFailedToCreateUserProfileWithExistingServiceException(email)
user_profile = get_user_profile(app_user, cached=False)
is_new_profile = False
if not user_profile:
deactivated_user_profile = get_deactivated_user_profile(app_user)
if deactivated_user_profile:
deferred.defer(reactivate_user_profile, deactivated_user_profile, app_user, _transactional=True)
ActivationLog(timestamp=now(), email=app_user.email(), mobile=None,
description="Reactivate user account by registering a paper loyalty card").put()
else:
is_new_profile = True
avatar, image = _create_new_avatar(app_user, add_trial_overlay=False)
user_profile = UserProfile(parent=parent_key(app_user), key_name=app_user.email())
user_profile.name = name
user_profile.language = language
user_profile.avatarId = avatar.key().id()
user_profile.app_id = app_id
_calculateAndSetAvatarHash(user_profile, image)
pp = ProfilePointer(key=db.Key.from_path(ProfilePointer.kind(), user_code))
pp.user = app_user
pp.short_url_id = short_url_id
if is_new_profile:
put_and_invalidate_cache(user_profile, pp, ProfilePointer.create(app_user))
else:
pp.put()
run_in_transaction(trans_create, True)
schedule_re_index(app_user)
return app_user
@returns(tuple)
@arguments(service_user=users.User, name=unicode, is_trial=bool, update_func=types.FunctionType,
supported_app_ids=[unicode])
def create_service_profile(service_user, name, is_trial=False, update_func=None, supported_app_ids=None):
from rogerthat.bizz.service import create_default_qr_templates
name = _validate_name(name)
if supported_app_ids is None:
default_app = get_default_app()
default_app_id = default_app.app_id if default_app else App.APP_ID_ROGERTHAT
supported_app_ids = [default_app_id]
else:
default_app_id = supported_app_ids[0]
def trans_prepare_create():
avatar, image = _create_new_avatar(service_user, is_trial)
from rogerthat.bizz.service import _create_recommendation_qr_code
share_sid_key = _create_recommendation_qr_code(service_user, ServiceIdentity.DEFAULT, default_app_id)
return avatar, image, share_sid_key
def trans_create(avatar, image, share_sid_key):
azzert(not get_service_profile(service_user, cached=False))
azzert(not get_default_service_identity_not_cached(service_user))
profile = ServiceProfile(parent=parent_key(service_user), key_name=service_user.email())
profile.avatarId = avatar.key().id()
_calculateAndSetAvatarHash(profile, image)
service_identity_user = create_service_identity_user(service_user, ServiceIdentity.DEFAULT)
service_identity = ServiceIdentity(key=ServiceIdentity.keyFromUser(service_identity_user))
service_identity.inheritanceFlags = 0
service_identity.name = name
service_identity.description = "%s (%s)" % (name, service_user.email())
service_identity.shareSIDKey = share_sid_key
service_identity.shareEnabled = False
service_identity.creationTimestamp = now()
service_identity.defaultAppId = supported_app_ids[0]
service_identity.appIds = supported_app_ids
update_result = update_func(profile, service_identity) if update_func else None
put_and_invalidate_cache(profile, service_identity,
ProfilePointer.create(service_user),
ProfileHashIndex.create(service_user))
deferred.defer(create_default_qr_templates, service_user, _transactional=True)
return profile, service_identity, update_result
avatar, image, share_sid_key = run_in_transaction(trans_prepare_create, True)
try:
profile, service_identity, update_result = run_in_transaction(trans_create, True, avatar, image, share_sid_key)
return (profile, service_identity, update_result) if update_func else (profile, service_identity)
except:
db.delete([avatar, share_sid_key])
raise
def update_password_hash(profile, passwordHash, lastUsedMgmtTimestamp):
profile.passwordHash = passwordHash
profile.lastUsedMgmtTimestamp = lastUsedMgmtTimestamp
profile.put()
def update_user_profile(app_user, name, image, language):
def trans():
user_profile = get_user_profile(app_user)
changed_properties = []
if user_profile.language != language:
user_profile.language = language
changed_properties.append(u"language")
db.delete_async(get_broadcast_settings_flow_cache_keys_of_user(app_user))
if user_profile.name != name:
changed_properties.append(u"name")
user_profile.name = name
if image:
_update_avatar(user_profile, image, False)
changed_properties.append(u"avatar")
user_profile.version += 1
user_profile.put()
update_mobiles(app_user, user_profile) # update myIdentity
update_friends(user_profile, changed_properties) # notify my friends
return user_profile
user_profile = run_in_transaction(trans, xg=True)
schedule_re_index(app_user)
return user_profile
def update_service_profile(service_user, image, add_trial_overlay):
from rogerthat.bizz.job.update_friends import schedule_update_all_friends_of_service_user
def trans():
service_profile = get_service_profile(service_user)
if image:
_update_avatar(service_profile, image, add_trial_overlay)
service_profile.version += 1
service_profile.put()
schedule_update_all_friends_of_service_user(service_profile)
return run_in_transaction(trans, True)
def _update_avatar(profile, image, add_trial_overlay):
_meta, img_b64 = image.split(',')
image_bytes = base64.b64decode(img_b64)
img = images.Image(str(image_bytes))
img.resize(150, 150)
avatar = get_avatar_by_id(profile.avatarId)
if not avatar:
avatar = Avatar(user=profile.user)
image = img.execute_transforms(images.PNG, 100)
if add_trial_overlay:
image = add_trial_service_overlay(image)
update_avatar_profile(profile, avatar, image)
@returns(NoneType)
@arguments(service_user=users.User, image=str)
def update_service_avatar(service_user, image):
img = images.Image(image)
img.im_feeling_lucky()
img.execute_transforms()
if img.height != img.width:
        deviation = float(img.width) / float(img.height)
        if deviation < 0.95 or deviation > 1.05:
from rogerthat.bizz.service import AvatarImageNotSquareException
logging.debug("Avatar Size: %sx%s" % (img.width, img.height))
raise AvatarImageNotSquareException()
img = images.Image(image)
img.resize(150, 150)
image = img.execute_transforms(images.PNG, 100)
if is_trial_service(service_user):
image = add_trial_service_overlay(image)
def trans():
service_profile = get_service_profile(service_user)
avatar = get_avatar_by_id(service_profile.avatarId)
if not avatar:
avatar = Avatar(user=service_profile.user)
update_avatar_profile(service_profile, avatar, image)
service_profile.version += 1
service_profile.put()
from rogerthat.bizz.job.update_friends import schedule_update_all_friends_of_service_user
schedule_update_all_friends_of_service_user(service_profile)
return run_in_transaction(trans, xg=True)
def update_avatar_profile(profile, avatar, image):
avatar.picture = db.Blob(image)
avatar.put()
profile.avatarId = avatar.key().id()
_calculateAndSetAvatarHash(profile, image)
def add_trial_service_overlay(image):
image_width = images.Image(image).width
scale = image_width / 50.0
overlay = _get_trial_service_overlay()
if scale != 1:
overlay_img = images.Image(overlay)
new_size = int(scale * overlay_img.width)
overlay_img.resize(new_size, new_size)
overlay = overlay_img.execute_transforms(overlay_img.format, 100)
return composite([(image, 0, 0, 1.0, TOP_LEFT),
(overlay, int(5 * scale), int(-5 * scale), 1.0, BOTTOM_LEFT)], image_width, image_width)
@returns(unicode)
@arguments(user=users.User, app_id=unicode)
def get_profile_info_name(user, app_id):
if user == MC_DASHBOARD:
app_name = get_app_name_by_id(app_id)
if app_id == App.APP_ID_ROGERTHAT:
return u"%s Dashboard" % app_name
else:
return app_name
else:
profile_info = get_profile_info(user)
if profile_info:
return profile_info.name or profile_info.qualifiedIdentifier or remove_slash_default(user).email()
else:
return user.email()
def update_profile_from_profile_discovery(app_user, discovery):
azzert(discovery.user == app_user)
changed_properties = []
user_profile = get_user_profile(app_user)
new_name = discovery.name.strip()
if user_profile.name != new_name:
changed_properties.append(u"name")
user_profile.name = new_name
if discovery.avatar:
img = images.Image(str(discovery.avatar))
img.resize(150, 150)
avatar = get_avatar_by_id(user_profile.avatarId)
if not avatar:
avatar = Avatar(user=app_user)
image = img.execute_transforms(images.PNG, 100)
avatar.picture = db.Blob(image)
avatar.put()
user_profile.avatarId = avatar.key().id()
_calculateAndSetAvatarHash(user_profile, image)
changed_properties.append(u"avatar")
user_profile.version += 1
user_profile.put()
update_mobiles(app_user, user_profile)
update_friends(user_profile, changed_properties)
@returns(NoneType)
@arguments(profile_info=ProfileInfo, changed_properties=[unicode])
def update_friends(profile_info, changed_properties=None):
"""If profile_info is human user ==> update friends and services of human_user
If profile_info is service_identity ==> update human friendMaps of service_identity"""
from rogerthat.bizz.job.update_friends import schedule_update_friends_of_profile_info
schedule_update_friends_of_profile_info(profile_info, changed_properties)
@returns([users.User])
@arguments(app_user=users.User, users_=[users.User])
def find_rogerthat_users_via_email(app_user, users_):
users_ = filter(is_clean_app_user_email, users_)
users_ = [p.user for p in get_existing_user_profiles(users_)]
result = list()
friend_map = get_friends_map(app_user)
for u in users_:
if u in friend_map.friends:
continue
result.append(u)
return result
@returns([FacebookRogerthatProfileMatchTO])
@arguments(app_user=users.User, access_token=unicode)
def find_rogerthat_users_via_facebook(app_user, access_token):
couple_facebook_id_with_profile(app_user, access_token)
friends = get_friend_list_from_facebook(access_token)
friends_dict = dict([(f['id'], (f['name'], f['picture']['data']['url'])) for f in friends])
matches = get_existing_profiles_via_facebook_ids(friends_dict.keys(), get_app_id_from_app_user(app_user))
result = list()
friend_map = get_friends_map(app_user)
for fbId, rtId in matches:
if rtId in friend_map.friends:
continue
result.append(FacebookRogerthatProfileMatchTO(
fbId, get_human_user_from_app_user(rtId).email(), friends_dict[fbId][0], friends_dict[fbId][1]))
return result
def get_friend_list_from_facebook(access_token):
args = dict()
args["access_token"] = access_token
args["fields"] = 'name,picture'
result = urlfetch.fetch(url="https://graph.facebook.com/me/friends?" + urlencode(args), deadline=55)
logging.info(result.content)
if result.status_code == 200:
return json.loads(result.content)["data"]
raise Exception("Could not get friend list from facebook!\nstatus: %s\nerror:%s" %
(result.status_code, result.content))
def _calculateAndSetAvatarHash(profile, image):
digester = hashlib.sha256()
digester.update(image)
profile.avatarHash = digester.hexdigest().upper()
logging.info("New avatar hash: %s", profile.avatarHash)
from rogerthat.pages.profile import get_avatar_cached
invalidate_cache(get_avatar_cached, profile.avatarId, 50)
invalidate_cache(get_avatar_cached, profile.avatarId, 67)
invalidate_cache(get_avatar_cached, profile.avatarId, 100)
invalidate_cache(get_avatar_cached, profile.avatarId, 150)
@returns(NoneType)
@arguments(user=users.User, user_profile=UserProfile, skipped_mobile=Mobile, countdown=(int, long))
def update_mobiles(user, user_profile, skipped_mobile=None, countdown=5):
request = IdentityUpdateRequestTO()
request.identity = get_identity(user, user_profile)
deferred.defer(_update_mobiles_deferred, user, request, skipped_mobile, _transactional=db.is_in_transaction(),
_countdown=countdown)
def _update_mobiles_deferred(user, request, skipped_mobile):
logging.info("Updating mobile of user %s" % user)
extra_kwargs = dict()
if skipped_mobile is not None:
extra_kwargs[SKIP_ACCOUNTS] = [skipped_mobile.account]
identityUpdate(identity_update_response_handler, logError, user, request=request, **extra_kwargs)
_TRIAL_SERVICE_OVERLAY_PATH = os.path.join(CURRENT_DIR, 'trial_service_overlay.png')
def _get_trial_service_overlay():
f = open(_TRIAL_SERVICE_OVERLAY_PATH, "rb")
try:
return f.read()
finally:
f.close()
@returns(NoneType)
@arguments(service_user=users.User, app_user=users.User, data_string=unicode)
def set_profile_data(service_user, app_user, data_string):
from rogerthat.bizz.service import InvalidJsonStringException, InvalidValueException, FriendNotFoundException
data_object = None
    try:
        data_object = json.loads(data_string)
    except ValueError:
        raise InvalidJsonStringException()
if data_object is None:
raise InvalidJsonStringException()
if not isinstance(data_object, dict):
raise InvalidJsonStringException()
for k, v in data_object.iteritems():
if not isinstance(v, basestring):
raise InvalidValueException(k, u"The values of profile_data must be strings")
if not data_object:
return
def trans(data_update):
user_profile = get_user_profile(app_user, cached=False)
if not user_profile:
logging.info('User %s not found', app_user.email())
raise FriendNotFoundException()
# Deserialize key-value store
data = json.loads(user_profile.profileData) if user_profile.profileData else dict()
# Update existing user data with new values
data.update(data_update)
# Remove keys with empty values
for key in [key for key, value in data.iteritems() if value is None]:
data.pop(key)
user_profile.profileData = json.dumps(data) if data else None
user_profile.put()
on_trans_committed(update_mobiles, app_user, user_profile, countdown=0)
on_trans_committed(schedule_re_index, app_user)
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(xg_on, trans, data_object)
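# Hedged usage sketch (hypothetical values):
#   set_profile_data(service_user, app_user, u'{"loyalty_id": "123"}')
# merges {'loyalty_id': '123'} into the user's existing profileData, pushes the
# updated identity to the user's mobiles and re-indexes the user once the
# transaction has committed.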
def _archive_friend_connection(fsic_key):
app_user = users.User(fsic_key.parent().name())
service_identity_user = users.User(fsic_key.name())
def trans():
to_put = list()
user_data_key = UserData.createKey(app_user, service_identity_user)
fsic, user_data = db.get([fsic_key, user_data_key])
if fsic:
archived_fsic = fsic.archive(FriendServiceIdentityConnectionArchive)
to_put.append(archived_fsic)
if user_data:
archived_user_data = user_data.archive(UserDataArchive)
to_put.append(archived_user_data)
if to_put:
db.put(to_put)
db.run_in_transaction(trans)
breakFriendShip(service_identity_user, app_user)
def _unarchive_friend_connection(fsic_archive_key):
app_user = users.User(fsic_archive_key.parent().name())
service_identity_user = users.User(fsic_archive_key.name())
user_data_key = UserDataArchive.createKey(app_user, service_identity_user)
fsic_archive, user_data_archive = db.get([fsic_archive_key, user_data_key])
to_delete = [fsic_archive]
if user_data_archive:
user_data_data = user_data_archive.data
to_delete.append(user_data_archive)
else:
user_data_data = None
# set disabled and enabled broadcast types
def trans():
fsic = get_friend_serviceidentity_connection(app_user, service_identity_user)
fsic.disabled_broadcast_types = fsic_archive.disabled_broadcast_types
fsic.enabled_broadcast_types = fsic_archive.enabled_broadcast_types
fsic.put()
db.delete(to_delete)
deferred.defer(makeFriends, service_identity_user, app_user, app_user, None, None, notify_invitee=False,
notify_invitor=False, user_data=user_data_data, _countdown=2, _transactional=True)
db.run_in_transaction(trans)
@returns()
@arguments(service_user=users.User)
def set_service_disabled(service_user):
"""
    Disconnects all connected users, stores them in an archive and deletes the service from the search index.
"""
from rogerthat.bizz.service import _cleanup_search_index, SERVICE_INDEX, SERVICE_LOCATION_INDEX
from rogerthat.bizz.job.delete_service import remove_autoconnected_service
def trans():
to_put = list()
service_profile = get_service_profile(service_user)
service_profile.expiredAt = now()
service_profile.enabled = False
to_put.append(service_profile)
service_identity_keys = get_service_identities_query(service_user, True)
search_configs = db.get(
[SearchConfig.create_key(create_service_identity_user(users.User(key.parent().name()), key.name())) for
key in service_identity_keys])
svc_index = search.Index(name=SERVICE_INDEX)
loc_index = search.Index(name=SERVICE_LOCATION_INDEX)
for search_config in search_configs:
if search_config:
search_config.enabled = False
to_put.append(search_config)
on_trans_committed(_cleanup_search_index, search_config.service_identity_user.email(), svc_index,
loc_index)
for objects_to_put in chunks(to_put, 200):
put_and_invalidate_cache(*objects_to_put)
deferred.defer(cleanup_sessions, service_user, _transactional=True)
deferred.defer(cleanup_friend_connections, service_user, _transactional=True)
deferred.defer(remove_autoconnected_service, service_user, _transactional=True)
run_in_transaction(trans, True)
@returns()
@arguments(service_user=users.User)
def cleanup_friend_connections(service_user):
run_job(get_all_service_friend_keys_query, [service_user], _archive_friend_connection, [])
@returns()
@arguments(service_user=users.User)
def cleanup_sessions(service_user):
for user_profile_key in UserProfile.all(keys_only=True).filter('owningServiceEmails', service_user.email()):
drop_sessions_of_user(users.User(user_profile_key.name()))
drop_sessions_of_user(service_user)
send_message(service_user, 'rogerthat.system.logout')
@returns()
@arguments(service_user=users.User)
def set_service_enabled(service_user):
"""
Re-enables the service profile and restores all connected users.
"""
service_profile = get_service_profile(service_user)
service_profile.expiredAt = 0
service_profile.enabled = True
service_profile.put()
run_job(get_all_archived_service_friend_keys_query, [service_user], _unarchive_friend_connection, [])
| rogerthat-platform/rogerthat-backend | src/rogerthat/bizz/profile.py | Python | apache-2.0 | 48,219 | 0.003152 |
import string
import os.path
import urllib
import cPickle
import copy
import sys
from glideinwms.creation.lib.matchPolicy import MatchPolicy
from glideinwms.lib import hashCrypto
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# Frontend config related classes
#
############################################################
#
# Configuration
#
############################################################
class FrontendConfig:
def __init__(self):
# set default values
# user should modify if needed
self.frontend_descript_file = "frontend.descript"
self.group_descript_file = "group.descript"
self.params_descript_file = "params.cfg"
self.attrs_descript_file = "attrs.cfg"
self.signature_descript_file = "signatures.sha1"
self.signature_type = "sha1"
self.history_file = "history.pk"
# global configuration of the module
frontendConfig=FrontendConfig()
############################################################
#
# Helper function
#
############################################################
def get_group_dir(base_dir,group_name):
return os.path.join(base_dir,"group_"+group_name)
############################################################
#
# Generic Class
# You most probably don't want to use these
#
############################################################
# loads a file or URL composed of
# NAME VAL
# and creates
# self.data[NAME]=VAL
# It also defines:
# self.config_file="name of file"
# If validate is defined, also defines
# self.hash_value
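# Example (hypothetical file contents):
#   Groups maingroup,othergroup
#   LoopDelay 60
# loaded with convert_function=repr yields
#   self.data == {'Groups': 'maingroup,othergroup', 'LoopDelay': '60'}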
class ConfigFile:
def __init__(self,config_dir,config_file,convert_function=repr,
validate=None): # if defined, must be (hash_algo,value)
self.config_dir=config_dir
self.config_file=config_file
self.data={}
self.load(os.path.join(config_dir,config_file),convert_function,validate)
self.derive()
def open(self,fname):
if (fname[:5]=="http:") or (fname[:6]=="https:") or (fname[:4]=="ftp:"):
# one of the supported URLs
return urllib.urlopen(fname)
else:
# local file
return open(fname,"r")
def validate_func(self,data,validate,fname):
if validate is not None:
vhash=hashCrypto.get_hash(validate[0],data)
self.hash_value=vhash
if (validate[1] is not None) and (vhash!=validate[1]):
raise IOError, "Failed validation of '%s'. Hash %s computed to '%s', expected '%s'"%(fname,validate[0],vhash,validate[1])
def load(self,fname,convert_function,
validate=None): # if defined, must be (hash_algo,value)
self.data={}
fd=self.open(fname)
try:
data=fd.read()
self.validate_func(data,validate,fname)
lines=data.splitlines()
del data
for line in lines:
if line[0]=="#":
continue # comment
if len(string.strip(line))==0:
continue # empty line
self.split_func(line,convert_function)
finally:
fd.close()
def split_func(self,line,convert_function):
larr=string.split(line,None,1)
lname=larr[0]
if len(larr)==1:
lval=""
else:
lval=larr[1]
exec("self.data['%s']=%s"%(lname,convert_function(lval)))
def derive(self):
return # by default, do nothing
def __str__(self):
output = '\n'
for key in self.data.keys():
output += '%s = %s, (%s)\n' % (key, str(self.data[key]), type(self.data[key]))
return output
# load from the group subdir
class GroupConfigFile(ConfigFile):
def __init__(self,base_dir,group_name,config_file,convert_function=repr,
validate=None): # if defined, must be (hash_algo,value)
ConfigFile.__init__(self,get_group_dir(base_dir,group_name),config_file,convert_function,validate)
self.group_name=group_name
# load both the main and group subdir config file
# and join the results
# Also defines:
# self.group_hash_value, if group_validate defined
class JoinConfigFile(ConfigFile):
def __init__(self,base_dir,group_name,config_file,convert_function=repr,
main_validate=None,group_validate=None): # if defined, must be (hash_algo,value)
ConfigFile.__init__(self,base_dir,config_file,convert_function,main_validate)
self.group_name=group_name
group_obj=GroupConfigFile(base_dir,group_name,config_file,convert_function,group_validate)
if group_validate is not None:
self.group_hash_value=group_obj.hash_value
#merge by overriding whatever is found in the subdir
for k in group_obj.data.keys():
self.data[k]=group_obj.data[k]
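# Join semantics sketch (hypothetical values): with main data
# {'a': '1', 'b': '2'} and group data {'b': '3'}, the joined
# self.data is {'a': '1', 'b': '3'}; group entries always win.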
############################################################
#
# Configuration
#
############################################################
class FrontendDescript(ConfigFile):
def __init__(self,config_dir):
global frontendConfig
ConfigFile.__init__(self,config_dir,frontendConfig.frontend_descript_file,
repr) # convert everything in strings
class ElementDescript(GroupConfigFile):
def __init__(self,base_dir,group_name):
global frontendConfig
GroupConfigFile.__init__(self,base_dir,group_name,frontendConfig.group_descript_file,
repr) # convert everything in strings
class ParamsDescript(JoinConfigFile):
def __init__(self,base_dir,group_name):
global frontendConfig
JoinConfigFile.__init__(self,base_dir,group_name,frontendConfig.params_descript_file,
lambda s:"('%s',%s)"%tuple(s.split(None,1))) # split the array
self.const_data={}
self.expr_data={} # original string
self.expr_objs={} # compiled object
for k in self.data.keys():
type_str,val=self.data[k]
if type_str=='EXPR':
self.expr_objs[k]=compile(val,"<string>","eval")
self.expr_data[k]=val
elif type_str=='CONST':
self.const_data[k]=val
else:
raise RuntimeError, "Unknown parameter type '%s' for '%s'!"%(type_str,k)
class AttrsDescript(JoinConfigFile):
def __init__(self,base_dir,group_name):
global frontendConfig
JoinConfigFile.__init__(self,base_dir,group_name,frontendConfig.attrs_descript_file,
str) # they are already in python form
# this one is the special frontend work dir signature file
class SignatureDescript(ConfigFile):
def __init__(self,config_dir):
global frontendConfig
ConfigFile.__init__(self,config_dir,frontendConfig.signature_descript_file,
None) # Not used, redefining split_func
self.signature_type=frontendConfig.signature_type
def split_func(self,line,convert_function):
larr=string.split(line,None)
if len(larr)!=3:
raise RuntimeError, "Invalid line (expected 3 elements, found %i)"%len(larr)
self.data[larr[2]]=(larr[0],larr[1])
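# Each signatures.sha1 line carries three tokens, '<hash> <fname> <id>', e.g.
# (hypothetical): '0123abcd... frontend.descript main', stored here as
# self.data['main'] == ('0123abcd...', 'frontend.descript').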
# this one is the generic hash descript file
class BaseSignatureDescript(ConfigFile):
def __init__(self,config_dir,signature_fname,signature_type,validate=None):
ConfigFile.__init__(self,config_dir,signature_fname,
None, # Not used, redefining split_func
validate)
self.signature_type=signature_type
def split_func(self,line,convert_function):
larr=string.split(line,None,1)
if len(larr)!=2:
raise RuntimeError, "Invalid line (expected 2 elements, found %i)"%len(larr)
lval=larr[1]
self.data[lval]=larr[0]
############################################################
#
# Processed configuration
#
############################################################
# not everything is merged
# the old element can still be accessed
class ElementMergedDescript:
def __init__(self,base_dir,group_name):
self.frontend_data=FrontendDescript(base_dir).data
if not (group_name in string.split(self.frontend_data['Groups'],',')):
raise RuntimeError, "Group '%s' not supported: %s"%(group_name,self.frontend_data['Groups'])
self.element_data=ElementDescript(base_dir,group_name).data
self.group_name=group_name
self.merge()
#################
# Private
def merge(self):
self.merged_data={}
for t in ('JobSchedds',):
self.merged_data[t]=self.split_list(self.frontend_data[t])+self.split_list(self.element_data[t])
if len(self.merged_data[t])==0:
raise RuntimeError,"Found empty %s!"%t
for t in ('FactoryCollectors',):
self.merged_data[t]=eval(self.frontend_data[t])+eval(self.element_data[t])
if len(self.merged_data[t])==0:
raise RuntimeError,"Found empty %s!"%t
for t in ('FactoryQueryExpr','JobQueryExpr'):
self.merged_data[t]="(%s) && (%s)"%(self.frontend_data[t],self.element_data[t])
for data in (self.frontend_data, self.element_data):
if 'MatchPolicyModule%s'%t in data:
self.merged_data[t] = '(%s) && (%s)' % (
self.merged_data[t], data['MatchPolicyModule%s'%t])
        # PM: TODO: Not sure why FactoryMatchAttrs was not in the list below.
        # To get the complete list of FactoryMatchAttrs you need to merge it.
for t in ('JobMatchAttrs','FactoryMatchAttrs'):
attributes=[]
names=[]
match_attrs_list = eval(self.frontend_data[t]) + eval(self.element_data[t])
for data in (self.frontend_data, self.element_data):
if 'MatchPolicyModule%s'%t in data:
match_attrs_list += eval(data['MatchPolicyModule%s'%t])
for el in match_attrs_list:
el_name=el[0]
if not (el_name in names):
attributes.append(el)
names.append(el_name)
self.merged_data[t]=attributes
for t in ('MatchExpr',):
self.merged_data[t]="(%s) and (%s)"%(self.frontend_data[t],self.element_data[t])
self.merged_data[t+'CompiledObj']=compile(self.merged_data[t],"<string>","eval")
self.merged_data['MatchPolicyModules'] = []
if 'MatchPolicyFile' in self.frontend_data:
self.merged_data['MatchPolicyModules'].append(MatchPolicy(self.frontend_data['MatchPolicyFile']))
if 'MatchPolicyFile' in self.element_data:
self.merged_data['MatchPolicyModules'].append(MatchPolicy(self.element_data['MatchPolicyFile']))
# We use default ProxySelectionPlugin
self.merged_data['ProxySelectionPlugin']='ProxyAll'
for t in ('ProxySelectionPlugin','SecurityName'):
for data in (self.frontend_data,self.element_data):
if data.has_key(t):
self.merged_data[t]=data[t]
proxies=[]
for data in (self.frontend_data,self.element_data):
if data.has_key('Proxies'):
proxies+=eval(data['Proxies'])
self.merged_data['Proxies']=proxies
proxy_descript_attrs=['ProxySecurityClasses','ProxyTrustDomains',
'ProxyTypes','ProxyKeyFiles','ProxyPilotFiles','ProxyVMIds',
'ProxyVMTypes','ProxyCreationScripts','ProxyUpdateFrequency', 'ProxyVMIdFname', 'ProxyVMTypeFname']
for attr in proxy_descript_attrs:
proxy_descript_data={}
for data in (self.frontend_data,self.element_data):
if data.has_key(attr):
dprs=eval(data[attr])
for k in dprs.keys():
proxy_descript_data[k]=dprs[k]
self.merged_data[attr]=proxy_descript_data
return
def split_list(self,val):
if val=='None':
return []
elif val=='':
return []
else:
return string.split(val,',')
class GroupSignatureDescript:
def __init__(self,base_dir,group_name):
self.group_name=group_name
sd=SignatureDescript(base_dir)
self.signature_data=sd.data
self.signature_type=sd.signature_type
fd=sd.data['main']
self.frontend_descript_fname=fd[1]
self.frontend_descript_signature=fd[0]
gd=sd.data['group_%s'%group_name]
self.group_descript_fname=gd[1]
self.group_descript_signature=gd[0]
class StageFiles:
def __init__(self,base_URL,descript_fname,validate_algo,signature_hash):
self.base_URL=base_URL
self.validate_algo=validate_algo
self.stage_descript=ConfigFile(base_URL, descript_fname, repr,
(validate_algo,None)) # just get the hash value... will validate later
self.signature_descript=BaseSignatureDescript(base_URL,self.stage_descript.data['signature'],validate_algo,(validate_algo,signature_hash))
if self.stage_descript.hash_value!=self.signature_descript.data[descript_fname]:
raise IOError, "Descript file %s signature invalid, expected'%s' got '%s'"%(descript_fname,self.signature_descript.data[descript_fname],self.stage_descript.hash_value)
def get_stage_file(self,fname,repr):
return ConfigFile(self.base_URL,fname,repr,
(self.validate_algo,self.signature_descript.data[fname]))
def get_file_list(self,list_type): # example list_type == 'preentry_file_list'
if not self.stage_descript.data.has_key(list_type):
raise KeyError,"Unknown list type '%s'; valid typtes are %s"%(list_type,self.stage_descript.data.keys())
list_fname=self.stage_descript.data[list_type]
return self.get_stage_file(self.stage_descript.data[list_type],
lambda x:string.split(x,None,4))
# this class knows how to interpret some of the files in the Stage area
class ExtStageFiles(StageFiles):
def __init__(self,base_URL,descript_fname,validate_algo,signature_hash):
StageFiles.__init__(self,base_URL,descript_fname,validate_algo,signature_hash)
self.preentry_file_list=None
def get_constants(self):
self.load_preentry_file_list()
return self.get_stage_file(self.preentry_file_list.data['constants.cfg'][0],repr)
def get_condor_vars(self):
self.load_preentry_file_list()
return self.get_stage_file(self.preentry_file_list.data['condor_vars.lst'][0],lambda x:string.split(x,None,6))
# internal
def load_preentry_file_list(self):
if self.preentry_file_list is None:
self.preentry_file_list=self.get_file_list('preentry_file_list')
# else, nothing to do
# this class knows how to interpret some of the files in the Stage area
# Will appropriately merge the main and the group ones
class MergeStageFiles:
def __init__(self,base_URL,validate_algo,
main_descript_fname,main_signature_hash,
group_name,group_descript_fname,group_signature_hash):
self.group_name=group_name
self.main_stage=ExtStageFiles(base_URL,main_descript_fname,validate_algo,main_signature_hash)
self.group_stage=ExtStageFiles(get_group_dir(base_URL,group_name),group_descript_fname,validate_algo,group_signature_hash)
def get_constants(self):
main_consts=self.main_stage.get_constants()
group_consts=self.group_stage.get_constants()
# group constants override the main ones
for k in group_consts.data.keys():
main_consts.data[k]=group_consts.data[k]
main_consts.group_name=self.group_name
main_consts.group_hash_value=group_consts.hash_value
return main_consts
def get_condor_vars(self):
main_cv=self.main_stage.get_condor_vars()
group_cv=self.group_stage.get_condor_vars()
# group condor_vars override the main ones
for k in group_cv.data.keys():
main_cv.data[k]=group_cv.data[k]
main_cv.group_name=self.group_name
main_cv.group_hash_value=group_cv.hash_value
return main_cv
############################################################
#
# The FrontendGroups may want to preserve some state between
# iterations/invocations. The HistoryFile class provides
# the needed support for this.
#
# There is no fixed schema in the class itself;
# the FrontedGroup is free to store any arbitrary dictionary
# in it.
#
############################################################
class HistoryFile:
def __init__(self, base_dir, group_name, load_on_init = True,
default_factory=None):
"""
The default_factory semantics is the same as the one in collections.defaultdict
"""
self.base_dir = base_dir
self.group_name = group_name
self.fname = os.path.join(get_group_dir(base_dir, group_name), frontendConfig.history_file)
self.default_factory = default_factory
# cannot use collections.defaultdict directly
# since it is only supported starting python 2.5
self.data = {}
if load_on_init:
self.load()
def load(self, raise_on_error = False):
try:
fd = open(self.fname,'r')
try:
data = cPickle.load(fd)
finally:
fd.close()
except:
if raise_on_error:
raise
else:
# default to empty history on error
data = {}
if type(data) != type({}):
if raise_on_error:
raise TypeError, "History object not a dictionary: %s" % str(type(data))
else:
# default to empty history on error
data = {}
self.data = data
def save(self, raise_on_error = False):
try:
# there is no concurrency, so does not need to be done atomically
fd = open(self.fname, 'w')
try:
cPickle.dump(self.data, fd, cPickle.HIGHEST_PROTOCOL)
finally:
fd.close()
except:
if raise_on_error:
raise
#else, just ignore
def has_key(self, keyid):
return (keyid in self.data)
def __contains__(self, keyid):
return (keyid in self.data)
def __getitem__(self, keyid):
try:
return self.data[keyid]
except KeyError,e:
if self.default_factory is None:
raise # no default initialization, just fail
# i have the initialization function, use it
self.data[keyid] = self.default_factory()
return self.data[keyid]
def __setitem__(self, keyid, val):
self.data[keyid] = val
def __delitem__(self, keyid):
del self.data[keyid]
def empty(self):
self.data = {}
def get(self, keyid, defaultval=None):
return self.data.get(keyid, defaultval)
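# A minimal usage sketch (hypothetical base_dir and group name), showing the
# defaultdict-like behavior enabled by default_factory:
#   history = HistoryFile('/var/lib/frontend', 'mygroup', default_factory=dict)
#   history['seen_jobs']['job.1'] = 'done' # missing key auto-initialized to {}
#   history.save()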
| bbockelm/glideinWMS | frontend/glideinFrontendConfig.py | Python | bsd-3-clause | 19,293 | 0.020577 |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Administrator
#
# Created: 08/10/2011
# Copyright: (c) Administrator 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
class Gol01:
    def __init__(self):
        self.isCellAlive = False
    def setAlive(self, width, height):
        self.isCellAlive = True
        return True
def isAlive(self):
return self.isCellAlive
def survives(self):
return False
if __name__ == '__main__':
pass | hemmerling/codingdojo | src/game_of_life/python_coderetreat_berlin_2014-09/python_legacycrberlin01/gol01.py | Python | apache-2.0 | 685 | 0.013139 |
from edc_base.model_mixins import BaseModel, ListModelMixin
class Cause(ListModelMixin, BaseModel):
class Meta:
ordering = ['display_index']
app_label = 'edc_death_report'
| botswana-harvard/edc-death-report | edc_death_report/models/cause.py | Python | gpl-2.0 | 196 | 0 |
import _plotly_utils.basevalidators
class VsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="vsrc", parent_name="cone", **kwargs):
super(VsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/cone/_vsrc.py | Python | mit | 426 | 0 |
# -*- coding: utf-8 -*-
from eclcli.common import command
from eclcli.common import exceptions
from eclcli.common import utils
from eclcli.identity import common as identity_common
from ..sssclient.common.utils import objectify
class SetAPIKeypair(command.ShowOne):
def get_parser(self, prog_name):
parser = super(SetAPIKeypair, self).get_parser(prog_name)
parser.add_argument(
'user_id',
metavar="<uuid>",
help=(""),
)
return parser
def take_action(self, parsed_args):
sss_client = self.app.client_manager.sss
user_id = parsed_args.user_id
keypair = sss_client.set_api_keypair(user_id)
columns = utils.get_columns(keypair)
obj = objectify(keypair)
data = utils.get_item_properties(obj, columns)
return (columns, data)
| anythingrandom/eclcli | eclcli/sss/v1/api_keypair.py | Python | apache-2.0 | 878 | 0.006834 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import division
from __future__ import print_function
'''
Collected utilities for pygame
It is difficult to write pixels directly in python.
There's some way to get a framebuffer back from Tk, but it is
cumbersome.
The module pygame supports sending pixel buffers,
which is wrapped for convenience in this module.
example usage
import neurotools.graphics.pygame as npg
import time
import numpy as np
import pygame
K = 128
screen = npg.start(K,K,'Image data')
dt = 1/20
wait_til = time.time() + dt
print('Animating..')
for i in neurotools.tools.progress_bar(range(100)):
t = time.time()
if t<wait_til: time.sleep(wait_til-t)
wait_til = t + dt
npg.draw_array(screen, np.random.rand(K,K,3))
pygame.quit()
'''
import sys
import numpy as np
try:
import pygame as pg
except ImportError:
print('pygame package is missing; it is obsolete so this is not unusual')
print('pygame graphics will not work')
pg = None
def enable_vsync():
if sys.platform != 'darwin':
return
try:
import ctypes
import ctypes.util
ogl = ctypes.cdll.LoadLibrary(ctypes.util.find_library("OpenGL"))
# set v to 1 to enable vsync, 0 to disable vsync
v = ctypes.c_int(1)
ogl.CGLSetParameter(ogl.CGLGetCurrentContext(), ctypes.c_int(222), ctypes.pointer(v))
except:
print("Unable to set vsync mode, using driver defaults")
def start(W,H,name='untitled'):
# Get things going
pg.quit()
pg.init()
enable_vsync()
window = pg.display.set_mode((W,H))
pg.display.set_caption(name)
return window
def draw_array(screen,rgbdata,doshow=True):
'''
Send array data to a PyGame window.
    PyGame is BGR order which is unusual -- reorder it.
Parameters
----------
screen : object
Object returned by neurotools.graphics.pygame.start
rgbdata :
RGB image data with color values in [0,1]
'''
# Cast to int
rgbdata = np.int32(rgbdata*255)
# clip bytes to 0..255 range
rgbdata[rgbdata<0]=0
rgbdata[rgbdata>255]=255
# get color dimension
if len(rgbdata.shape)==3:
w,h,d = rgbdata.shape
else:
w,h = rgbdata.shape
d=1
# repack color data in screen format
draw = np.zeros((w,h,4),'uint8')
if d==1:
draw[...,0]=rgbdata
draw[...,1]=rgbdata
draw[...,2]=rgbdata
draw[...,3]=255 # alpha channel
if d==3:
draw[...,:3]=rgbdata[...,::-1]
draw[...,-1]=255 # alpha channel
if d==4:
draw[...,:3]=rgbdata[...,-2::-1]
draw[...,-1]=rgbdata[...,-1]
    # get surface and copy data to screen
surface = pg.Surface((w,h))
numpy_surface = np.frombuffer(surface.get_buffer())
numpy_surface[...] = np.frombuffer(draw)
del numpy_surface
screen.blit(surface,(0,0))
if doshow:
pg.display.update()
| michaelerule/neurotools | graphics/pygame.py | Python | gpl-3.0 | 3,004 | 0.018642 |
#!/usr/bin/python
from body3 import *
function_decl(link='extern',srcp='eval.c:216',
body=bind_expr(
body=statement_list(
E0=decl_expr(
ftype=void_type(algn='8',name='126')),
E1=decl_expr(
ftype=void_type(algn='8',name='126')),
E2=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:53',used='1',
name=identifier_node(string='need_here_doc')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
E3=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=void_type(algn='8',name='126')),
E4=cond_expr(
OP0=truth_andif_expr(
OP0=ne_expr(
OP0=var_decl(algn='32',srcp='shell.h:94',used='1',
name=identifier_node(string='interactive')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=ne_expr(
OP0=nop_expr(
OP0=component_ref(
OP0=var_decl(algn='64',srcp='input.h:89',used='1',
name=identifier_node(string='bash_input')),
OP1=field_decl(algn='32',srcp='input.h:82',
name=identifier_node(string='type'))),
ftype=integer_type(algn='32',max='29',min='28',name='17',prec='32',sign='unsigned',size='5')),
OP1=integer_cst(low='3',
ftype=integer_type(algn='32',max='29',min='28',name='17',prec='32',sign='unsigned',size='5')))),
OP1=statement_list(
E0=modify_expr(
OP0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
OP1=call_expr(
E0=nop_expr(
OP0=addr_expr(
OP0=pointer_type(algn='64'),
ftype=string_cst(string='PROMPT_COMMAND',
ftype=array_type(algn='8',domn='13067',elts='9',size='13066'))),
ftype=pointer_type(algn='64',ptd='906',size='22')),
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=pointer_type(algn='64',ptd='9',size='22')),
ftype=pointer_type(algn='64',ptd='9',size='22')),
E1=cond_expr(
OP0=ne_expr(
OP0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
OP1=integer_cst(low='0',
ftype=pointer_type(algn='64',ptd='9',size='22')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
OP1=call_expr(
E0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
E1=nop_expr(
OP0=addr_expr(
OP0=pointer_type(algn='64'),
ftype=string_cst(string='PROMPT_COMMAND',
ftype=array_type(algn='8',domn='13067',elts='9',size='13066'))),
ftype=pointer_type(algn='64',ptd='9',size='22')),
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='10721',link='extern',name='10720',srcp='input.h:105')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126')),
E2=cond_expr(
OP0=eq_expr(
OP0=var_decl(algn='32',srcp='eval.c:51',used='1',
name=identifier_node(string='running_under_emacs')),
OP1=integer_cst(low='2',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126'))),
ftype=void_type(algn='8',name='126')),
E5=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:54',used='1',
name=identifier_node(string='current_command_line_count')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
E6=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r')),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='2560',link='extern',name='12695',srcp='externs.h:104')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
E7=cond_expr(
OP0=ne_expr(
OP0=var_decl(algn='32',srcp='eval.c:53',used='1',
name=identifier_node(string='need_here_doc')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='5191',link='extern',name='10700',srcp='input.h:104')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126')),
E8=return_expr(
expr=modify_expr(
OP0=result_decl(algn='32',note='art:artificial',srcp='eval.c:216'),
OP1=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
ftype=void_type(algn='8',name='126'))),
ftype=void_type(algn='8',name='126'),
vars=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r'))),
name=identifier_node(string='parse_command')) | h4ck3rm1k3/gcc_py_introspector | data/body4.py | Python | gpl-2.0 | 6,023 | 0.028889 |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Widgets for advanced display of files."""
import json
from django import http
from grr.gui import renderers
from grr.lib import utils
class HexView(renderers.TemplateRenderer):
"""Display a HexView of a file.
Internal State:
- aff4_path: The name of the aff4 object we are viewing now.
- age: The version of the AFF4 object to display.
"""
table_width = 32
total_size = 0
# The state of this widget.
state = {}
# This is the template used by the js to build the hex viewer html.
table_jquery_template = """
<script id="HexTableTemplate" type="text/x-jquery-tmpl">
<table class="monospace">
<tbody>
<tr id="hex_header" class="ui-state-default">
<th id="offset">offset</th>
<th id="data_column"></th>
</tr>
<tr>
<td id="offset_area">
<table>
</table>
</td>
<td id="hex_area">
<table>
</table>
</td>
<td id="data_area" class="data_area">
<table>
</table>
</td>
<td class='slider_area'><div id=slider></div></td>
</tr>
</tbody>
</table>
</script>
"""
layout_template = renderers.Template("""
<div id="{{unique|escape}}" style="position: absolute; top: 45px;
right: 0; bottom: 0; left: 0"></div> """ + table_jquery_template + """
<script>
$("#{{unique|escapejs}}").resize(function() {
grr.hexview.HexViewer("{{renderer|escapejs}}", "{{unique|escapejs}}",
{{this.table_width|escapejs}}, {{this.state_json|safe}});
});
$("#{{unique|escapejs}}").resize();
</script>
""")
def Layout(self, request, response):
"""Render the content of the tab or the container tabset."""
self.state["aff4_path"] = request.REQ.get("aff4_path")
self.state["age"] = request.REQ.get("age")
encoder = json.JSONEncoder()
self.state_json = encoder.encode(self.state)
return super(HexView, self).Layout(request, response)
def RenderAjax(self, request, response):
"""Return the contents of the hex viewer in JSON."""
try:
row_count = int(request.REQ.get("hex_row_count", 10))
except ValueError:
row_count = 2
try:
offset = int(request.REQ.get("offset", 0))
except ValueError:
offset = 0
encoder = json.JSONEncoder()
data = [ord(x) for x in self.ReadBuffer(
request, offset, row_count * self.table_width)]
response = dict(offset=offset, values=data)
response["total_size"] = self.total_size
return http.HttpResponse(encoder.encode(response),
content_type="text/json")
def ReadBuffer(self, request, offset, length):
"""Should be overriden by derived classes to satisfy read requests.
Args:
request: The original request object.
offset: The offset inside the file we should read from.
length: The number of bytes to return.
Returns:
An array of integers between 0 and 255 corresponding to the bytes.
"""
return [x % 255 for x in xrange(offset, offset + length)]
class TextView(renderers.TemplateRenderer):
"""Display a TextView of a file."""
# The state of this widget.
state = {}
total_size = 0
default_codec = "utf_8"
allowed_codecs = ["base64_codec", "big5", "big5hkscs", "cp037", "cp1006",
"cp1026", "cp1140", "cp1250", "cp1251", "cp1252",
"cp1253", "cp1254", "cp1255", "cp1256", "cp1257",
"cp1258", "cp424", "cp437", "cp500", "cp737",
"cp775", "cp850", "cp852", "cp855", "cp856", "cp857",
"cp860", "cp861", "cp862", "cp863", "cp864", "cp865",
"cp866", "cp869", "cp874", "cp875", "cp932", "cp949",
"cp950" "idna", "rot_13", "utf_16", "utf_16_be",
"utf_16_le", "utf_32", "utf_32_be", "utf_32_le",
"utf_7", "utf_8", "utf_8_sig", "uu_codec", "zlib_codec"]
layout_template = renderers.Template("""
<div id="{{unique|escape}}">
<div id="text_viewer">
offset <input id="text_viewer_offset" name="offset" type=text value=0 size=6>
size <input id="text_viewer_data_size" name="text_data_size"
type=text value=0 size=6>
encoding <select id="text_encoding" name="text_encoding">
{% for encoder in this.allowed_codecs %}
<option value={{encoder|escape}}>{{encoder|escape}}</option>
{% endfor %}
</select>
<div id="text_viewer_slider"></div>
<div id="text_viewer_data" total_size=0>
<div id="text_viewer_data_content" total_size=0></div>
</div>
<script>
grr.textview.TextViewer("{{renderer|escapejs}}", "{{unique|escapejs}}",
"{{this.default_codec|escapejs}}",
{{this.state_json|safe}});
</script>
</div>
</div>
""")
action_template = renderers.Template("""
<div id="text_viewer_data_content" total_size="{{this.total_size|escape}}">
{% if this.error %}
<div class="errormsg">{{this.error|escape}}</div>
{% else %}
<pre class="monospace">
{{this.data|escape}}
</pre>
{% endif %}
</div>
""")
def Layout(self, request, response):
"""Render the content of the tab or the container tabset."""
self.state["aff4_path"] = request.REQ.get("aff4_path")
self.state["age"] = request.REQ.get("age")
encoder = json.JSONEncoder()
self.state_json = encoder.encode(self.state)
return super(TextView, self).Layout(request, response)
def RenderAjax(self, request, response):
"""Return the contents of the text viewer."""
try:
self.data_size = int(request.REQ.get("data_size", 10000))
self.offset = int(request.REQ.get("offset", 0))
except ValueError:
self.error = "Invalid data_size or offset given."
return renderers.TemplateRenderer.Layout(self, request, response,
self.action_template)
text_encoding = request.REQ.get("text_encoding", self.default_codec)
try:
buf = self.ReadBuffer(request, self.offset, self.data_size)
self.data = self._Decode(text_encoding, buf)
except RuntimeError as e:
self.error = "Failed to decode: %s" % utils.SmartStr(e)
return renderers.TemplateRenderer.Layout(self, request, response,
self.action_template)
def _Decode(self, codec_name, data):
"""Decode data with the given codec name."""
if codec_name not in self.allowed_codecs:
raise RuntimeError("Invalid encoding requested.")
try:
return data.decode(codec_name, "replace")
except LookupError:
raise RuntimeError("Codec could not be found.")
except AssertionError:
raise RuntimeError("Codec failed to decode")
def ReadBuffer(self, request, offset, length):
"""Should be overriden by derived classes to satisfy read requests.
Args:
request: The original request object.
offset: The offset inside the file we should read from.
length: The number of bytes to return.
Returns:
An array of integers between 0 and 255 corresponding to the bytes.
"""
return "".join(x % 255 for x in xrange(offset, offset + length))
| spnow/grr | gui/plugins/fileview_widgets.py | Python | apache-2.0 | 7,084 | 0.005364 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import nova.conf
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class IoOpsFilter(filters.BaseHostFilter):
"""Filter out hosts with too many concurrent I/O operations."""
RUN_ON_REBUILD = False
def _get_max_io_ops_per_host(self, host_state, spec_obj):
return CONF.filter_scheduler.max_io_ops_per_host
def host_passes(self, host_state, spec_obj):
"""Use information about current vm and task states collected from
compute node statistics to decide whether to filter.
"""
num_io_ops = host_state.num_io_ops
max_io_ops = self._get_max_io_ops_per_host(
host_state, spec_obj)
passes = num_io_ops < max_io_ops
if not passes:
LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host "
"is set to %(max_io_ops)s",
{'host_state': host_state,
'max_io_ops': max_io_ops})
return passes
class AggregateIoOpsFilter(IoOpsFilter):
"""AggregateIoOpsFilter with per-aggregate the max io operations.
Fall back to global max_io_ops_per_host if no per-aggregate setting found.
"""
def _get_max_io_ops_per_host(self, host_state, spec_obj):
max_io_ops_per_host = CONF.filter_scheduler.max_io_ops_per_host
aggregate_vals = utils.aggregate_values_from_key(
host_state,
'max_io_ops_per_host')
try:
value = utils.validate_num_values(
aggregate_vals, max_io_ops_per_host, cast_to=int)
except ValueError as e:
LOG.warning("Could not decode max_io_ops_per_host: '%s'", e)
value = max_io_ops_per_host
return value
| rahulunair/nova | nova/scheduler/filters/io_ops_filter.py | Python | apache-2.0 | 2,476 | 0 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import unittest
from copy import deepcopy
from parameterized import parameterized
from airflow.contrib.operators.ecs_operator import ECSOperator
from airflow.exceptions import AirflowException
from tests.compat import mock
RESPONSE_WITHOUT_FAILURES = {
"failures": [],
"tasks": [
{
"containers": [
{
"containerArn":
"arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868",
"lastStatus": "PENDING",
"name": "wordpress",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55"
}
],
"desiredStatus": "RUNNING",
"lastStatus": "PENDING",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55",
"taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11"
}
]
}
class TestECSOperator(unittest.TestCase):
@mock.patch('airflow.contrib.operators.ecs_operator.AwsHook')
def setUp(self, aws_hook_mock):
self.aws_hook_mock = aws_hook_mock
self.ecs_operator_args = {
'task_id': 'task',
'task_definition': 't',
'cluster': 'c',
'overrides': {},
'aws_conn_id': None,
'region_name': 'eu-west-1',
'group': 'group',
'placement_constraints': [{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}],
'network_configuration': {
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc'],
'subnets': ['subnet-123456ab']
}
}
}
self.ecs = ECSOperator(**self.ecs_operator_args)
def test_init(self):
self.assertEqual(self.ecs.region_name, 'eu-west-1')
self.assertEqual(self.ecs.task_definition, 't')
self.assertEqual(self.ecs.aws_conn_id, None)
self.assertEqual(self.ecs.cluster, 'c')
self.assertEqual(self.ecs.overrides, {})
self.assertEqual(self.ecs.hook, self.aws_hook_mock.return_value)
self.aws_hook_mock.assert_called_once_with(aws_conn_id=None)
def test_template_fields_overrides(self):
self.assertEqual(self.ecs.template_fields, ('overrides',))
@parameterized.expand([
['EC2', None],
['FARGATE', None],
['EC2', {'testTagKey': 'testTagValue'}],
])
@mock.patch.object(ECSOperator, '_wait_for_task_ended')
@mock.patch.object(ECSOperator, '_check_success_task')
@mock.patch('airflow.contrib.operators.ecs_operator.AwsHook')
def test_execute_without_failures(self, launch_type, tags, aws_hook_mock,
check_mock, wait_mock):
client_mock = aws_hook_mock.return_value.get_client_type.return_value
client_mock.run_task.return_value = RESPONSE_WITHOUT_FAILURES
ecs = ECSOperator(launch_type=launch_type, tags=tags, **self.ecs_operator_args)
ecs.execute(None)
aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs',
region_name='eu-west-1')
extend_args = {}
if launch_type == 'FARGATE':
extend_args['platformVersion'] = 'LATEST'
if tags:
extend_args['tags'] = [{'key': k, 'value': v} for (k, v) in tags.items()]
client_mock.run_task.assert_called_once_with(
cluster='c',
launchType=launch_type,
overrides={},
            startedBy=mock.ANY,  # Can be 'airflow' or 'Airflow'
taskDefinition='t',
group='group',
placementConstraints=[
{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}
],
networkConfiguration={
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc'],
'subnets': ['subnet-123456ab']
}
},
**extend_args
)
wait_mock.assert_called_once_with()
check_mock.assert_called_once_with()
self.assertEqual(ecs.arn,
'arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55')
def test_execute_with_failures(self):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
resp_failures = deepcopy(RESPONSE_WITHOUT_FAILURES)
resp_failures['failures'].append('dummy error')
client_mock.run_task.return_value = resp_failures
with self.assertRaises(AirflowException):
self.ecs.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs',
region_name='eu-west-1')
client_mock.run_task.assert_called_once_with(
cluster='c',
launchType='EC2',
overrides={},
            startedBy=mock.ANY,  # Can be 'airflow' or 'Airflow'
taskDefinition='t',
group='group',
placementConstraints=[
{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}
],
networkConfiguration={
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc'],
'subnets': ['subnet-123456ab'],
}
}
)
def test_wait_end_tasks(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
self.ecs._wait_for_task_ended()
client_mock.get_waiter.assert_called_once_with('tasks_stopped')
client_mock.get_waiter.return_value.wait.assert_called_once_with(
cluster='c', tasks=['arn'])
self.assertEqual(
sys.maxsize, client_mock.get_waiter.return_value.config.max_attempts)
def test_check_success_tasks_raises(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'lastStatus': 'STOPPED',
'exitCode': 1
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is not in success state ", str(e.exception))
self.assertIn("'name': 'foo'", str(e.exception))
self.assertIn("'lastStatus': 'STOPPED'", str(e.exception))
self.assertIn("'exitCode': 1", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_pending(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'container-name',
'lastStatus': 'PENDING'
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is still pending ", str(e.exception))
self.assertIn("'name': 'container-name'", str(e.exception))
self.assertIn("'lastStatus': 'PENDING'", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_multiple(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'exitCode': 1
}, {
'name': 'bar',
'lastStatus': 'STOPPED',
'exitCode': 0
}]
}]
}
self.ecs._check_success_task()
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_host_terminated_raises(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'stoppedReason': 'Host EC2 (instance i-1234567890abcdef) terminated.',
"containers": [
{
"containerArn": "arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868", # noqa: E501
"lastStatus": "RUNNING",
"name": "wordpress",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55" # noqa: E501
}
],
"desiredStatus": "STOPPED",
"lastStatus": "STOPPED",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55", # noqa: E501
"taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11" # noqa: E501
}]
}
with self.assertRaises(AirflowException) as e:
self.ecs._check_success_task()
self.assertIn(
"The task was stopped because the host instance terminated:",
str(e.exception))
self.assertIn("Host EC2 (", str(e.exception))
self.assertIn(") terminated", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_check_success_task_not_raises(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'container-name',
'lastStatus': 'STOPPED',
'exitCode': 0
}]
}]
}
self.ecs._check_success_task()
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
if __name__ == '__main__':
unittest.main()
| Fokko/incubator-airflow | tests/contrib/operators/test_ecs_operator.py | Python | apache-2.0 | 11,678 | 0.001199 |
#!/usr/bin/env python
import re
import os
import sys
import math
import argparse
def count_deflines(fastafile):
"counts number of sequences are in a fasta file"
fasta_h = open(fastafile, 'rb')
counter = 0
for line in fasta_h:
if re.search('^>', line) is not None:
counter += 1
fasta_h.close()
return counter
def split_fasta(fastafile, numfiles):
"splits fastafile into numfiles even sized fastafiles"
numseqs = count_deflines(fastafile)
seqlimit = math.ceil( 1. * numseqs / numfiles ) # num seqs per split file
fasta_h = open(fastafile, 'rb')
line = ''
for f in range(numfiles):
filepref = os.path.splitext(fastafile)[0]
fasta_f = open('.'.join([filepref,str(f),'fasta']), 'w')
counter = 0
fasta_f.write(line)
for line in fasta_h:
if re.search('^>', line) is not None:
counter += 1
if counter == seqlimit:
break
fasta_f.write(line)
fasta_f.close()
    fasta_h.close()
def blastall(fastafile, numfiles, database, blastype='blastp'):
"does blast of split fastafiles against database"
for f in range(numfiles):
filepref = os.path.splitext(fastafile)[0]
fasta_f = '.'.join([filepref,str(f),'fasta'])
cmd = blastype + ' -db ' + database + \
' -query ' + fasta_f + \
' -outfmt 6 -out ' + filepref + '.' + str(f) + '.blastp.tsv &'
os.system(cmd)
# better tracking of this could be achieved using os.fork(), dropping the & and
# then recombining the files, but this is beyond my current abilities
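    # One possible (untested) sketch of such tracking, using subprocess to
    # launch the jobs and wait on them instead of shelling out with '&'
    # (split_files here is a hypothetical list of the split fasta names):
    #   import subprocess
    #   procs = [subprocess.Popen(['blastp', '-db', database, '-query', f,
    #                              '-outfmt', '6', '-out', f + '.blastp.tsv'])
    #            for f in split_files]
    #   for p in procs:
    #       p.wait() # all results are on disk once every job has exited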
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Speeds up all v all blastp search")
# input options
parser.add_argument("-I", "--input_file", type=str, help="The peptide fasta file (query file)")
parser.add_argument("-D", "--database", type=str, help="The blast database to use (target db)")
parser.add_argument("-b", "--blast_type", type=str, default='blastp',
help="The blast algorithm to use. (default = blastp)")
parser.add_argument("-p", "--num_threads", type=int, default=1,
help="number of threads to distribute blast over")
args = parser.parse_args()
## parse files to set the working directory for saving files
# parse input file:
fullname = os.path.realpath(args.input_file)
filename = os.path.basename(args.input_file)
filepath = os.path.dirname(os.path.realpath(args.input_file))
# parse database path:
dbfull = os.path.realpath(args.database)
# parse blast output name and dir:
filepref = os.path.splitext(fullname)[0]
print "splitting %s into %d files..." % (filename, args.num_threads)
split_fasta(fullname, args.num_threads)
print "split fasta files saved in dir: %s" % (filepath)
print "running blastp for all files"
print "results saved as %s.##.blastp.tsv" % (filepref)
blastall(fullname, args.num_threads, dbfull, blastype=args.blast_type)
| oxpeter/small_fry | blastfaster.py | Python | gpl-2.0 | 3,115 | 0.006742 |
import json
from datetime import datetime
from django.conf import settings
from django.core.paginator import Paginator
import requests
from rest_framework import generics
from rest_framework import views
from rest_framework.pagination import PaginationSerializer
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer, XMLRenderer
from rest_framework.response import Response
from rest_framework.reverse import reverse
from companies.models import Company
from quotes.models import Quote
from api.renderers import QuoteCSVRenderer
from api.serializers import QuoteSerializer
class QuoteListView(generics.ListAPIView):
"""
Returns a list of end-of-day quotes from the PSE
### Parameters
- **stocks** - A comma separated list of stock symbols
- **from_date** - Start date of end-of-day quotes. This is inclusive. **Format: YYYY-MM-DD**
- **to_date** - End date of end-of-day quotes. This is exclusive. **Format: YYYY-MM-DD**
    *NOTE: All parameters are optional. When neither `from_date` nor `to_date` is provided,
    the API returns the quotes from the latest available date.*
### Examples
Get the latest available end-of-day quote for a company
GET /api/quotes/?stocks=BDO
Get the latest available end-of-day quote for multiple companies
GET /api/quotes/?stocks=BDO,BPI,MBT
Get all available end-of-day quotes for all companies starting from the `from_date`
GET /api/quotes/?from_date=2014-04-07
    Get all available end-of-day quotes for all companies up until the `to_date`
GET /api/quotes/?to_date=2014-04-07
    Get all available end-of-day quotes for all companies from the `from_date` until the `to_date`
GET /api/quotes/?from_date=2014-04-07&to_date=2014-11-11
"""
serializer_class = QuoteSerializer
renderer_classes = (JSONRenderer, BrowsableAPIRenderer, XMLRenderer, QuoteCSVRenderer)
def get_queryset(self):
items = Quote.objects.all()
stocks = self.request.QUERY_PARAMS.get('stocks')
from_date = self.request.QUERY_PARAMS.get('from_date')
to_date = self.request.QUERY_PARAMS.get('to_date')
self.csv_filename = 'quotes_'
if stocks is not None:
stocks = stocks.split(',')
stocks = [x.upper() for x in stocks]
self.csv_filename += '_'.join(stocks) + '_'
items = items.filter(company__symbol__in=stocks)
if from_date is None and to_date is None:
latest_quote_date = Quote.objects.latest('quote_date').quote_date
self.csv_filename += latest_quote_date.strftime('%Y-%m-%d')
items = items.filter(quote_date=latest_quote_date)
elif from_date == to_date:
self.csv_filename += from_date
quote_date = datetime.strptime(from_date, '%Y-%m-%d')
items = items.filter(quote_date=quote_date)
else:
if from_date is not None:
self.csv_filename += 'from_' + from_date
from_date = datetime.strptime(from_date, '%Y-%m-%d')
items = items.filter(quote_date__gte=from_date)
if to_date is not None:
prefix = '_' if from_date is not None else ''
self.csv_filename += prefix + 'to_' + to_date
to_date = datetime.strptime(to_date, '%Y-%m-%d')
items = items.filter(quote_date__lt=to_date)
return items.order_by('quote_date', '-company__is_index', 'company__symbol')
def list(self, request, *args, **kwargs):
response = super(generics.ListAPIView, self).list(request, args, kwargs)
ret_format = self.request.QUERY_PARAMS.get('format')
if ret_format == 'csv':
filename = self.csv_filename + '.csv'
response['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
return response
class TickerView(views.APIView):
"""
Provides a near-realtime endpoint for quotes
### Parameters
- **stocks** - A comma separated list of stock symbols
### Examples
    Get near-realtime quotes for a company
GET /api/quotes/?stocks=BPI
"""
renderer_classes = (JSONRenderer, BrowsableAPIRenderer, XMLRenderer)
def get(self, request):
r = requests.get(settings.TICKER_URL)
response = json.loads(r.content)
data = {}
items = []
stocks = self.request.QUERY_PARAMS.get('stocks')
if stocks is not None:
stocks = stocks.split(',')
stocks = [x.upper() for x in stocks]
for item in response:
if item['securitySymbol'] == 'Stock Update As of':
as_of = item['securityAlias']
as_of = datetime.strptime(as_of, '%m/%d/%Y %I:%M %p')
data['as_of'] = as_of.strftime('%Y-%m-%d %I:%M%p')
else:
quote = {}
quote['symbol'] = item['securitySymbol'].upper()
if Company.objects.filter(symbol=quote['symbol']).count() != 0:
quote['name'] = Company.objects.get(symbol=quote['symbol']).name
else:
quote['name'] = item['securityAlias'].title()
quote['percent_change'] = item['percChangeClose']
quote['price'] = item['lastTradedPrice']
quote['volume'] = item['totalVolume']
quote['indicator'] = item['indicator']
if stocks is not None:
if quote['symbol'] in stocks:
items.append(quote)
else:
items.append(quote)
data['quotes'] = items
return Response(data)
class DailyQuotesDownloadView(views.APIView):
paginate_by = 50
def get(self, request):
base_url = reverse('api_quotes_list', request=request)
page_num = self.request.QUERY_PARAMS.get('page', 1)
quote_dates = Quote.objects.order_by('-quote_date').values_list('quote_date', flat=True).distinct()
paginator = Paginator(quote_dates, self.paginate_by)
page = paginator.page(page_num)
items = []
for obj in page.object_list:
date_string = obj.strftime('%Y-%m-%d')
item = {
'quote_date': date_string,
'csv_url': self.generate_download_url(base_url, date_string, 'csv'),
'json_url': self.generate_download_url(base_url, date_string, 'json'),
'xml_url': self.generate_download_url(base_url, date_string, 'xml'),
}
items.append(item)
page.object_list = items
serializer = PaginationSerializer(instance=page, context={'request': request})
data = serializer.data
return Response(data)
def generate_download_url(self, base_url, quote_date, format_type):
return '{0}?from_date={1}&to_date={1}&format={2}'.format(base_url, quote_date, format_type)
| rodxavier/open-pse-initiative | django_project/api/views/quotes_views.py | Python | mit | 7,113 | 0.00478 |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from .models import EmailUser as User
from .forms import AdminUserChangeForm, UserCreationForm
@admin.register(User)
class UserAdmin(UserAdmin):
fieldsets = (
(
None,
{'fields': ('email', 'password')}
),
(
_('Personal info'),
{
'fields': (
'name', 'auth_number',
),
},
),
(
_('Permissions'),
{
'fields': (
'verified', 'is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions',
),
},
),
(
_('Important dates'),
{'fields': ('last_login', 'date_joined')},
),
)
add_fieldsets = (
(
None, {
'classes': ('wide',),
'fields': (
'email', 'password1', 'password2',
'name', 'verified',
),
},
),
)
form = AdminUserChangeForm
add_form = UserCreationForm
list_display = ('pk', 'email', 'name', 'is_staff')
list_display_links = ('email',)
list_filter = (
'verified', 'is_active', 'is_staff', 'is_superuser', 'groups',
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ('groups', 'user_permissions',)
| ccwang002/biocloud-server-kai | src/users/admin.py | Python | mit | 1,557 | 0 |
#!/usr/bin/env python
# Abraxas Collaborative Password Utility Documentation
#
# Converts a restructured text version of the manpages to nroff.
# License {{{1
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
# Imports {{{1
from docutils.core import publish_string
from docutils.writers import manpage
from textwrap import dedent
from abraxas.prefs import SEARCH_FIELDS
from abraxas.version import DATE, VERSION
# Program Manpage {{{1
PROGRAM_MANPAGE = {
'name': 'abraxas',
'sect': '1',
'contents': r"""{
=========
abraxas
=========
------------------------------
collaborative password utility
------------------------------
:Author: Kale and Ken Kundert <abraxas@nurdletech.com>
:Date: {date}
:Version: {version}
:Manual section: 1
.. :Copyright: Kale and Ken Kundert
.. :Manual group: Utilities
SYNOPSIS
========
**abraxas** [*options*] [*account*]
OPTIONS
=======
-P, --password Output the password (default if nothing else is
specified).
-N, --username Output the username.
-Q <N>, --question <N> Output the answer to security question *N*.
-A, --account-number Output the account number.
-E, --email Output the email associated with this account.
-U, --url Output the website address.
-R, --remarks Output remarks.
-i, --info Output all account information except the
secrets (the password and the answers to the
security questions).
-a, --all Same as --info except also output the password.
-q, --quiet Disable all non-essential output.
-c, --clipboard Write output to clipboard rather than stdout.
-t, --autotype Mimic a keyboard to send output to the active
window rather than stdout. In this case any
command line arguments that specify what to
output are ignored and the *autotype* entry
directs what is to be output.
-f <str>, --find <str> List any account that contains the given string
in its ID.
-s <str>, --search <str>
List any account that contains the given string
in {search_fields}, or its ID.
-S, --stateless Do not use master password or accounts file.
-T <template>, --template <template>
Template to use if account is not found.
-b, --default-browser Open account in the default browser.
-B <browser>, --browser <browser>
Open account in the specified browser.
-n, --notify Output messages to notifier.
-l, --list List available master passwords and templates
(only pure templates are listed, not accounts,
even though accounts can be used as templates)
-w <secs>, --wait <secs>
                            Wait this long before clearing the secret (use
0 to disable clearing).
--archive Archive all the secrets to
~/.config/abraxas/archive.gpg.
--changed Identify all the secrets that have changed since
last archived.
-I <GPG-ID>, --init <GPG-ID>
Initialize the master password and accounts
files in ~/.config/abraxas (but only if they do
not already exist).
-h, --help Show a brief summary of available command line
options.
DIAGNOSTICS
===========
A log file is created in ~/.config/abraxas/log (the location of this
file can be specified in the *log_file* variable in the accounts file).
DESCRIPTION
===========
Abraxas is password utility that can store or generate your passwords
and produce them from the command line. It can also be configured to
autotype your username and password into the current window so that you
can log in with a simple keystroke.
Abraxas is capable of generating two types of passwords, character based
(pass words) or word based (pass phrases). Pass phrases are generally
preferred if you have a choice, but many websites will not take them.
The benefit of pass phrases is that they are relatively easy to remember
and type, and they are very secure. The pass phrases generated by
    Abraxas generally consist of four words, each drawn from
a dictionary of 10,000 words. Thus, even if a bad guy knew that four
lower case words were being used for your pass phrase, there are still
10,000,000,000,000,000 possible combinations for him to try (this
represents a minimum entropy of 53 bits). Using six words results in 80
bits of entropy, which meets the threshold recommended by NIST for the
most secure pass phrases. For more on this, see 'How Much Entropy is
Enough' below.
For another perspective on the attractiveness of pass phrases, see
`<http://xkcd.com/936/>`_.
Unlike password vaults, Abraxas produces a highly unpredictable password
from a master password and the name of the account for which the
password is to be used. The process is completely repeatable. If you
give the same master password and account name, you will get the same
password. As such, the passwords do not have to be saved; instead they
are regenerated on the fly.
As a password generator, Abraxas provides three important advantages
over conventional password vaults. First, it allows groups of people to
share access to accounts without having to securely share each password.
Instead, one member of the group creates a master password that is
securely shared with the group once. From then on any member of the
group can create a new account, share the name of the account, and all
members will know the password needed to access the account. The second
advantage is that it opens up the possibility of using high-quality
passwords for stealth accounts, which are accounts where you remember
the name of the account but do not store any information about even the
existence of the account on your computer. With Abraxas, you only need
to remember the name of the account and it will regenerate the password
for you. This is perfect for your TrueCrypt hidden volume password.
Finally, by securely storing a small amount of information, perhaps on
a piece of paper in your safe-deposit box, you can often recover most if
not all of your passwords even if you somehow lose your accounts file.
You can even recover passwords that were created after you created your
backup. This is because Abraxas combines the master password with some
easily reconstructed information, such as the account name, to create
the password. If you save the master password, the rest should be
recoverable.
To use it, one creates a file that contains information about each of
his or her non-stealth accounts. Among that information would be
information that controls how the passwords are generated. This file is
    generally not encrypted (though you can encrypt it if you like).
Another file is created that contains one or more master passwords.
This file is always GPG encrypted.
The intent is for these files to not include the passwords for your
accounts. Rather, the passwords are regenerated when needed from the
account information and from the master password. This makes it easy to
share passwords with others without having to pass the passwords back
and forth. It is only necessary to create a shared master password in
advance. Then new passwords can be created on the fly by either party.
Basic Use
+++++++++
To generate a password for an account that exists in your accounts file,
you would use::
abraxas <account>
where <account> is the name of your account. For example, to get your
gmail password you might use::
$ abraxas gmail
PASSWORD: preview secretary eschew cobra
    The $ represents the shell prompt; you do not type it.
The password generator is also capable of generating answers to the
security questions that are the bane of most websites these days.
Simply add the questions to your accounts file and the password
generator will produce an unpredictable yet consistent and easily
communicated response for each question. For example::
$ abraxas -q0 gmail
name of elementary school: balaclava essence guildhall persimmon
There is a very good reason not to actually provide your personal
information as a response to these questions. Often it is friends and
family members that are the most likely to attempt to surreptitiously
access your account. As with most crime, it comes down to opportunity
and motive. The opportunity comes from the fact that they know you and
so are more likely to know the information, like the answers to these
security questions, that allows them access to your account. The motive
generally comes eventually. It is hard to live one's life without
angering a friend or family member at some point, and then they may feel
justified in helping themselves to your accounts.
Abraxas outputs account information upon request. It is a command line
program, so you just specify the right command line options to have it
print out the username, account number, url, etc.::
$ abraxas -i gmail
USERNAME: derrickAsh
EMAIL: derrick.ash@yahoo.com
URL: https://accounts.google.com
The output can be produced in three different ways.
The first is that it is simply displayed on standard output. It tries to
keep the secret information (such as the password and answers to the
security questions) secure by displaying it for a minute and then
erasing it. The program continues to run while the password is
displayed. To clear the password early, just kill the program by typing
Ctrl-C.
The second way is to send it to the clipboard. For security reasons, the
clipboard is cleared after a minute.
Finally, the password generator can output the information by mimicking
the keyboard and 'typing' it to active window. This is referred to as
'autotype'.
Account Discovery
+++++++++++++++++
If no account is specified, Abraxas examines the window title and from
it tries to determine which account to use. In its most simple form
window titles can be specified in the accounts, and the account with the
matching title is used. Multiple title strings can be associated with
each account, and those strings support globbing. In addition, Abraxas
can sometimes recognize components of the window title, components such
as the URL, the protocol, etc., and it can compare those component to
fields in the account to determine which account to use. In particular,
Abraxas comes with the ability to recognize the title components created
by 'Hostname in Titlebar', an add-on to Firefox that puts the URL and
protocol in the title bar (with Chrome, use 'Url in Title').
If the title matches multiple accounts, a dialog box opens with the list
of each of those accounts. Use the up or *k* and down or *j* keys to
navigate to the account you want and select it with *Enter* or *Return*.
You can cancel using *Esc*.
The combination of autotype and account discovery is very powerful if
you configure your window manager to run Abraxas because it makes it
possible to login to websites and such with a single keystroke.
Autotype can sometimes be a bit problematic. Some programs can
occasionally stubbornly ignore particular autotyped characters,
particularly $ and newline. This can occur with Firefox, whereas in
those cases it did not occur with Chrome. If this affects you, you
might want to simply remove $ from your character set for your
passwords (newline is not as problematic as it generally occurs last,
and so can be added by hand).
Security
++++++++
The accounts file can be a simple ASCII file that contains somewhat
sensitive information. From this file one could infer the existence of
an account and would have some identifying information such as the
username and account number, but the passwords themselves are not
contained in the file, only the parameters of the passwords (how many
characters, the alphabet used, etc). Because the file is somewhat
    sensitive, it is recommended that it be readable only by the
user. If you are uncomfortable with this level of protection, you can
further protect the accounts file by encrypting it. To do so, run::
$ cd ~/.config/abraxas
$ gpg --armor --encrypt --recipient <your-gpg-id> accounts
$ shred -u accounts
In some cases the mere existence of this file, even though encrypted,
may be problematic. Once discovered, authorities may compel you hand
over the decryption keys, which would expose the existence of all of
your accounts and provide access to each of them.
It is possible to generate passwords for accounts that are not described
in the accounts file. As such, these 'stealth' accounts are more secure
since no information is retained that refers to these accounts; they
provide plausible deniability. To generate a password or pass phrase for
such an account you would simply give the name of the account on the
command line. For example::
$ abraxas my-secret-account
warning: account 'my-secret-account' not found.
PASSWORD: apologist imprint epigram return
You would need to remember the name of the account precisely. If you
give even a slightly different account name you will get a different
password. In this case Abraxas generates a password with the default
settings, which is actually a 4 word pass phrase, which most websites
reject. You can indicate that Abraxas should generate an actual
password by giving the name of a template. A template is simply a named
collection of attributes that specify how to generate the password. You
may configure as many templates as you wish. By default, Abraxas comes
with eight templates:
=words:
A sequence of random English words. The default is to use 4 words,
which provides 53 bits of entropy.
=chars:
A sequence of random letters (upper and lower case), digits and
symbols. The default is to use 12 characters, which provides 79 bits
of entropy.
=pin:
A sequence of random digits. The default is to use 4 digits, which
provides 13 bits of entropy. This is typically used for PIN
numbers.
=num:
A sequence of random digits. The default is to use 8 digits, which
provides 26 bits of entropy. This is also used for PIN numbers, but
it provides better security.
=word:
A single random word. Chosen from a list of 10,000 words, this is
equivalent to a 4 digit PIN, but is easier to remember. It provides
13 bits of entropy.
=anum:
A sequence of easily distinguishable random letters. The letters may
be both upper and lower case, but will not include any letters that
are easily confused with other letters or digits (Il1O0). Typically
used for web passwords. The default is to use 12 characters, which
provides 78 bits of entropy.
=master:
A sequence of random English words. The default is to use 8 words,
which provides 106 bits of entropy.
=extreme:
A sequence of random letters (upper and lower case), digits and
symbols. The default is to use 64 characters, which provides 420
bits of entropy.
You can generate a pass word (a collection of characters) instead of
a pass phrase (a collection of words) for a stealth account with::
$ abraxas -T =anum my-secret-account
warning: account 'my-secret-account' not found.
PASSWORD: Rkybp9EFXLu4
It is possible to take this one step further. Specifying the ``-S`` or
``--stateless`` command line option instructs Abraxas to avoid using any
saved information when generating the password. In this situation, you
must give both the account name (on the command line) and the master
password. As long as you use a master password or pass phrase that is
memorable for you but difficult for everyone else to guess, you should
be reasonably safe from someone figuring out your password even if they
have full access to your private GPG keys and your Abraxas files. For
example::
$ abraxas --stateless my-secret-account
Provide master password for account 'my-secret-account'.
Password: my-secret-master-passphrase
PASSWORD: toehold physical illusion washroom
When running in stateless mode you do not have access to any templates
you may have created in your accounts file because that file is ignored,
but you have access to the predefined templates described above::
$ abraxas -S -T =anum my-secret-account
Provide master password for account 'my-secret-account'.
Password: my-secret-master-passphrase
PASSWORD: LfCkPFygucg9
GPG Security
++++++++++++
Abraxas inherits the security policies of GPG. It is important to
recognize that any weakness in your GPG security policy could result in
your passwords being exposed. For example, if you enter your GPG pass
phrase into your agent and it is retained while you walk away from your
computer, then someone could use this program to access all of your
passwords (with access to your accounts file, they would have everything
they needed to know to break into each of your accounts). Thus, it is
important to both carefully consider your use of the GPG agent and it's
password retention time. It is also important that you dutifully use
screen locking when you walk away from your computer.
Archiving
+++++++++
There are features in Abraxas that could allow you to inadvertently and
unknowingly change the passwords that are generated for an account. For
example, changing the master password would change the passwords for all
accounts that linked to that master password. Similarly, changing the
definition of a template would change the passwords for all accounts
that employ that template. To avoid this, Abraxas allows you to quickly
tell whether the passwords for any known account has changed. To use
this feature, you must first archive your secrets.
You generate an archive of the secrets for all of the known accounts
with::
abraxas --archive
The resulting archive is encrypted and saved in your settings directory
(~/.config/abraxas/archive.gpg). In addition, you can check your current
list of secrets against those in the archive with::
abraxas --changed
    It is a good idea to do this when you have changed your master password
    or accounts files and when you have updated your version of Abraxas.
Doing so will alert you to any unexpected changes. It is also
recommended that you always confirm you only see the changes you expect
before updating the archive.
How it Works
++++++++++++
A secret such as a password or the answer to a security question starts
out as the simple stringing together of a few things: the account name,
the version, and the master password. For security questions, the
question itself is added as well. This combined string is then hashed
into a very long number. Even the smallest change in any of the
components used to create it results in a very different number. The
number is then mapped into pass phrases or passwords with your choice of
words or characters. As long the master password is kept secure, this
approach is very safe. Even knowing the algorithm and having access to
the source code of the Abraxas program would not allow someone to
predict your passwords.
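    The following is a rough illustrative sketch (not the actual Abraxas
    implementation) of the idea, using SHA-512 as the hash and assuming
    a word-list file named 'words'::
        import hashlib
        def passphrase(master_password, account, version='', words=4):
            # hash the combined string into a very long number
            combined = ' '.join([master_password, account, version])
            num = int(hashlib.sha512(combined).hexdigest(), 16)
            # map the number into words drawn from a 10,000 word dictionary
            dictionary = open('words').read().split()
            chosen = []
            for each in range(words):
                num, index = divmod(num, len(dictionary))
                chosen.append(dictionary[index])
            return ' '.join(chosen)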
Getting Started
+++++++++++++++
Before using Abraxas you must have a GPG identity (a public/private key
pair tagged to an email account). In addition, it is recommended that
you run gpg-agent (add 'gpg-agent' alone on a line into your
~/.gnupg/gpg.conf file and then start the agent). Then you must create
your accounts and master password file. To do so, run::
$ abraxas -I <gpg-id>
For example, if your GPG identity is linked to derrickAsh@gmail.com,
then use::
$ abraxas -I derrickAsh@gmail.com
This creates two files if they do not already exist,
~/.config/abraxas/master.gpg and ~/.config/abraxas/accounts. Of the two,
the master.gpg file is encrypted. If you would like the accounts file to
be encrypted as well, encrypt it now using::
$ gpg --armor --encrypt --recipient <gpg-id> accounts
To make it easy to change an encrypted file, it is recommended that you
download and install the gpg plugin for vim, which can be found at
http://www.vim.org/scripts/script.php?script_id=3645. The file you will
download is named gnupg.vim; simply move it into ~/.vim/plugin. Once
you have done this, edit the file with vim or gvim. It should ask you
for the GPG pass phrase associated with the GPG identity you specified.
Once you have entered it you can edit the file.
Then if desired, you can edit the accounts file and add an account. See
'man 5 abraxas' for information about all of the fields that Abraxas
uses. For example, to add your gmail and bank accounts, you would add
something like the following to your accounts file::
accounts = {{
<skip over the templates at the start>
"chase": {{
'template': "=chars",
'username': "derrickash",
'account': "6478-4789874",
'email': "derrickAsh@gmail.com",
'url': "https://chaseonline.chase.com",
}},
"gmail": {{
'template': "=words",
'username': "derrickAsh",
'email': "derrick.ash@yahoo.com",
'url': "https://accounts.google.com",
'security questions': [
"name of elementary school",
],
'window': [
'Google Accounts*',
'Gmail*',
],
'autotype': "{{username}}{{tab}}{{password}}{{return}}",
}},
}}
These fields are described in detail in abraxas(5).
How Much Entropy is Enough
++++++++++++++++++++++++++
A 4 word Abraxas password provides 53 bits of entropy, which seems like
a lot, but NIST is recommending 80 bits for your most secure passwords.
So, how much is actually required? It is worth exploring this question.
Entropy is a measure of how hard the password is to guess. Specifically,
it is the negative of the base two logarithm of the likelihood of guessing the password
in a single guess. Every increase by one in the entropy represents
a doubling in the difficulty of guessing your password. The actual
entropy is hard to pin down, so generally we talk about the minimum
entropy, which is the likelihood of an adversary guessing the password
if he or she knows everything about the scheme used to generate the
password but does not know the password itself. So in this case the
minimum entropy is the likelihood of guessing the password if it is
known that we are using 4 space separated words as our pass phrase.
This is very easy to compute. There are roughly 10,000 words in our
dictionary, so if there was only one word in our pass phrase, the chance
of guessing it would be one in 10,000 or 13 bits of entropy. If we used
a two word pass phrase the chance of guessing it in a single guess is
one in 10,000*10,000 or one in 100,000,000 or 26 bits of entropy.
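These numbers are easy to reproduce (a quick sketch in plain Python,
assuming the 10,000 word dictionary)::
    import math
    dict_size = 10000
    for num_words in (1, 2, 4, 6):
        bits = num_words * math.log(dict_size, 2)
        print('%d word(s): %.1f bits of minimum entropy' % (num_words, bits))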
The probability of guessing our pass phrase in one guess is not our
primary concern. Really what we need to worry about is, given
a determined attack, how long it would take to guess the password. To
calculate that, we need to know how fast our adversary could try
guesses. If they are trying guesses by typing them in by hand, their
rate is so low, say one every 10 seconds, that even a one word pass
phrase may be enough to deter them. Alternatively, they may have
a script that automatically tries pass phrases through a login
interface. Again, generally the rate is relatively slow. Perhaps the
most they can manage is 1,000 tries per second. In this case they would be
able to guess a one word pass phrase in 10 seconds and a two word pass
phrase in a day, but a 4 word pass phrase would require 300,000 years to
guess in this way.
The next important thing to think about is how your password is stored
by the machine or service you are logging into. The worst case situation
is if they save the passwords in plain text. In this case if someone
were able to break in to the machine or service, they could steal the
passwords. Saving passwords in plain text is an extremely poor practice
that was surprisingly common, but is becoming less common as companies
start to realize their liability when their password files get stolen.
Instead, they are moving to saving passwords as hashes. A hash is
a transformation that is very difficult to reverse, meaning that if you
have the password it is easy to compute its hash, but given the hash it
is extremely difficult to compute the original password. Thus, they save
the hashes (the transformed passwords) rather than the passwords. When
you log in and provide your password, it is transformed with the hash function
and the result is compared against the saved hash. If they are the same,
you are allowed in. In that way, your password is no longer available to
thieves that break in. However, they can still steal the file of hashed
passwords, which is not as good as getting the plain text passwords, but
it is still valuable because it allows thieves to greatly increase the
rate that they can try passwords. If a poor hash was used to hash the
passwords, then passwords can be tried at a very high rate. For
example, it was recently reported that password crackers were able to
try 8 billion passwords per second when passwords were hashed with the
MD5 algorithm. This would allow a 4 word pass phrase to be broken in 14
days, whereas a 6 word password would still require 4,000,000 years to
break. The rate for the more computationally intensive sha512 hash was
only 2,000 passwords per second. In this case, a 4 word pass phrase
would require 160,000 years to break.
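These figures follow directly from the size of the search space.
A back-of-the-envelope check (plain Python, assuming the 10,000 word
dictionary and the guess rates quoted above)::
    dict_size = 10000
    seconds_per_year = 365.25 * 24 * 3600
    for label, rate in (('login interface', 1e3),
                        ('stolen MD5 hashes', 8e9),
                        ('stolen sha512 hashes', 2e3)):
        years = dict_size ** 4 / rate / seconds_per_year
        print('%s: %.2g years to try every 4-word phrase' % (label, years))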
In most cases you have no control over how your passwords are stored on
the machines or services that you log into. Your best defense against
the notoriously poor security practices of most sites is to always use
a unique password for sites where you are not in control of the secrets.
For example, you might consider using the same pass phrase for your login
password and the pass phrase for an ssh key on a machine that you
administer, but never use the same password for two different websites
unless you do not care if the content of those sites becomes public.
So, if we return to the question of how much entropy is enough, you can
say that for important passwords where you are in control of the
password database and it is extremely unlikely to get stolen, then four
randomly chosen words from a reasonably large dictionary is plenty (for
Abraxas this is 53 bits of entropy). If what the pass phrase is trying
to protect is very valuable and you do not control the password database
(ex., your brokerage account) you might want to follow the NIST
recommendation and use 6 words to get 80 bits of entropy. If you are
typing passwords on your work machine, many of which employ keyloggers
to record your every keystroke, then no amount of entropy will protect
you from anyone that has or gains access to the output of the keylogger.
In this case, you should consider things like one-time passwords or
two-factor authentication. Or better yet, only access sensitive accounts
from your home machine and not from any machine that you do not control.
SEE ALSO
========
abraxas(3), abraxas(5)
}"""
}
# API Manpage {{{1
API_MANPAGE = {
'name': 'abraxas',
'sect': '3',
'contents': r'''{
=========
abraxas
=========
------------------------------
collaborative password utility
------------------------------
:Author: Kale and Ken Kundert <abraxas@nurdletech.com>
:Date: {date}
:Version: {version}
:Manual section: 3
.. :Copyright: Kale and Ken Kundert
.. :Manual group: Utilities
DESCRIPTION
===========
The API to Abraxas will be simply demonstrated by example.
archive
+++++++
This program is used to generate an encrypted file that includes the
account numbers and login information for essential accounts. The
resulting file could be sent to your Executor or it could be printed and
saved in a safe place such as a safe deposit box. The idea is that this
information would help whoever needed to access your accounts in case
something happened to you.
Here is the *archive* script::
#!/bin/env python3
from __future__ import print_function, division
from abraxas import PasswordGenerator, PasswordError, Logging
from textwrap import indent
import gnupg
import sys
filename = 'kids.gpg'
recipients = [
'me@myfamily.name',
'son@myfamily.name',
'daughter@myfamily.name']
accounts = [
('login', 'Login'),
('disk', 'Disk encryption'),
('gpg', 'GPG'),
('boa', 'Bank of America'),
('tdwaterhouse', 'TD Waterhouse')]
try:
logger = Logging(exception=PasswordError)
pw = PasswordGenerator(logger=logger)
pw.read_accounts()
lines = []
for name, description in accounts:
lines += ["%s:" % (description if description else name)]
acct = pw.get_account(name)
# Remarks
remarks = acct.get_field('remarks')
if remarks:
if '\n' in remarks:
lines += [" remarks:"]
lines += [indent(remarks.strip(), ' ')]
else:
lines += [" remarks: " + remarks.strip()]
# Account number
account = acct.get_field('account')
if account:
if type(account) == list:
lines += [" account numbers:"]
lines += [" %s" % ',\n '.join(account)]
else:
lines += [" account number:", account]
# Username
username = acct.get_field('username')
if username:
lines += [" username:", username]
# Password
password = pw.generate_password()
if password:
lines += [" password:", password]
# Security questions
number = 0
security_questions = []
while True:
try:
question, answer = pw.generate_answer(number)
security_questions += [" %s ==> %s" % (question, answer)]
number += 1
except PasswordError:
break
if security_questions:
lines += [' security questions:']
lines += security_questions
            lines += ['']  # separate accounts with a blank line
gpg = gnupg.GPG()
encrypted = gpg.encrypt('\n'.join(lines), recipients)
if not encrypted.ok:
sys.exit("%s: unable to encrypt.\n%s" % (filename, encrypted.stderr))
try:
with open(filename, 'w') as file:
file.write(str(encrypted))
print("%s: created." % filename)
except IOError as err:
sys.exit('%s: %s.' % (err.filename, err.strerror))
except KeyboardInterrupt:
sys.exit('Killed by user')
except PasswordError as err:
sys.exit(str(err))
The program starts by creating a logger. Normally this is not necessary.
When you run PasswordGenerator() without passing in a logger the default
logger is created for you. However, the default logger does not throw
exceptions. Instead, when a problem occurs an error message is printed
to standard error and the program exits. This utility, though, needs
exceptions to be caught and handled, and so in this case a logger is
explicitly created and PasswordError is passed in. In this way, Abraxas
does not exit on an error, instead it throws a PasswordError.
mountall
++++++++
Here is a program that mounts a series of directories. It differs from
the above script in that it uses autotype, which it accesses through
*AutotypeWriter*. Specifically, the program never requests a password
directly from Abraxas. Instead, the PasswordGenerator object is passed
in when creating an AutotypeWriter object. The writer then queries the
generator for the password and delivers it directly to the user.
Mountall uses *sudo*, which requires a password the first time it is
run, and it runs *mount* for each directory, which requires a password
each time it is run.
Here is the *mountall* script::
#!/bin/env python
from __future__ import print_function, division
    from fileutils import expandPath, makePath, pipe, ShellExecute as Execute, ExecuteError
from sys import exit
from os import fork
from time import sleep
from abraxas import PasswordGenerator, AutotypeWriter, PasswordError
shares = {{
'music': 'audio',
'lib/passwords': True,
'business': True,
'consulting': True,
'home': True,
'personal': True,
'photos': True,
'profession': True,
'reference': True}}
def run_cmd_with_password(cmd, pw_writer):
try:
if (fork()):
Execute(cmd)
else:
sleep(1)
pw_writer.write_autotype()
pw_writer.process_output()
exit()
except PasswordError as err:
exit(err.message)
try:
# Open the password generator
pw = PasswordGenerator()
pw.read_accounts()
writer = AutotypeWriter(pw)
# Clear out any saved sudo credentials. This is needed so that
# we can be sure the next run of sudo requests a password.
# Without this, the password that is autotyped may be exposed.
Execute('sudo -K')
# Get the login password
pw.get_account('login')
# Run sudo so that it requests the password and sets the
# credentials. In this way the subsequent calls to sudo will not
# request a password.
run_cmd_with_password('sudo true', writer)
# Get the Samba password
pw.get_account('dgc21')
for src, dest in shares.items():
if dest == True:
dest = src
absdest = expandPath(makePath('~', dest))
mountpoint = pipe('mountpoint -q %s' % absdest, accept=(0,1))
if mountpoint.status:
print("Mounting %s to %s" % (src, absdest))
run_cmd_with_password('sudo mount %s' % (absdest), writer)
else:
print("Skipping %s (already mounted)" % (dest))
except KeyboardInterrupt:
exit('Killed by user')
except ExecuteError as err:
exit(str(err))
    except PasswordError as err:
        exit(str(err))
The program starts by instantiating both the *PasswordGenerator* and the
*AutotypeWriter* class. The *PasswordGenerator* class is responsible for
generating the password and *AutotypeWriter* gets it to the user. In
this case the autotype facility is used to mimic the keyboard. There are
other writers available for writing to a TTY, to stdout, and to the
system clipboard.
addkeys
+++++++
This script is used to pre-load a series of SSH keys into the SSH
agent. It is similar to the above script, except it uses pexpect
rather than autotype. This makes it a bit safer because pexpect waits
for the expected prompt from ssh-add, and so will not blindly spew out
the password if things go wrong::
#!/usr/bin/python3
import pexpect
from abraxas import PasswordGenerator, PasswordError
import sys
keys = [
# description keyfile abraxas account name
('primary rsa', 'id-rsa', 'ssh' ),
('primary ed25519', 'id-ed25519', 'ssh' ),
('digitalocean', 'digitalocean', 'do-ssh' ),
('tunnelr', 'tunnelr', 'tunnelr-ssh' ),
('dumper', 'dumper', 'dumper' ),
('github', 'github', 'github-ssh' ),
]
ssh_dir = '/home/toby/.ssh'
try:
pw = PasswordGenerator()
pw.read_accounts()
except PasswordError as error:
sys.exit(str(error))
for desc, name, acct in keys:
print('Adding %s ssh key' % desc)
try:
acct = pw.get_account(acct)
password = pw.generate_password()
sshadd = pexpect.spawn('ssh-add %s/%s' % (ssh_dir, name))
sshadd.expect(
'Enter passphrase for %s/%s: ' % (ssh_dir, name),
timeout=4
)
sshadd.sendline(password)
sshadd.expect(pexpect.EOF)
sshadd.close()
if sshadd.exitstatus:
print('addkeys: ssh-add: unexpected exit status:', sshadd.exitstatus)
except PasswordError as error:
sys.exit(str(error))
except (pexpect.EOF, pexpect.TIMEOUT):
sys.exit('addkeys: unexpected prompt from ssh-add: %s' % (
sshadd.before.decode('utf8')
))
except KeyboardInterrupt:
            sys.exit('Killed by user')
SEE ALSO
========
abraxas(1), abraxas(5)
}'''
}
# Configuration Files Manpage {{{1
CONFIG_MANPAGE = {
'name': 'abraxas',
'sect': '5',
'contents': r'''{
=========
abraxas
=========
------------------------------
collaborative password utility
------------------------------
:Author: Kale and Ken Kundert <abraxas@nurdletech.com>
:Date: {date}
:Version: {version}
:Manual section: 5
.. :Copyright: Kale and Ken Kundert
.. :Manual group: Utilities
DESCRIPTION
===========
Abraxas requires two files to operate. The master password file and the
accounts file. You may optionally add a third file that gives the
dictionary used when creating pass phrases.
Master Password File
++++++++++++++++++++
The master password file is named '~/.config/abraxas/master.gpg'. It is
encrypted with the GPG ID that you specified when you ran 'abraxas
--init'. It is a Python file that contains a collection of variables.
To be able to edit it conveniently it is recommended that you add the
gnupg plugin to vim (download it from
``http://www.vim.org/scripts/script.php?script_id=3645`` and copy it
into ~/.vim/plugin).
dict_hash
~~~~~~~~~
This is a hash of the file that contains the words used when generating
pass phrases. You should not change this value. It is used to warn you
if somehow your words file is changed or corrupted, which would corrupt
your pass phrases.
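If you ever need to recompute this hash yourself, it can be done
directly from the words file (a sketch; the use of SHA-1 here is an
assumption based on the 40-digit hashes shown in the example master
password file below)::
    import hashlib, os
    path = os.path.expanduser('~/.config/abraxas/words')
    print(hashlib.sha1(open(path, 'rb').read()).hexdigest())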
secrets_hash
~~~~~~~~~~~~
This is a hash of the file that contains the code used when generating
the hash and converting it to a password or pass phrase. It is used to
warn you that the secrets code has changed, presumably when the program
itself was updated. If this occurs you should verify that the passwords
it generates are the same. If not, you should not use the updated
version of the program. If they are the same, you should update the
*secrets_hash*. Do this by moving the existing *master.gpg* file out of
the way, generating a new one with *abraxas --init*, copying the new
*secrets_hash* to the original file, and then moving it back to its
original location of *~/.config/abraxas/master.gpg*.
charsets_hash
~~~~~~~~~~~~~
This is a hash of the file that contains the alphabets and the exclude
function that you can use when creating alphabets for your
character-based passwords. It is used to warn you that the character
sets code has changed, presumably when the program itself was updated.
If this occurs you should verify that the passwords it generates are the
same. If not, you should not use the updated version of the program. If
they are the same, you should update the *charsets_hash*. Do this by
moving the existing *master.gpg* file out of the way, generating a new
one with *abraxas --init*, copying the new *charsets_hash* to the
original file, and then moving it back to its original location of
*~/.config/abraxas/master.gpg*.
accounts
~~~~~~~~
This is the name of the accounts file. The name may be given with or
without an encryption suffix (``.gpg`` or ``.asc``). If given with an
encryption suffix, the file must be encrypted. If given without
a suffix, the file may still be encrypted (in which case the file itself
should have an encryption suffix) but need not be.
passwords
~~~~~~~~~
This is a dictionary that gives your master passwords. Each entry is
a pair of the password ID and then password itself. For example::
passwords = {{
'default': """l8i6-v?>GCTQK"oz3yzZg5Ne=&,.!*Q$2ddEaZbESwnl<4*BRi1D887XQ!W4/&}}e""",
'derrick and peter': "hush puppie",
'derrick and debbie': "lounge lizard",
}}
As shown, your master password file comes preloaded with a very long
and very random default password.
Generally you will never have to type these passwords again, so there is
little reason not to make them long and very random. There are no limits
on the length of the passwords or the characters they may contain, so
you can go wild. For example, using your default master password you
could use Abraxas to generate new master passwords::
$ abraxas -T =extreme 'derrick and peter'
PASSWORD: [Y$*{{QCf"?yvDc'{{4v?4r.iA0b3brHY z40;lZIs~bjj<DpDz&wK!XCWq=,gb}}-|
You can then use that string as a master password. Notice that this
string contains quote characters, meaning that you will have to embed it
in triple quotes to avoid trouble::
passwords = {{
'default': """l8i6-v?>GCTQK"oz3yzZg5Ne=&,.!*Q$2ddEaZbESwnl<4*BRi1D887XQ!W4/&}}e""",
'derrick and peter': """[Y$*{{QCf"?yvDc'{{4v?4r.iA0b3brHY z40;lZIs~bjj<DpDz&wK!XCWq=,gb}}-|""",
'derrick and debbie': "lounge lizard",
}}
Of course it is not necessary to go to these extremes. Your password
must just not be guessable. One reason not to go to such extremes is if
you need to share a master password with a friend while talking over the
phone. In this case, using the =master template to generate a simple
but long pass phase is much preferred::
$ abraxas -T =master "derrick and debbie"
PASSWORD: impulse nostril double irony conflate rookie posting blind
Then your passwords entry becomes::
passwords = {{
'default': """l8i6-v?>GCTQK"oz3yzZg5Ne=&,.!*Q$2ddEaZbESwnl<4*BRi1D887XQ!W4/&}}e""",
'derrick and peter': """[Y$*{{QCf"?yvDc'{{4v?4r.iA0b3brHY z40;lZIs~bjj<DpDz&wK!XCWq=,gb}}-|""",
'derrick and debbie': """impulse nostril double irony conflate rookie posting blind""",
}}
This approach of using the default password to generate new master
passwords, each of which has a very predictable name, can make it
possible for you to reconstruct your master password file if you happen
to lose it. To do so, you will need to keep a copy of the default
password in a safe place (along with your master GPG keys in a safe
deposit box, for example). Of course, you really should save both
the master password and accounts file in a safe place because they
contain additional information that is used to generate your passwords
(account names, versions, security questions, etc.). You should be aware
that these tend to change with time and so your saved files can quickly
go out of date. However, if you follow a practice of using very
systematic naming strategies for master passwords, accounts, versions,
and the like, you can dramatically increase the chances of being able to
retrieve your passwords from an old master password and accounts file.
You are free to name your master passwords in any manner that pleases
you. One reasonable approach is to name them after the people that use
them. Thus in the example above, Derrick has one key he uses his default
key for for his own accounts and two others for accounts he shares with
Debbie and Peter. When it comes time to abandon a master password,
simply add '(deprecated <date>)' to the end of the master password name,
where <date> is replaced with the date that the password was deprecated.
When doing so, be sure to also change the name used in the *accounts*
file so that the existing passwords do not change. That way you do not
have to update all of your passwords at once. Rather, you update the
high value ones immediately and migrate the others as you get time.
Using this approach your master password file might look like this::
passwords = {{
'default': """l8i6-v?>GCTQK"oz3yzZg5Ne=&,.!*Q$2ddEaZbESwnl<4*BRi1D887XQ!W4/&}}e""",
'derrick and peter (deprecated 120301)':
"""[Y$*{{QCf"?yvDc'{{4v?4r.iA0b3brHY z40;lZIs~bjj<DpDz&wK!XCWq=,gb}}-|""",
'derrick and peter': """h#KLT@f0IN(srTs$CBqRvMowBfiCT26q\yox(]w!PSlj_|ZMuDZ|{{P0Jo4:aa4M"""
'derrick and debbie': """impulse nostril double irony conflate rookie posting blind""",
}}
Generally one uses the default password for the personal passwords, and
only creates new shared master passwords. In this case, one member of
the group uses their master password to generate the shared password
for the group. And of course, you should strive to keep your master
passwords completely secure. Never disclose a master password to anyone
else unless you plan to share that particular master password with them
to generate shared passwords.
default_password
~~~~~~~~~~~~~~~~
The ID of the default master password::
default_password = "default"
This password will be used when an account does not explicitly specify
a master password. It is recommended you set the default master password
once and after that never change it, because if you do, the passwords
that rely on it will also change. You are given a very secure default
password when your master password file is initially created for you,
and there is rarely a good reason to replace it.
Using a value of None for default_password disables the default
password, forcing you to always specify a master password. If the master
password is not given in the accounts file, it will be requested when
Abraxas is run, which allows you to use a master password that is not
stored in the master password file. This provides the ultimate in
security for stealth accounts in that even if someone guessed the name
of your stealth account and had access to your private GPG key, perhaps
because you were compelled to give it to them, they still could not
regenerate the pass phrase for your stealth account because it requires
a master password that only you know but can plausibly deny having.
password_overrides
~~~~~~~~~~~~~~~~~~
A dictionary that contains passwords for specific accounts. These
passwords will be produced rather than the generated passwords. For
example::
password_overrides = {{
'yahoo': 'lollipop',
'nytimes': 'excelsior',
}}
Password overrides are generally used in two situations. First is when
a password is provided to you (you have no or limited ability to choose
it). Second is for the accounts you have not yet migrated to the new
passwords generated by Abraxas.
additional_master_password_files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A list of additional master password files. This is helpful in cases
where you want to have a separate file for passwords shared with others.
The additional master password files must also be encrypted. If they
are truly shared, then you will want to encrypt them using multiple
recipients.
An additional master password file is also a Python file, and the only
things that are used by Abraxas in this file are the dictionaries named
*passwords* and *password_overrides*.
You can specify a single master password file using a string, and
multiple master password files as a list of strings. Here is how to
specify a single additional master password file::
additional_master_password_files = "business/master.gpg"
Here is how you specify multiple additional master password files::
additional_master_password_files = [
"business/master.gpg",
"charity/master.gpg"
]
Accounts File
+++++++++++++
The accounts file is by default '~/.config/abraxas/accounts', but could
also end with either a '.gpg' or '.asc' extension if it is encrypted.
It starts out importing some character sets. You are free to modify
these but there is generally no reason to. They are there to help you
create alphabets for your passwords. A function exclude() is also
defined, which allows you to create an alphabet by removing characters
from the preexisting ones. You can combine character sets simply by summing them.
The accounts file is a Python file that contains variables that are used
by the password program. When created it will lead off with some useful
imports. The *dedent* function is used to strip off leading white space
from multiline remarks. The passwords.charsets import provides
a collection of useful character sets::
LOWERCASE = "abcdefghijklmnopqrstuvwxyz"
UPPERCASE = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
LETTERS = LOWERCASE + UPPERCASE
DIGITS = "0123456789"
ALPHANUMERIC = LETTERS + DIGITS
HEXDIGITS = "0123456789abcdef"
PUNCTUATION = """!"#$%&'()*+,-./:;<=>?@[\]^_`{{|}}~"""
WHITESPACE = " \t"
PRINTABLE = ALPHANUMERIC + PUNCTUATION + WHITESPACE
DISTINGUISHABLE = exclude(ALPHANUMERIC, 'Il1O0\\t')
Finally, the *exclude* function is used to remove characters from
a character set.
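A minimal sketch of such a function (for illustration; the actual
definition is part of the boilerplate Abraxas generates)::
    def exclude(chars, excluded):
        # return chars with every character in excluded removed
        return ''.join(c for c in chars if c not in excluded)
For example, the DISTINGUISHABLE set above is built by removing the
easily confused characters from ALPHANUMERIC.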
The following attributes are read and used by the password program if
they exist in an accounts file.
log_file
~~~~~~~~
Specifies the location of the log file. If not given, it defaults to
'~/.config/abraxas/log'. An absolute path should be used to
specify the file. If a '.gpg' or '.asc' suffix is given on this file, it
will be encrypted using your public key. Without encryption, this file
leaks account names.
archive_file
~~~~~~~~~~~~
Specifies the location of the archive file. If not given, it defaults to
'~/.config/abraxas/archive.gpg'. An absolute path should be used to
specify the file. The file should end with a .gpg extension.
gpg_id
~~~~~~
The GPG ID of the user (it is used to encrypt the archive file). It
would either be the email address associated with the ID, or the eight
digit hexadecimal GPG key ID if you have multiple keys associated with
the same email address.
accounts
~~~~~~~~
A dictionary where each entry represents either an account or
a template. By convention, templates have an ID that starts with '='.
Templates are used to limit the information you need to give in an
account. You just create or use a template that has the desired base
information. Then when creating an account, you can refer to the
template and only specify the fields that need to be unique for that
account. The template for an account can be another account or
a template. In this way templates are just accounts that are not
associated with any particular account in the real world. For example::
accounts = {{
"=words": {{ # typically used for Linux pass phrases
'type': 'words',
'num-words': 4,
'autotype': "{{password}}{{return}}",
}},
"gmail": {{
'template': "=words",
'username': "derrickAsh",
'url': "https://accounts.google.com",
'master': 'derrick',
'window': [
'Google Accounts*',
'Gmail*',
],
'autotype': "{{username}}{{tab}}{{password}}{{return}}",
}},
...
}}
In this example '=words' is specified as the template for 'gmail' (it is
a purely optional convention to add a leading = to account names that
are intended to be used only as templates). Thus any field specified in
'=words' that is not specified in 'gmail' is inherited by 'gmail'. Any
field specified in 'gmail' overrides the field with the same name from
'=words' when using 'gmail'. This process of inheritance can chain
through any number of templates or accounts. For example, you can create
another account, say 'gmail-work' that uses 'gmail' as a template but
overrides the 'username'.
The ID associated with an account is used in the process of generating
the secrets for the account. For this reason you should choose IDs that
are unambiguous and unlikely to change. The resulting IDs may be long
and hard to type. You can use the *aliases* entry to specify shorter
names that can be used as an alternative to the primary account ID. For
example, when creating your gmail account, it is a good idea to add your
username to the account ID, because in the future you might create
additional gmail accounts. So, *gmail-username* would be a good account
name. Then you should add a short name like *gmail* as an alias to the
one you use the most. If at some point you migrate to a new gmail
account for your day-to-day use, you can move the *gmail* alias to this
new account without changing the generated password.
additional_accounts
~~~~~~~~~~~~~~~~~~~
A list of additional account files. This is helpful in cases where you
want to have a separate file for accounts shared with someone else. In
this way you can share the details of the shared accounts file without
exposing your personal accounts. The additional account files may also
be encrypted. If they are truly shared, then you will want to encrypt
them using multiple recipients.
An additional accounts file is also a Python file, and the only thing
that is used by Abraxas in this file is a dictionary named *accounts*.
It is generally a good idea to start from a copy of the original
accounts file and simply delete unnecessary definitions (*log_file*,
*archive_file* and *gpg_id*) and the non-shared accounts. In this way,
you still can use the character sets that are defined at the top of
the file.
You can specify a single account file using a string, and multiple
account files as a list of strings. Here is how to specify a single
additional account file::
additional_accounts = "business/accounts"
Here is how you specify multiple additional account files::
additional_accounts = ["business/accounts", "charity/accounts"]
Accounts Fields
+++++++++++++++
Each dictionary in *accounts* may contain a number of fields that are
described next. When first created the accounts dictionary comes with
some useful templates and an example account entry that is commented
out. Feel free to modify the templates and delete the example account.
template
~~~~~~~~
A string containing the ID of the template for this account (explained
above).
master
~~~~~~
A string containing the ID of the master password for this account.
It is recommended that each account explicitly declare its master
password (perhaps through a template). That way existing passwords do
not change if you were to change your default master password.
version
~~~~~~~
The version is a string and its contents are arbitrary, however when its
contents change so too does the generated password. So it can be as
simple as a number or it could be a date or whatever you like. But it is
good if you pick a convention and stick with it so that if you somehow
lose your accounts file you still have some hope of recovering your
passwords.
Some websites put odd restrictions on the generated passwords, such as
it must contain a digit and a symbol or it imposes a limit on the
maximum number of repeated characters. Some of these restrictions can be
satisfied by adding a prefix or a suffix, but for others, like the
repeated character limit, there is no built in support in Abraxas to
always satisfy them. In this case you can simply bump the version until
you get a password that meets their requirements.
password-type
~~~~~~~~~~~~~
The type of password to generate. Should be either 'words' (default) to
generate pass phrases or 'chars' to generate passwords.
num-words
~~~~~~~~~
The number of words to use in the pass phrase when 'type' is 'words'
(default is 4).
separator
~~~~~~~~~
A string that is used as the inter-word separator when 'type' is
'words'. If not given, a space is used.
num-chars
~~~~~~~~~
The number of characters to use in the passwords when 'type' is 'chars'
(default is 12).
alphabet
~~~~~~~~
A string containing the characters to use when creating a password when
'type' is 'chars'. The default alphabet consists of the standard upper
and lower case letters along with the digits.
prefix
~~~~~~
A string whose contents are added to the beginning of a password or
passphrase.
suffix
~~~~~~
A string whose contents are added to the end of a password or
passphrase.
aliases
~~~~~~~
List of names that can be used as aliases for this account. This
feature is often used to specify a shorter and easier to type name for
the account.
The secrets are generated based on the primary account name (the key for
dictionary that describes the account). As such, that name should be
chosen so that it is unambiguous and you will never be tempted to change
it. That often results in a name that is too long to type easily. This
entry allows you to specify one or more names that can be used as
aliases for the primary account name. For example, you might want to
choose a name like "gmail-derrickAsh" as the primary name of your gmail
account and "gmail" as an alias. This would allow you to later create
another gmail account and make it your primary gmail account simply by
moving the "gmail" alias the new account.
When sharing your accounts you may not wish to share your aliases. For
example, if both you and your partner have accounts at Chase you may
want to both use the alias Chase to refer to two different accounts.
You can arrange this by using some Python code as follows::
from getpass import getuser
accounts = {{
'chase-amy': {{
'aliases': ['chase'] if getuser() == 'amy' else []
...
}},
'chase-laura': {{
'aliases': ['chase'] if getuser() == 'laura' else []
...
}},
}}
username
~~~~~~~~
A string containing the username for the account.
account
~~~~~~~
Either an account identifier for the account or a list containing
multiple account identifiers. Account identifiers must be given as
strings.
email
~~~~~
A string containing the email address associated with the account.
url
~~~
A string containing the web address of the account or a list of strings
each containing a web address.
If a list of URLs is provided, the first will be used with the
``--browser`` and ``--default-browser`` command line arguments. In this
case, the browser will be started and directed to display the first
address. All the addresses are used in account discovery. If a URL
component is discovered in a title bar, it will be compared against all
of the URLs given in the list looking for a match. The URLs may be glob
strings to generalize the matching. Given that the first URL can be sent
to the browser it is best not to use globbing in the first URL.
When a URL is used in account discovery, the presence of the
communication protocol is significant. If the URL starts with
'https://', then Abraxas insists on the use of an encrypted link.
If the link is not encrypted, the account will not be selected as
a match and a warning will be issued (this is a relatively common way of
tricking you into disclosing your password). Even if the URL
does not start with 'https://', Abraxas will still require
an encrypted link if PREFER_HTTPS is set to True in ``password/prefs.py``
unless the URL starts with 'http://'.
remarks
~~~~~~~
A string containing any relevant remarks about the account. You can
create a multiline remark as follows::
'remarks': dedent("""
Wireless network settings:
SSID: ourhouse
Network security: WPA2 Personal
""")
security questions
~~~~~~~~~~~~~~~~~~
A list of strings containing the security questions they force you to
answer. The string does not need to contain the question verbatim,
a shortened version that is sufficient for you to identify which of the
questions you need to provide the answer to is enough. For example,
a typical list of security questions might be::
'security questions': [
"first teacher's name",
"name of elementary school",
],
When initially giving the answers to these questions, you will have to
select the questions you will answer, enter them into the accounts file,
then get the answers by running Abraxas, and finally copy the
answers into the web page for your account. In this way, your answers
will be quite unpredictable, even to those that know you well.
The answers to the security questions will change if you change your
security questions. Even the smallest change will result in a completely
different answer. Once you have given the answers to your account
provider you must not change the question at all unless you are willing
to go through the trouble of updating the answers.
window
~~~~~~
This may be either a glob string or a list of glob strings that match
the title of the web page used to enter the username/password for the
account. This is used to determine which account should be used if no
account name is given on the command line.
This enables you to set up a hot key, such as Alt-P, to run 'abraxas
--autotype', which will identify which account to use from the active
window title and then use the *autotype* field to log you in.
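The matching is ordinary shell-style globbing. As an illustration of
the kind of comparison involved (a sketch using Python's fnmatch, not
Abraxas internals)::
    from fnmatch import fnmatch
    title = 'Gmail - Inbox - Mozilla Firefox'
    print(fnmatch(title, 'Gmail*'))       # True, this account matches
    print(fnmatch(title, 'CHASE Bank*'))  # False, it does not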
When using commands from a shell the title of the window is generally
unaffected by the command that is running. However, you can write
a simple script that first sets the window title and then runs the
command. Here is an example of such a script for mutt::
#!/bin/sh
xdotool getactivewindow set_window --name "Mutt"
mutt
Alternatively, you can switch to Lilyterm, which is a Linux terminal
emulator that I can recommend and that plays particularly nicely with
Abraxas. It copies the command being run to the window title so it
can be used to determine which account to use.
Generally the window feature works well with web browsers, though some
sites neglect to put identifying information in the title bar of their
login page. This can be addressed in Firefox and Thunderbird by
installing the 'Hostname in Titlebar' add on. In Chrome, use 'Url in
Title'. They add the URL to the title bar, making it available to be
matched with a window glob string. These add-ons also add the protocol
to the title. That allows you to key the password in such a way
that it will not autotype unless the connection is encrypted (the
protocol is https).
In its default configuration, Abraxas recognizes the components
in a 'Hostname in Titlebar' title: the title, the hostname, and the
communication protocol (http or https). It compares those components
to the corresponding entries in each account. The title is
compared to the *window* entries and the hostname and protocol are
compared against the *url*. If no match is made with these components,
then the raw title is compared against the *window* entries.
When sharing your accounts with a partner you may not wish to share your
window settings. For example, if both you and your partner have
accounts at Chase and you each want the window title on the
Chase web page to trigger your own account. You can arrange this by using
some Python code as follows::
from getpass import getuser
accounts = {{
'chase-amy': {{
'window': ['CHASE Bank*'] if getuser() == 'amy' else []
}},
'chase-laura': {{
'window': ['CHASE Bank*'] if getuser() == 'laura' else []
}},
}}
You might also find that you need different passwords on different
machines. For example, you may have root access on several machines,
each of which has a different root password. You can handle this as
follows::
from socket import gethostname
accounts = {{
'root-mars': {{
'template': '=words',
'window': ['su'] if gethostname() == 'mars' else []
}},
'root-venus': {{
'template': '=words',
'window': ['su'] if gethostname() == 'venus' else []
}},
}}
autotype
~~~~~~~~
A string containing a script that controls autotyping (when the -t or
--autotype command line option is specified). The script consists of
characters that will be emitted verbatim and codes that specify actions
to take. Primarily the action is to replace the code with a character,
a field from the account, or a secret. But the sleep action can be used
to cause a pause in the typing. The following actions are supported:
| {{username}} Replaced with the username for the account.
| {{account}} Replaced with the account number for the account.
| {{url}} Replaced with the URL for the account.
| {{email}} Replaced with the email address for the account.
| {{remarks}} Replaced with the remarks for the account.
| {{password}} Replaced with the password for the account.
| {{question *N*}} Replaced with security question *N* (*N* is an integer).
| {{answer *N*}} Replaced with the answer to security question *N* (*N* is an integer).
| {{sleep *S*}} Typing is paused for *S* seconds (*S* a real number)
| {{tab}} Replaced with a tab.
| {{return}} Replaced with newline.
The default autotype script is
"{{username}}{{tab}}{{password}}{{return}}"
Other Fields
~~~~~~~~~~~~
The value of all other fields will be printed when the user requests all
information about the account.
Words File
++++++++++
The words file is '~/.config/abraxas/words'. The use of this file is
optional. Abraxas has its own words that it uses if you do not provide
a file yourself. It should contain a large number of words (thousands),
one word per line. The more words your file contains, the more secure
your pass phrases are; however, no more than 65,536 (2^16) are used.
Do not change this file once you have started creating passwords, and be
sure to back it up. Any change to this file will cause the generated
pass phrases to change, which means you will not be able to use
Abraxas to login to existing accounts that use pass phrases.
EXAMPLE
=======
Master Password File
++++++++++++++++++++
Here is a representative master password file
(~/.config/abraxas/master.gpg)::
dict_hash = "d9aa1c08e08d6cacdf82819eeb5832429eadb95a"
secrets_hash = "db7ce3fc4a9392187d0a8df7c80b0cdfd7b1bc22"
passwords = {{
'derrick and peter': "e9a7a4246a6a95f179cd4579e6f9cb69",
'derrick and debbie': "60b56e021118ca2a261f405e15ac0165",
'default': """[Y$*{{QCf"?yvDc'{{4v?4r.iA0b3brHY z40;lZIs~bjj<DpDz&wK!XCWq=,gb}}-|""",
}}
default_password = 'default'
password_overrides = {{
'yahoo': 'lollipop',
'nytimes': 'excelsior',
}}
Accounts File
+++++++++++++
Here is a representative accounts file (~/.config/abraxas/accounts) with
the boilerplate code generated by Abraxas itself stripped off for
brevity::
# Give the desired location of the file
logfile = '~/.config/abraxas/log'
# Account Information
accounts = {{
# Templates
"=words": {{ # typically used for Linux pass phrases
'type': 'words',
'num-words': 4,
'autotype': "{{password}}{{return}}",
}},
"=chars": {{ # typically used for web passwords
'type': 'chars',
'num-chars': 12,
'alphabet': ALPHANUMERIC + PUNCTUATION,
'autotype': "{{username}}{{tab}}{{password}}{{return}}",
}},
# Accounts
"login": {{
'template': "=words",
'window': [
'su',
'su *',
]
}},
"mail": {{
'template': "login",
'window': 'Mutt *',
}},
"ssh": {{
'template': "login",
'version': 'ssh',
'window': 'tcsh: *',
}},
"bank": {{
'template': "=chars",
'username': "derrickash",
'email': "derrickAsh@gmail.com",
'url': "https://hpcu.com",
'account': "1987-357836",
'window': [
'HP Credit Union*',
'Hewlett-Packard Credit Union*',
],
'autotype': "{{account}}{{tab}}{{password}}{{return}}",
}},
"gmail": {{
'template': "=words",
'username': "derrickAsh",
'email': "derrick.ash@yahoo.com",
'url': "https://accounts.google.com",
'security questions': [
"first teacher's name",
"name of elementary school",
],
'window': [
'Google Accounts*',
'Gmail*',
],
'autotype': "{{username}}{{tab}}{{password}}{{return}}",
}},
"yahoo": {{
'template': "=chars",
'username': "derrickAsh",
'email': "derrickAsh@gmail.com",
'url': "https://login.yahoo.com",
'window': 'Sign into Yahoo!*',
}},
"nytimes": {{
'template': "=chars",
'username': "derrickAsh",
'email': "derrickAsh@gmail.com",
'url': "https://myaccount.nytimes.com/auth/login",
'window': '*The New York Times*',
}},
"consumer-reports": {{
'template': "=chars",
'master': 'derrick and debbie',
'username': "DandD",
'url': "https://ec.consumerreports.org/ec/myaccount/login.htm",
'window': 'My account login*',
}},
}}
CONFIGURATION
=============
The file ``passwords/prefs.py`` in the source code contains various
configuration settings that can be set to change the behavior of
Abraxas. You should be careful when changing these. Some settings
can be changed with little concern, but others match the implementation
and changing them may require changes to the underlying code.
SEE ALSO
========
abraxas(1), abraxas(3)
}'''
}
# Generate restructured text {{{1
def write(genRST=False):
for each in [PROGRAM_MANPAGE, API_MANPAGE, CONFIG_MANPAGE]:
rst = dedent(each['contents'][1:-1]).format(
date=DATE,
version=VERSION,
search_fields=', '.join(SEARCH_FIELDS)
)
# generate reStructuredText file (only used for debugging)
if genRST:
print("generating %s.%s.rst" % (each['name'], each['sect']))
with open('%s.%s.rst' % (each['name'], each['sect']), 'w') as f:
f.write(rst)
# Generate man page
print("generating %s.%s" % (each['name'], each['sect']))
with open('%s.%s' % (each['name'], each['sect']), 'w') as f:
f.write(publish_string(rst, writer=manpage.Writer()).decode())
if __name__ == '__main__':
write(True)
# vim: set sw=4 sts=4 formatoptions=ntcqwa12 et spell:
| KenKundert/abraxas | manpage.py | Python | gpl-3.0 | 83,088 | 0.008425 |
# Copyright (c) 2013-2016 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
from configobj import ConfigObj, flatten_errors, get_extra_values, \
ConfigObjError
from validate import Validator
import xdg.BaseDirectory
from .exceptions import InvalidSettingsError, CannotParseConfigFileError
from khal import __productname__
from ..log import logger
from .utils import is_timezone, weeknumber_option, config_checks, \
expand_path, expand_db_path, is_color
SPECPATH = os.path.join(os.path.dirname(__file__), 'khal.spec')
def _find_configuration_file():
"""Return the configuration filename.
This function builds the list of paths known by khal and
then return the first one which exists. The first paths
searched are the ones described in the XDG Base Directory
Standard. Each one of this path ends with
DEFAULT_PATH/DEFAULT_FILE.
On failure, the path DEFAULT_PATH/DEFAULT_FILE, prefixed with
    a dot, is searched for in the user's home directory. Ultimately,
DEFAULT_FILE is searched in the current directory.
"""
DEFAULT_FILE = __productname__ + '.conf'
DEFAULT_PATH = __productname__
resource = os.path.join(DEFAULT_PATH, DEFAULT_FILE)
paths = []
paths.extend([os.path.join(path, resource)
for path in xdg.BaseDirectory.xdg_config_dirs])
paths.append(os.path.expanduser(os.path.join('~', '.' + resource)))
paths.append(os.path.expanduser(DEFAULT_FILE))
for path in paths:
if os.path.exists(path):
return path
return None
def get_config(config_path=None):
"""reads the config file, validates it and return a config dict
:param config_path: path to a custom config file, if none is given the
default locations will be searched
:type config_path: str
:returns: configuration
:rtype: dict
"""
if config_path is None:
config_path = _find_configuration_file()
logger.debug('using the config file at {}'.format(config_path))
try:
user_config = ConfigObj(config_path,
configspec=SPECPATH,
interpolation=False,
file_error=True,
)
except ConfigObjError as error:
        logger.fatal('parsing the config file failed with the following error: '
'{}'.format(error))
logger.fatal('if you recently updated khal, the config file format '
'might have changed, in that case please consult the '
'CHANGELOG or other documentation')
raise CannotParseConfigFileError()
fdict = {'timezone': is_timezone,
'expand_path': expand_path,
'expand_db_path': expand_db_path,
'weeknumbers': weeknumber_option,
'color': is_color,
}
validator = Validator(fdict)
results = user_config.validate(validator, preserve_errors=True)
abort = False
for section, subsection, error in flatten_errors(user_config, results):
abort = True
if isinstance(error, Exception):
logger.fatal(
'config error:\n'
'in [{}] {}: {}'.format(section[0], subsection, error))
else:
for key in error:
if isinstance(error[key], Exception):
logger.fatal('config error:\nin {} {}: {}'.format(
sectionize(section + [subsection]),
key,
str(error[key]))
)
if abort or not results:
raise InvalidSettingsError()
config_checks(user_config)
extras = get_extra_values(user_config)
for section, value in extras:
if section == ():
logger.warn('unknown section "{}" in config file'.format(value))
else:
section = sectionize(section)
logger.warn('unknown key or subsection "{}" in '
'section "{}"'.format(value, section))
return user_config
def sectionize(sections, depth=1):
"""converts list of string into [list][[of]][[[strings]]]"""
this_part = depth * '[' + sections[0] + depth * ']'
if len(sections) > 1:
return this_part + sectionize(sections[1:], depth=depth + 1)
else:
return this_part
| dzoep/khal | khal/settings/settings.py | Python | mit | 5,410 | 0 |
"""
Python Compression and Archiving Library
@author: Clivern U{hello@clivern.com}
"""
__VERSION__ = "1.0.0" | Clivern/PyArchiver | pyarchiver/__init__.py | Python | mit | 110 | 0.009091 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slicing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class SliceTest(XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.slice(i, [2], [4])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([2, 3, 4, 5], result)
def test3D(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
with self.test_scope():
o = array_ops.slice(i, [1, 2, 2], [1, 1, 4])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[6, 5, 4, 3]]], result)
class StridedSliceTest(XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.strided_slice(i, [2], [6], [2])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([2, 4], result)
  def test1DNegativeStride(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.strided_slice(i, [6], [2], [-2])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([6, 4], result)
def test3D(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
with self.test_scope():
o = array_ops.strided_slice(i, [0, 2, 2], [2, 3, 6], [1, 1, 2])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[1, 9]], [[6, 4]]], result)
def test3DNegativeStride(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[3, 4, 10])
with self.test_scope():
o = array_ops.strided_slice(i, [2, 2, 6], [0, 0, 2], [-1, -1, -2])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0],
[4, 5, 2, 4, 3, 7, 6, 8, 9, 4]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[4, 3, 4, 5, 7, 6, 5, 3, 4, 5],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7],
[7, 1, 7, 1, 8, 1, 8, 1, 3, 1]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9],
[9, 9, 5, 5, 6, 6, 3, 3, 6, 6]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[9, 8],
[1, 1]],
[[2, 4],
[5, 7]]], result)
if __name__ == "__main__":
googletest.main()
| npuichigo/ttsflow | third_party/tensorflow/tensorflow/compiler/tests/slice_ops_test.py | Python | apache-2.0 | 5,088 | 0.004324 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_profile', '0004_auto_20150802_0153'),
]
operations = [
migrations.RemoveField(
model_name='imagerprofile',
name='name',
),
migrations.AddField(
model_name='imagerprofile',
name='nickname',
field=models.CharField(max_length=128, null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='address',
field=models.TextField(null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='camera',
field=models.CharField(help_text=b'What is the make and model of your camera?', max_length=128, null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='photography_type',
field=models.CharField(blank=True, max_length=64, null=True, help_text=b'What is your photography type?', choices=[(b'H', b'Hobbist'), (b'A', b'Abstract'), (b'B', b'Black and White'), (b'P', b'Panorama'), (b'J', b'Journalism')]),
),
migrations.AlterField(
model_name='imagerprofile',
name='website_url',
field=models.URLField(null=True, blank=True),
),
]
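

# Usage sketch (only the stock manage.py commands are assumed; nothing here is
# specific to this project):
#
#   python manage.py migrate imager_profile        # apply this migration
#   python manage.py migrate imager_profile 0004   # reverse it
#
# Reversing should restore the removed 'name' field and drop 'nickname'.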
| tpeek/bike_safety | imagersite/imager_profile/migrations/0005_auto_20150802_0303.py | Python | mit | 1,491 | 0.001341 |
import BaseHTTPServer
import logging
import os
import sys
import threading
import time
from chirp.common.conf import (BARIX_STATUS_HOST, BARIX_STATUS_PORT,
BARIX_HOST, BARIX_PORT)
from chirp.stream import barix
_TIMEOUT_S = 2
_POLLING_FREQUENCY_S = 5
_STATUS_PAGE = """<html><head>
<title>Barix Status</title>
<meta http-equiv=refresh content="10; url=.">
</head><body>
<h1>Barix Status</h1>
<small><i>This page will automatically update every 10 seconds.</i></small><br>
<small><i>Levels are averaged over the last %(level_avg_window_minutes)d
minutes.</i></small><br>
<br><br>
As of %(status_time)s:
<table>
<tr><td>Status</td><td>%(status)s</td></tr>
<tr><td>Left Level</td><td>%(left_level)s (avg %(left_level_avg)s)</td></tr>
<tr><td>Right Level</td><td>%(right_level)s (avg %(right_level_avg)s)</td></tr>
<tr><td>Live365?</td><td>%(live365_connected)s</td></tr>
<tr><td>Archiver?</td><td>%(archiver_connected)s</td></tr>
</table>
</body></html>
"""
# If we poll every 5s, 360 samples = 30 minutes
LEVEL_HISTORY_MAX_SIZE = 360
level_history = []


class _RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
        # Render the status page from the most recent Barix poll.
        b_obj = self.barix
NOT_CONNECTED = "<b>NOT CONNECTED</b>"
left_level_avg = 0
right_level_avg = 0
level_avg_window_minutes = 0
if level_history:
N = len(level_history)
left_level_avg = sum(L for L, _ in level_history) / N
right_level_avg = sum(R for _, R in level_history) / N
level_avg_window_minutes = N * _POLLING_FREQUENCY_S / 60
barix_info = {
"status_time": b_obj.last_update_time_str,
"status": b_obj.status,
"left_level": b_obj.left_level,
"right_level": b_obj.right_level,
"left_level_avg": int(left_level_avg),
"right_level_avg": int(right_level_avg),
"level_avg_window_minutes": int(level_avg_window_minutes),
"live365_connected": NOT_CONNECTED,
"archiver_connected": NOT_CONNECTED,
}
# TODO(trow): Check IP address.
if "12345" in b_obj.clients:
barix_info["live365_connected"] = "connected"
# TODO(trow): Check IP address.
if "12346" in b_obj.clients:
barix_info["archiver_connected"] = "connected"
response_str = _STATUS_PAGE % barix_info
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(len(response_str)))
self.end_headers()
self.wfile.write(response_str)

    def log_message(self, format, *args):
        pass  # Throw away log messages for now.
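
# A minimal client-side check of the status page (a sketch; it assumes the
# BARIX_STATUS_* defaults imported above and the Python 2 stdlib this module
# already targets):
#
#   import urllib2
#   url = "http://%s:%d/" % (BARIX_STATUS_HOST, BARIX_STATUS_PORT)
#   page = urllib2.urlopen(url).read()
#   assert "Barix Status" in page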


def handle_requests(srv, done):
    # Serve requests until the main thread sets `done`; the socket timeout
    # configured in main() keeps handle_request() from blocking forever.
    while not done.isSet():
try:
srv.handle_request()
except Exception, err:
logging.exception("Swallowed exception")


def poll_barix(b_obj, log_fh):
    # Take one sample: refresh the Barix state, record audio levels for the
    # rolling average, and append a Live365 connection record to the log.
try:
if not b_obj.ping():
return
level_history.append(
(float(b_obj.left_level), float(b_obj.right_level)))
if len(level_history) > LEVEL_HISTORY_MAX_SIZE:
level_history.pop(0)
if log_fh:
now = int(b_obj.last_update_time)
            ip, far_port = b_obj.clients.get("12345", ("None", 0))
            # Log format: "<unix time> <far port, 4 hex digits> <client ip>";
            # port 12345 is treated as the Live365 connection (see do_GET).
            log_info = "%d %04x %s\n" % (now, int(far_port), ip)
            log_fh.write(log_info)
            log_fh.flush()
except Exception, err:
logging.exception("Swallowed exception")


def main():
log_path = os.path.join(os.environ["HOME"], "live365_connection.log")
log_fh = open(log_path, "a")
_RequestHandler.barix = barix.Barix(BARIX_HOST, BARIX_PORT)
srv = BaseHTTPServer.HTTPServer((BARIX_STATUS_HOST, BARIX_STATUS_PORT),
_RequestHandler)
srv.socket.settimeout(_TIMEOUT_S)
done = threading.Event()
th = threading.Thread(target=handle_requests, args=(srv, done))
th.start()
while True:
try:
poll_barix(_RequestHandler.barix, log_fh)
time.sleep(_POLLING_FREQUENCY_S)
except KeyboardInterrupt:
break
except Exception:
logging.exception("Swallowed exception")
if log_fh:
log_fh.close()
done.set()
    th.join()  # Wait for the serving thread to exit.


if __name__ == "__main__":
main()
| chirpradio/chirpradio-machine | chirp/stream/do_proxy_barix_status.py | Python | apache-2.0 | 4,423 | 0.000452 |