text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34)
---|---|---|---|---|---|---
"""
Given Style Rules, create an SLD in XML format and add it to a layer
"""
if __name__=='__main__':
import os, sys
DJANGO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(DJANGO_ROOT)
os.environ['DJANGO_SETTINGS_MODULE'] = 'geonode.settings'
import logging
import os
from random import choice
import re
from xml.etree.ElementTree import XML, ParseError
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin # python 3.x
from django.utils.translation import ugettext as _
from django.conf import settings
from geonode.contrib.dataverse_connect.layer_metadata import LayerMetadata
from geonode.maps.models import Layer
from geonode.contrib.dataverse_styles.geoserver_rest_util import make_geoserver_json_put_request, make_geoserver_put_sld_request
from geonode.contrib.dataverse_styles.geonode_get_services import get_style_name_for_layer
LOGGER = logging.getLogger(__name__)
class StyleLayerMaker:
"""
Given Style Rules, create SLD XML and add it to a layer
Basic usage:
# Init object with an existing layer name
style_layer_maker = StyleLayerMaker('income_2so')
# Use some SLD info in XML format
    sld_xml_content = open('test_rules.xml', 'r').read() # 'test_rules.xml' contains SLD info in XML format
# Add sld_xml_content to the layer as the default style
success = style_layer_maker.add_sld_xml_to_layer(sld_xml_content)
# If operation failed, check error messages
if not success:
if style_layer_maker.err_found:
            print('\n'.join(style_layer_maker.err_msgs))
"""
def __init__(self, layer_name):
self.gs_catalog_obj = Layer.objects.gs_catalog
self.layer_name = layer_name
self.err_found = False
self.err_msgs = []
self.layer_metadata = None # LayerMetadata object
def add_err_msg(self, err_msg):
self.err_found = True
self.err_msgs.append(err_msg)
LOGGER.warn(err_msg)
def create_layer_metadata(self, layer_name):
if layer_name is None:
self.layer_metadata = None
return
#self.layer_metadata = LayerMetadata(**dict(geonode_layer_name=layer_name))
self.layer_metadata = LayerMetadata.create_metadata_using_layer_name(layer_name)
def get_layer_metadata(self):
"""Return a LayerMetadata object, if it exists"""
        if self.layer_metadata is None:
            return None
        return self.layer_metadata
def add_sld_to_layer(self, formatted_sld_object):
# update layer via 2 PUT calls to the geoserver
return self.add_sld_xml_to_layer_via_puts(formatted_sld_object,\
self.layer_name)
# use direct python, but doesn't properly clear tile cache
#return self.add_sld_xml_to_layer(formatted_sld_object)
def get_url_to_set_sld_rules(self, style_name):
"""
Create url to set the new SLD to the layer via a put
#http://localhost:8000/gs/rest/styles/social_disorder_nydj_k_i_v.xml
This will be sent with a XML content containing the SLD rules
"""
if not style_name:
return None
# (1) Given the layer, retrieve the SLD containing the style name
#
# (to do)
# (2) Format the url for adding/retrieving styles
#
url_fragment = 'rest/styles/%s.xml' % (style_name)
full_url = urljoin(settings.GEOSERVER_BASE_URL, url_fragment)
return full_url
def get_set_default_style_url(self, layer_name):
"""
Given a layer name, return the REST url to set a default style
"""
if not layer_name:
return None
url_fragment = 'rest/layers/%s:%s' % (settings.DEFAULT_WORKSPACE, layer_name)
full_url = urljoin(settings.GEOSERVER_BASE_URL, url_fragment)
return full_url
def add_sld_xml_to_layer_via_puts(self, formatted_sld_object, layer_name):
if not formatted_sld_object or not layer_name:
return False
print '-' * 40
print 'formatted_sld_object.formatted_sld_xml'
print formatted_sld_object.formatted_sld_xml
print '-' * 40
# (1) Verify the XML
if not self.is_xml_verified(formatted_sld_object.formatted_sld_xml):
self.add_err_msg('The style information contains invalid XML')
return False
# (2) Set the new SLD to the layer via a put
#http://localhost:8000/gs/rest/styles/social_disorder_nydj_k_i_v.xml
# --------------------------------------
# Retrieve the style name for this layer
# --------------------------------------
(success, style_name_or_err_msg) = get_style_name_for_layer(layer_name)
if not success:
self.add_err_msg(style_name_or_err_msg)
return False
geoserver_sld_url = self.get_url_to_set_sld_rules(style_name_or_err_msg)
print 'geoserver_sld_url', geoserver_sld_url
print '-' * 40
print 'formatted_sld_object.formatted_sld_xml', formatted_sld_object.formatted_sld_xml
print '-' * 40
(response, content) = make_geoserver_put_sld_request(geoserver_sld_url, formatted_sld_object.formatted_sld_xml)
print 'response', response
print '-' * 40
print 'content', content
print '-' * 40
if response is None or not response.status == 200:
self.add_err_msg('Failed to set new style as the default')
return False
# (3) Set the new style as the default for the layer
# Send a PUT to the catalog to set the default style
json_str = """{"layer":{"defaultStyle":{"name":"%s"},"styles":{},"enabled":true}}""" % formatted_sld_object.sld_name
geoserver_json_url = self.get_set_default_style_url(self.layer_name)
if geoserver_json_url is None:
self.add_err_msg('Failed to format the url to set new style for layer: %s' % self.layer_name)
return False
(response, content) = make_geoserver_json_put_request(geoserver_json_url, json_str)
if response is None or not response.status in (200, 201):
self.add_err_msg('Failed to set new style as the default')
return False
self.create_layer_metadata(self.layer_name)
print '-' * 40
print ('layer %s saved with style %s' % (self.layer_name, formatted_sld_object.sld_name))
return True
def add_sld_xml_to_layer(self, formatted_sld_object):
"""
NOT USING, tiles were not getting refreshed properly
Keeping code around in case needed in the future
"""
if not formatted_sld_object:
return False
print 'type(formatted_sld_object)', type(formatted_sld_object)
# (1) Verify the XML
if not self.is_xml_verified(formatted_sld_object.formatted_sld_xml):
self.add_err_msg('The style information contains invalid XML')
return False
# (2) Retrieve the layer
layer_obj = self.gs_catalog_obj.get_layer(self.layer_name)
if layer_obj is None:
self.add_err_msg('The layer "%s" does not exist' % self.layer_name)
return False
self.show_layer_style_list(layer_obj)
#self.clear_alternate_style_list(layer_obj)
# (3) Create a style name
#stylename = self.layer_name + self.get_random_suffix()
#while self.is_style_name_in_catalog(stylename):
# stylename = self.layer_name + self.get_random_suffix()
style_name = formatted_sld_object.sld_name
# (4) Add the xml style to the catalog, with the new name
try:
# sync names
self.gs_catalog_obj.create_style(style_name, formatted_sld_object.formatted_sld_xml)
        except Exception:
self.add_err_msg('Failed to add style to the catalog: %s' % style_name)
return False
# (5) Pull the style object back from the catalog
new_style_obj = self.gs_catalog_obj.get_style(style_name)
if new_style_obj is None:
self.add_err_msg('Failed to find recently added style in the catalog: %s' % style_name)
return False
# (6) Set the new style as the default for the layer
layer_obj.default_style = new_style_obj
# Save it!
try:
self.gs_catalog_obj.save(layer_obj)
        except Exception:
            self.add_err_msg('Failed to save new default style "%s" with layer' % style_name)
return False
self.create_layer_metadata(self.layer_name)
print ('layer %s saved with style %s' % (self.layer_name, style_name))
return True
def get_random_suffix(self, num_chars=4):
return "_".join([choice('qwertyuiopasdfghjklzxcvbnm0123456789') for i in range(num_chars)])
def get_style_from_name(self, style_name):
"""
Get the style object from the style name
:returns: Style object or None
"""
if not style_name:
return None
return self.gs_catalog_obj.get_style(style_name)
def is_style_name_in_catalog(self, style_name):
"""
Is the style name in the Catalog?
"""
if not style_name:
return False
style_obj = self.get_style_from_name(style_name)
if style_obj is None:
return False
return True
def clear_alternate_style_list(self, layer_obj):
"""
Clear existing alternate styles from layer
(ask Matt how to delete a style)
"""
if not layer_obj.__class__.__name__ == 'Layer':
return False
# clear style list
layer_obj._set_alternate_styles([])
# save cleared list
self.gs_catalog_obj.save(layer_obj)
return True
def add_style_to_alternate_list(self, layer_obj, style_obj):
"""
Add a layer to the alternate list, to preserve it
"""
        if not (layer_obj.__class__.__name__ == 'Layer' and style_obj.__class__.__name__ == 'Style'):
return False
# get style list
alternate_layer_style_list = layer_obj._get_alternate_styles()
# does style already exist in list?
if self.is_style_name_in_catalog(style_obj.name) is True:
return False
# add new style to list
alternate_layer_style_list.append(style_obj)
# update the layer with the new list
layer_obj._set_alternate_styles(alternate_layer_style_list)
return True
#self.gs_catalog_obj.save(layer_obj)
def show_layer_style_list(self, layer_obj):
print('Show layer styles')
if not layer_obj.__class__.__name__ == 'Layer':
print ('not a layer', type(layer_obj))
return
sl = [layer_obj.default_style.name]
for s in layer_obj._get_alternate_styles():
sl.append(s.name)
for idx, sname in enumerate(sl):
if idx == 0:
print('%s (default)' % sname)
continue
print (sname)
def is_xml_verified(self, sld_xml_str):
if not sld_xml_str:
return False
try:
sldxml = XML(sld_xml_str)
valid_url = re.compile(settings.VALID_SLD_LINKS)
for elem in sldxml.iter(tag='{http://www.opengis.net/sld}OnlineResource'):
if '{http://www.w3.org/1999/xlink}href' in elem.attrib:
link = elem.attrib['{http://www.w3.org/1999/xlink}href']
if valid_url.match(link) is None:
err_msg = "External images in your SLD file are not permitted. Please contact us if you would like your SLD images hosted on %s" % (settings.SITENAME)
self.add_err_msg(err_msg)
return False
        except ParseError:
self.add_err_msg('Your SLD file contains invalid XML')
return False
return True
if __name__=='__main__':
slm = StyleLayerMaker('income_2so')
sld_xml_content = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_rules', 'test_rules_03.xml'), 'r').read()
slm.add_sld_xml_to_layer(sld_xml_content)
| cga-harvard/cga-worldmap | geonode/contrib/dataverse_styles/style_layer_maker.py | Python | gpl-3.0 | 12,317 | 0.004871 |
# encoding: utf-8
"""
Paging capabilities for IPython.core
Authors:
* Brian Granger
* Fernando Perez
Notes
-----
For now this uses ipapi, so it can't be in IPython.utils. If we can get
rid of that dependency, we could move it there.
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import re
import sys
import tempfile
from io import UnsupportedOperation
from IPython import get_ipython
from IPython.core.error import TryNext
from IPython.utils.data import chop
from IPython.utils import io
from IPython.utils.process import system
from IPython.utils.terminal import get_terminal_size
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
esc_re = re.compile(r"(\x1b[^m]+m)")
def page_dumb(strng, start=0, screen_lines=25):
"""Very dumb 'pager' in Python, for when nothing else works.
Only moves forward, same interface as page(), except for pager_cmd and
mode."""
out_ln = strng.splitlines()[start:]
screens = chop(out_ln, screen_lines - 1)
if len(screens) == 1:
print(os.linesep.join(screens[0]), file=io.stdout)
else:
last_escape = ""
for scr in screens[0:-1]:
hunk = os.linesep.join(scr)
print(last_escape + hunk, file=io.stdout)
if not page_more():
return
esc_list = esc_re.findall(hunk)
if len(esc_list) > 0:
last_escape = esc_list[-1]
print(last_escape + os.linesep.join(screens[-1]), file=io.stdout)
def _detect_screen_size(screen_lines_def):
"""Attempt to work out the number of lines on the screen.
This is called by page(). It can raise an error (e.g. when run in the
test suite), so it's separated out so it can easily be called in a try block.
"""
TERM = os.environ.get('TERM', None)
if not((TERM == 'xterm' or TERM == 'xterm-color') and sys.platform != 'sunos5'):
# curses causes problems on many terminals other than xterm, and
# some termios calls lock up on Sun OS5.
return screen_lines_def
try:
import termios
import curses
except ImportError:
return screen_lines_def
# There is a bug in curses, where *sometimes* it fails to properly
# initialize, and then after the endwin() call is made, the
# terminal is left in an unusable state. Rather than trying to
# check everytime for this (by requesting and comparing termios
# flags each time), we just save the initial terminal state and
# unconditionally reset it every time. It's cheaper than making
# the checks.
term_flags = termios.tcgetattr(sys.stdout)
# Curses modifies the stdout buffer size by default, which messes
# up Python's normal stdout buffering. This would manifest itself
# to IPython users as delayed printing on stdout after having used
# the pager.
#
# We can prevent this by manually setting the NCURSES_NO_SETBUF
# environment variable. For more details, see:
# http://bugs.python.org/issue10144
NCURSES_NO_SETBUF = os.environ.get('NCURSES_NO_SETBUF', None)
os.environ['NCURSES_NO_SETBUF'] = ''
# Proceed with curses initialization
try:
scr = curses.initscr()
except AttributeError:
# Curses on Solaris may not be complete, so we can't use it there
return screen_lines_def
screen_lines_real, screen_cols = scr.getmaxyx()
curses.endwin()
# Restore environment
if NCURSES_NO_SETBUF is None:
del os.environ['NCURSES_NO_SETBUF']
else:
os.environ['NCURSES_NO_SETBUF'] = NCURSES_NO_SETBUF
# Restore terminal state in case endwin() didn't.
termios.tcsetattr(sys.stdout, termios.TCSANOW, term_flags)
# Now we have what we needed: the screen size in rows/columns
return screen_lines_real
# print '***Screen size:',screen_lines_real,'lines x',\
# screen_cols,'columns.' # dbg
def page(strng, start=0, screen_lines=0, pager_cmd=None):
"""Display a string, piping through a pager after a certain length.
strng can be a mime-bundle dict, supplying multiple representations,
keyed by mime-type.
The screen_lines parameter specifies the number of *usable* lines of your
terminal screen (total lines minus lines you need to reserve to show other
information).
If you set screen_lines to a number <=0, page() will try to auto-determine
your screen size and will only use up to (screen_size+screen_lines) for
printing, paging after that. That is, if you want auto-detection but need
to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
auto-detection without any lines reserved simply use screen_lines = 0.
If a string won't fit in the allowed lines, it is sent through the
specified pager command. If none given, look for PAGER in the environment,
and ultimately default to less.
If no system pager works, the string is sent through a 'dumb pager'
written in python, very simplistic.
"""
# for compatibility with mime-bundle form:
if isinstance(strng, dict):
strng = strng['text/plain']
# Some routines may auto-compute start offsets incorrectly and pass a
# negative value. Offset to 0 for robustness.
start = max(0, start)
# first, try the hook
ip = get_ipython()
if ip:
try:
ip.hooks.show_in_pager(strng)
return
except TryNext:
pass
# Ugly kludge, but calling curses.initscr() flat out crashes in emacs
TERM = os.environ.get('TERM', 'dumb')
if TERM in ['dumb', 'emacs'] and os.name != 'nt':
print(strng)
return
# chop off the topmost part of the string we don't want to see
str_lines = strng.splitlines()[start:]
str_toprint = os.linesep.join(str_lines)
num_newlines = len(str_lines)
len_str = len(str_toprint)
# Dumb heuristics to guesstimate number of on-screen lines the string
# takes. Very basic, but good enough for docstrings in reasonable
# terminals. If someone later feels like refining it, it's not hard.
numlines = max(num_newlines, int(len_str / 80) + 1)
screen_lines_def = get_terminal_size()[1]
# auto-determine screen size
if screen_lines <= 0:
try:
screen_lines += _detect_screen_size(screen_lines_def)
except (TypeError, UnsupportedOperation):
print(str_toprint, file=io.stdout)
return
# print 'numlines',numlines,'screenlines',screen_lines # dbg
if numlines <= screen_lines:
# print '*** normal print' # dbg
print(str_toprint, file=io.stdout)
else:
# Try to open pager and default to internal one if that fails.
# All failure modes are tagged as 'retval=1', to match the return
# value of a failed system command. If any intermediate attempt
# sets retval to 1, at the end we resort to our own page_dumb() pager.
pager_cmd = get_pager_cmd(pager_cmd)
pager_cmd += ' ' + get_pager_start(pager_cmd, start)
if os.name == 'nt':
if pager_cmd.startswith('type'):
# The default WinXP 'type' command is failing on complex
# strings.
retval = 1
else:
fd, tmpname = tempfile.mkstemp('.txt')
try:
os.close(fd)
with open(tmpname, 'wt') as tmpfile:
tmpfile.write(strng)
cmd = "%s < %s" % (pager_cmd, tmpname)
# tmpfile needs to be closed for windows
if os.system(cmd):
retval = 1
else:
retval = None
finally:
os.remove(tmpname)
else:
try:
retval = None
# if I use popen4, things hang. No idea why.
#pager,shell_out = os.popen4(pager_cmd)
pager = os.popen(pager_cmd, 'w')
try:
pager_encoding = pager.encoding or sys.stdout.encoding
pager.write(py3compat.cast_bytes_py2(
strng, encoding=pager_encoding))
finally:
retval = pager.close()
except IOError as msg: # broken pipe when user quits
if msg.args == (32, 'Broken pipe'):
retval = None
else:
retval = 1
except OSError:
# Other strange problems, sometimes seen in Win2k/cygwin
retval = 1
if retval is not None:
page_dumb(strng, screen_lines=screen_lines)
def page_file(fname, start=0, pager_cmd=None):
"""Page a file, using an optional pager command and starting line.
"""
pager_cmd = get_pager_cmd(pager_cmd)
pager_cmd += ' ' + get_pager_start(pager_cmd, start)
try:
if os.environ['TERM'] in ['emacs', 'dumb']:
raise EnvironmentError
system(pager_cmd + ' ' + fname)
except:
try:
if start > 0:
start -= 1
page(open(fname).read(), start)
except:
print('Unable to show file', repr(fname))
def get_pager_cmd(pager_cmd=None):
"""Return a pager command.
Makes some attempts at finding an OS-correct one.
"""
if os.name == 'posix':
default_pager_cmd = 'less -r' # -r for color control sequences
elif os.name in ['nt', 'dos']:
default_pager_cmd = 'type'
if pager_cmd is None:
try:
pager_cmd = os.environ['PAGER']
except:
pager_cmd = default_pager_cmd
return pager_cmd
def get_pager_start(pager, start):
"""Return the string for paging files with an offset.
This is the '+N' argument which less and more (under Unix) accept.
"""
if pager in ['less', 'more']:
if start:
start_string = '+' + str(start)
else:
start_string = ''
else:
start_string = ''
return start_string
# (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch()
if os.name == 'nt' and os.environ.get('TERM', 'dumb') != 'emacs':
import msvcrt
def page_more():
""" Smart pausing between pages
@return: True if need print more lines, False if quit
"""
io.stdout.write('---Return to continue, q to quit--- ')
ans = msvcrt.getwch()
if ans in ("q", "Q"):
result = False
else:
result = True
io.stdout.write("\b" * 37 + " " * 37 + "\b" * 37)
return result
else:
def page_more():
ans = py3compat.input('---Return to continue, q to quit--- ')
if ans.lower().startswith('q'):
return False
else:
return True
def snip_print(str, width=75, print_full=0, header=''):
"""Print a string snipping the midsection to fit in width.
print_full: mode control:
- 0: only snip long strings
- 1: send to page() directly.
- 2: snip long strings and ask for full length viewing with page()
Return 1 if snipping was necessary, 0 otherwise."""
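    # Illustrative example (not from the original source): with the defaults,
    # snip_print('x' * 200, width=20) prints 'xxxxxxx <...> xxxxxxx' and
    # returns 1 because the string had to be snipped.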
if print_full == 1:
page(header + str)
return 0
print(header, end=' ')
if len(str) < width:
print(str)
snip = 0
else:
whalf = int((width - 5) / 2)
print(str[:whalf] + ' <...> ' + str[-whalf:])
snip = 1
if snip and print_full == 2:
if py3compat.input(header + ' Snipped. View (y/n)? [N]').lower() == 'y':
page(str)
return snip
| mattvonrocketstein/smash | smashlib/ipy3x/core/page.py | Python | mit | 12,350 | 0.001053 |
# Copyright (c) 2014 by pyramid_decoy authors and contributors
# <see AUTHORS file>
#
# This module is part of pyramid_decoy and is released under
# the MIT License (MIT): http://opensource.org/licenses/MIT
"""Main decoy module."""
__version__ = "0.2.0"
SETTINGS_PREFIX = "decoy"
def includeme(configurator):
"""
Configure decoy plugin on pyramid application.
:param pyramid.configurator.Configurator configurator: pyramid's
configurator object
"""
configurator.registry["decoy"] = get_decoy_settings(
configurator.get_settings()
)
configurator.add_route("decoy", pattern="/*p")
configurator.add_view("pyramid_decoy.views.decoy", route_name="decoy")
def get_decoy_settings(settings):
"""
Extract decoy settings out of all.
:param dict settings: pyramid app settings
:returns: decoy settings
:rtype: dict
"""
return {
k.split(".", 1)[-1]: v
for k, v in settings.items()
if k[: len(SETTINGS_PREFIX)] == SETTINGS_PREFIX
}
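# Minimal usage sketch (illustrative; these setting keys are hypothetical and
# not part of the original project):
if __name__ == "__main__":
    sample = {"decoy.url": "https://example.com", "sqlalchemy.url": "sqlite://"}
    # Only the "decoy."-prefixed key survives, with the prefix stripped.
    print(get_decoy_settings(sample))  # -> {'url': 'https://example.com'}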
| fizyk/pyramid_decoy | src/pyramid_decoy/__init__.py | Python | mit | 1,031 | 0 |
from utils import secret
import requests
import re
import json
import os
def gf_families_ignore_camelcase():
"""Find family names in the GF collection which cannot be derived by
splitting the filename using a camelcase function e.g
VT323, PTSans.
If these filenames are split, they will be V T 323 and P T Sans."""
families = {}
api_url = 'https://www.googleapis.com/webfonts/v1/webfonts?key={}'.format(
secret('GF_API_KEY')
)
r = requests.get(api_url)
for item in r.json()["items"]:
if re.search(r"[A-Z]{2}", item['family']):
families[item["family"].replace(" ", "")] = item["family"]
return families
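# Illustration (hypothetical API payload, added for clarity): a family named
# "PT Sans" contains two consecutive capitals, so it is stored as
# {"PTSans": "PT Sans"}; a family like "Roboto" has no such run and is skipped.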
def main():
current_dir = os.path.dirname(__file__)
families = gf_families_ignore_camelcase()
out = os.path.join(current_dir, "gf_families_ignore_camelcase.json")
    with open(out, 'w') as f:
        json.dump(families, f)
if __name__ == "__main__":
main()
| googlefonts/gfregression | Lib/gfregression/gf_families_ignore_camelcase.py | Python | apache-2.0 | 936 | 0.002137 |
import cffi
#
# This is only a demo based on the GMP library.
# There is a rather more complete (but perhaps outdated) version available at:
# http://bazaar.launchpad.net/~tolot-solar-empire/+junk/gmpy_cffi/files
#
ffibuilder = cffi.FFI()
ffibuilder.cdef("""
typedef struct { ...; } MP_INT;
typedef MP_INT mpz_t[1];
int mpz_init_set_str (MP_INT *dest_integer, char *src_cstring, int base);
void mpz_add (MP_INT *sum, MP_INT *addend1, MP_INT *addend2);
char * mpz_get_str (char *string, int base, MP_INT *integer);
""")
ffibuilder.set_source('_gmp_cffi', "#include <gmp.h>",
libraries=['gmp', 'm'])
if __name__ == '__main__':
ffibuilder.compile(verbose=True)
| hipnusleo/laserjet | resource/pypi/cffi-1.9.1/demo/gmp_build.py | Python | apache-2.0 | 733 | 0.001364 |
import sys
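# Usage sketch (inferred from the code below): python revcomp.py input.fasta
# prints the reverse complement of every FASTA record to stdout.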
inFile = open(sys.argv[1],'r')
# IUPAC complement map (S and W are self-complementary; K<->M, R<->Y, B<->V, H<->D)
nuc = {'A':'T','T':'A','G':'C','C':'G','K':'M','M':'K','R':'Y','Y':'R','S':'S','W':'W','B':'V','V':'B','H':'D','D':'H','X':'N','N':'N'}
def revComp(seq):
rev = ''
for i in range(len(seq) - 1,-1,-1):
rev += nuc[seq[i]]
return rev
header = ''
seq = ''
for line in inFile:
if line[0] == ">":
if header != '':
print header
print revComp(seq.upper())
header = line.strip()
seq = ''
else:
seq += line.strip()
print header
print revComp(seq.upper())
| pandeyravi15/SGMBL | script/revcomp.py | Python | gpl-3.0 | 576 | 0.064236 |
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse
from actstream.models import user_stream
from dnstorm.app import DNSTORM_URL
from dnstorm.app.utils import get_option
from dnstorm.app.models import Problem, Idea
def base(request):
"""
Provides basic variables used for all templates.
"""
context = dict()
context['dnstorm_url'] = DNSTORM_URL
# Links
if not context.get('site_title', None):
context['site_title'] = '%s | %s' % (
get_option('site_title'), get_option('site_description'))
context['site_url'] = get_option('site_url')
context['login_form'] = AuthenticationForm()
context['login_url'] = reverse('login') + '?next=' + request.build_absolute_uri() if 'next' not in request.GET else ''
context['logout_url'] = reverse('logout') + '?next=' + request.build_absolute_uri() if 'next' not in request.GET else ''
# Checks
context['is_update'] = 'update' in request.resolver_match.url_name
# Activity
context['user_activity'] = user_stream(request.user, with_user_activity=True) if request.user.is_authenticated() else None
context['user_activity_counter'] = get_option('user_%d_activity_counter' % request.user.id) if request.user.is_authenticated() else None
return context
| vmassuchetto/dnstorm | dnstorm/app/context_processors.py | Python | gpl-2.0 | 1,367 | 0.003658 |
from __future__ import absolute_import, unicode_literals
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import django
from django.db.models.sql import compiler
import re
NEEDS_AGGREGATES_FIX = django.VERSION[:2] < (1, 7)
# query_class returns the base class to use for Django queries.
# The custom 'SqlServerQuery' class derives from django.db.models.sql.query.Query
# which is passed in as "QueryClass" by Django itself.
#
# SqlServerQuery overrides:
# ...insert queries to add "SET IDENTITY_INSERT" if needed.
# ...select queries to emulate LIMIT/OFFSET for sliced queries.
# Pattern to scan a column data type string and split the data type from any
# constraints or other included parts of a column definition. Based upon
# <column_definition> from http://msdn.microsoft.com/en-us/library/ms174979.aspx
_re_data_type_terminator = re.compile(
r'\s*\b(?:' +
r'filestream|collate|sparse|not|null|constraint|default|identity|rowguidcol' +
r'|primary|unique|clustered|nonclustered|with|on|foreign|references|check' +
')',
re.IGNORECASE,
)
class SQLCompiler(compiler.SQLCompiler):
def resolve_columns(self, row, fields=()):
values = []
index_extra_select = len(self.query.extra_select)
for value, field in zip_longest(row[index_extra_select:], fields):
# print '\tfield=%s\tvalue=%s' % (repr(field), repr(value))
if field:
try:
value = self.connection.ops.convert_values(value, field)
except ValueError:
pass
values.append(value)
return row[:index_extra_select] + tuple(values)
def compile(self, node):
"""
Added with Django 1.7 as a mechanism to evalute expressions
"""
sql_function = getattr(node, 'sql_function', None)
if sql_function and sql_function in self.connection.ops._sql_function_overrides:
sql_function, sql_template = self.connection.ops._sql_function_overrides[sql_function]
if sql_function:
node.sql_function = sql_function
if sql_template:
node.sql_template = sql_template
return super(SQLCompiler, self).compile(node)
def _fix_aggregates(self):
"""
MSSQL doesn't match the behavior of the other backends on a few of
the aggregate functions; different return type behavior, different
function names, etc.
MSSQL's implementation of AVG maintains datatype without proding. To
match behavior of other django backends, it needs to not drop remainders.
E.g. AVG([1, 2]) needs to yield 1.5, not 1
"""
for alias, aggregate in self.query.aggregate_select.items():
sql_function = getattr(aggregate, 'sql_function', None)
if not sql_function or sql_function not in self.connection.ops._sql_function_overrides:
continue
sql_function, sql_template = self.connection.ops._sql_function_overrides[sql_function]
if sql_function:
self.query.aggregate_select[alias].sql_function = sql_function
if sql_template:
self.query.aggregate_select[alias].sql_template = sql_template
def as_sql(self, with_limits=True, with_col_aliases=False):
# Django #12192 - Don't execute any DB query when QS slicing results in limit 0
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
if NEEDS_AGGREGATES_FIX:
# Django 1.7+ provides SQLCompiler.compile as a hook
self._fix_aggregates()
# Get out of the way if we're not a select query or there's no limiting involved.
has_limit_offset = with_limits and (self.query.low_mark or self.query.high_mark is not None)
try:
if not has_limit_offset:
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
setattr(self.query, '_mssql_ordering_not_allowed', with_col_aliases)
# let the base do its thing, but we'll handle limit/offset
sql, fields = super(SQLCompiler, self).as_sql(
with_limits=False,
with_col_aliases=with_col_aliases,
)
if has_limit_offset:
if ' order by ' not in sql.lower():
# Must have an ORDER BY to slice using OFFSET/FETCH. If
# there is none, use the first column, which is typically a
# PK
sql += ' ORDER BY 1'
sql += ' OFFSET %d ROWS' % (self.query.low_mark or 0)
if self.query.high_mark is not None:
sql += ' FETCH NEXT %d ROWS ONLY' % (self.query.high_mark - self.query.low_mark)
finally:
if not has_limit_offset:
# remove in case query is ever reused
delattr(self.query, '_mssql_ordering_not_allowed')
return sql, fields
def get_ordering(self):
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
if getattr(self.query, '_mssql_ordering_not_allowed', False):
if django.VERSION[1] == 1 and django.VERSION[2] < 6:
return (None, [])
return (None, [], [])
return super(SQLCompiler, self).get_ordering()
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
# search for after table/column list
_re_values_sub = re.compile(
r'(?P<prefix>\)|\])(?P<default>\s*|\s*default\s*)values(?P<suffix>\s*|\s+\()?',
re.IGNORECASE
)
# ... and insert the OUTPUT clause between it and the values list (or DEFAULT VALUES).
_values_repl = r'\g<prefix> OUTPUT INSERTED.{col} INTO @sqlserver_ado_return_id\g<default>VALUES\g<suffix>'
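    # Illustrative transformation (sketch, not from the original comments):
    #   INSERT INTO [table] ([id], [name]) VALUES (%s, %s)
    # becomes, when the inserted id must be returned,
    #   INSERT INTO [table] ([id], [name]) OUTPUT INSERTED.[id] INTO @sqlserver_ado_return_id VALUES (%s, %s)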
def as_sql(self, *args, **kwargs):
# Fix for Django ticket #14019
if not hasattr(self, 'return_id'):
self.return_id = False
result = super(SQLInsertCompiler, self).as_sql(*args, **kwargs)
if isinstance(result, list):
# Django 1.4 wraps return in list
return [self._fix_insert(x[0], x[1]) for x in result]
sql, params = result
return self._fix_insert(sql, params)
def _fix_insert(self, sql, params):
"""
Wrap the passed SQL with IDENTITY_INSERT statements and apply
other necessary fixes.
"""
meta = self.query.get_meta()
if meta.has_auto_field:
if hasattr(self.query, 'fields'):
# django 1.4 replaced columns with fields
fields = self.query.fields
auto_field = meta.auto_field
else:
# < django 1.4
fields = self.query.columns
auto_field = meta.auto_field.db_column or meta.auto_field.column
auto_in_fields = auto_field in fields
quoted_table = self.connection.ops.quote_name(meta.db_table)
if not fields or (auto_in_fields and len(fields) == 1 and not params):
# convert format when inserting only the primary key without
# specifying a value
sql = 'INSERT INTO {0} DEFAULT VALUES'.format(
quoted_table
)
params = []
elif auto_in_fields:
# wrap with identity insert
sql = 'SET IDENTITY_INSERT {table} ON;{sql};SET IDENTITY_INSERT {table} OFF'.format(
table=quoted_table,
sql=sql,
)
# mangle SQL to return ID from insert
# http://msdn.microsoft.com/en-us/library/ms177564.aspx
if self.return_id and self.connection.features.can_return_id_from_insert:
col = self.connection.ops.quote_name(meta.pk.db_column or meta.pk.get_attname())
# Determine datatype for use with the table variable that will return the inserted ID
pk_db_type = _re_data_type_terminator.split(meta.pk.db_type(self.connection))[0]
# NOCOUNT ON to prevent additional trigger/stored proc related resultsets
sql = 'SET NOCOUNT ON;{declare_table_var};{sql};{select_return_id}'.format(
sql=sql,
declare_table_var="DECLARE @sqlserver_ado_return_id table ({col_name} {pk_type})".format(
col_name=col,
pk_type=pk_db_type,
),
select_return_id="SELECT * FROM @sqlserver_ado_return_id",
)
output = self._values_repl.format(col=col)
sql = self._re_values_sub.sub(output, sql)
return sql, params
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
def as_sql(self):
sql, params = super(SQLUpdateCompiler, self).as_sql()
if sql:
# Need the NOCOUNT OFF so UPDATE returns a count, instead of -1
sql = 'SET NOCOUNT OFF; {0}; SET NOCOUNT ON'.format(sql)
return sql, params
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
def as_sql(self, qn=None):
self._fix_aggregates()
return super(SQLAggregateCompiler, self).as_sql(qn=qn)
class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
pass
try:
class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, SQLCompiler):
pass
except AttributeError:
pass
| theoriginalgri/django-mssql | sqlserver_ado/compiler.py | Python | mit | 10,087 | 0.002776 |
"""
Support for ISY994 fans.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/fan.isy994/
"""
import logging
from typing import Callable
from homeassistant.components.fan import (FanEntity, DOMAIN, SPEED_OFF,
SPEED_LOW, SPEED_MEDIUM,
SPEED_HIGH, SUPPORT_SET_SPEED)
from homeassistant.components.isy994 import (ISY994_NODES, ISY994_PROGRAMS,
ISYDevice)
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
VALUE_TO_STATE = {
0: SPEED_OFF,
63: SPEED_LOW,
64: SPEED_LOW,
190: SPEED_MEDIUM,
191: SPEED_MEDIUM,
255: SPEED_HIGH,
}
STATE_TO_VALUE = {}
for key in VALUE_TO_STATE:
STATE_TO_VALUE[VALUE_TO_STATE[key]] = key
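# Note: several raw ISY values map to the same named speed, so with
# insertion-ordered dicts (Python 3.7+) the reverse map keeps the last value
# seen per speed, e.g. SPEED_LOW -> 64 and SPEED_MEDIUM -> 191.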
def setup_platform(hass, config: ConfigType,
add_entities: Callable[[list], None], discovery_info=None):
"""Set up the ISY994 fan platform."""
devices = []
for node in hass.data[ISY994_NODES][DOMAIN]:
devices.append(ISYFanDevice(node))
for name, status, actions in hass.data[ISY994_PROGRAMS][DOMAIN]:
devices.append(ISYFanProgram(name, status, actions))
add_entities(devices)
class ISYFanDevice(ISYDevice, FanEntity):
"""Representation of an ISY994 fan device."""
@property
def speed(self) -> str:
"""Return the current speed."""
return VALUE_TO_STATE.get(self.value)
@property
def is_on(self) -> bool:
"""Get if the fan is on."""
return self.value != 0
def set_speed(self, speed: str) -> None:
"""Send the set speed command to the ISY994 fan device."""
self._node.on(val=STATE_TO_VALUE.get(speed, 255))
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn on command to the ISY994 fan device."""
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 fan device."""
self._node.off()
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
class ISYFanProgram(ISYFanDevice):
"""Representation of an ISY994 fan program."""
def __init__(self, name: str, node, actions) -> None:
"""Initialize the ISY994 fan program."""
super().__init__(node)
self._name = name
self._actions = actions
def turn_off(self, **kwargs) -> None:
"""Send the turn on command to ISY994 fan program."""
if not self._actions.runThen():
_LOGGER.error("Unable to turn off the fan")
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn off command to ISY994 fan program."""
if not self._actions.runElse():
_LOGGER.error("Unable to turn on the fan")
@property
def supported_features(self) -> int:
"""Flag supported features."""
return 0
| PetePriority/home-assistant | homeassistant/components/isy994/fan.py | Python | apache-2.0 | 3,213 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
cr.execute("""update purchase_order_line set
date_planned=%s
where
order_id=%s and
(date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
cr.execute("""update purchase_order set
minimum_planned_date=%s where id=%s""", (value, po.id))
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.purchase_id,sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
stock_picking p on (p.id=m.picking_id)
WHERE
p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
invoiced = False
if purchase.invoiced_rate == 100.00:
invoiced = True
res[purchase.id] = invoiced
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been paid"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Bases on incoming shipments: let you create an invoice when receptions are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums",help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
'date_order': fields.date.context_today,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
order = super(purchase_order, self).create(cr, uid, vals, context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
wf_service = netsvc.LocalService("workflow")
for id in unlink_ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {'warehouse_id': False}
supplier = address.browse(cr, uid, address_id)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
if not warehouse_id:
return {}
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
return {'value':{'location_id': warehouse.lot_input_id.id, 'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'])
supplier = partner.browse(cr, uid, partner_id)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays existing invoices of the given purchase order ids. It can either be in a list or in a form view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line]})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays existing picking orders of the given purchase order ids.
'''
mod_obj = self.pool.get('ir.model.data')
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
ctx = eval(action['context'])
ctx.update({
'search_default_purchase_id': ids[0]
})
if pick_ids and len(pick_ids) == 1:
form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
view_id = form_view_ids and form_view_ids[0] or False
action.update({
'views': [],
'view_mode': 'form',
'view_id': view_id,
'res_id': pick_ids[0]
})
action.update({
'context': ctx,
})
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def print_confirm(self,cr,uid,ids,context=None):
print "Confirmed"
def print_double(self,cr,uid,ids,context=None):
print "double Approval"
def print_router(self,cr,uid,ids,context=None):
print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the request for quotation and marks it as sent, so that we can see more easily the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', ids[0], 'send_rfq', cr)
datas = {
'model': 'purchase.order',
'ids': ids,
'form': self.read(cr, uid, ids[0], context=context),
}
return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not po.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define expense account for this company: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position or False
return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
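        # Illustrative example (hypothetical ids and amounts) of the dict returned above
        # for a single PO line:
        #   {'name': 'Ice cream', 'account_id': 17, 'price_unit': 12.5,
        #    'quantity': 4.0, 'product_id': 42, 'uos_id': 1,
        #    'invoice_line_tax_id': [(6, 0, [3])], 'account_analytic_id': False}
        # The (6, 0, [ids]) tuple is the standard many2many command that replaces the
        # relation with exactly the given ids.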
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
wf_service = netsvc.LocalService("workflow")
for p_id in ids:
# Deleting the existing instance of workflow for PO
wf_service.trg_delete(uid, 'purchase.order', p_id, cr)
wf_service.trg_create(uid, 'purchase.order', p_id, cr)
return True
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
                # if the company of the document is different from the current user's company,
                # force that company in the context, then re-browse the record to read the
                # property fields for the right company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
pay_acc_id = order.partner_id.property_account_payable.id
journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', order.company_id.id)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
            # generate one invoice line per PO line and link it to the created invoice (inv_id) and to the PO line
inv_lines = []
for po_line in order.order_line:
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoiced': True, 'invoice_lines': [(4, inv_line_id)]}, context=context)
# get invoice data and create invoice
inv_data = {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': pay_acc_id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.pricelist_id.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line': [(6, 0, inv_lines)],
'origin': order.name,
'fiscal_position': order.fiscal_position.id or False,
'payment_term': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# compute the invoice
inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]}, context=context)
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def action_cancel(self, cr, uid, ids, context=None):
wf_service = netsvc.LocalService("workflow")
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
if pick.state not in ('draft','cancel'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('First cancel all receptions related to this purchase order.'))
for pick in purchase.picking_ids:
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_cancel', cr)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel','draft'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('You must first cancel all receptions related to this purchase order.'))
if inv:
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'invoice_cancel', cr)
self.write(cr,uid,ids,{'state':'cancel'})
for (id, name) in self.name_get(cr, uid, ids):
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return True
def date_to_datetime(self, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 AM - because a time is needed.
:param str userdate: date string in in user time zone
:return: UTC datetime string for server-side use
"""
# TODO: move to fields.datetime in server after 7.0
user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + relativedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
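        # Illustrative conversion (hypothetical values): with context {'tz': 'Europe/Brussels'}
        # (UTC+2 in summer), the user date '2013-05-21' is interpreted as 12:00 local time and
        # returned as the UTC string '2013-05-21 10:00:00'. Without a timezone it is returned
        # unchanged as '2013-05-21 00:00:00'.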
def _prepare_order_picking(self, cr, uid, order, context=None):
return {
'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'partner_id': order.dest_address_id.id or order.partner_id.id,
'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
'type': 'in',
'purchase_id': order.id,
'company_id': order.company_id.id,
'move_lines' : [],
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': order_line.price_unit
}
def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates pickings and appropriate stock moves for given order lines, then
confirms the moves, makes them available, and confirms the picking.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise
        a standard incoming picking will be created to wrap the stock moves, as returned
by :meth:`~._prepare_order_picking`.
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: list of IDs of pickings used/created for the given order lines (usually just one)
"""
if not picking_id:
picking_id = self.pool.get('stock.picking').create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
todo_moves = []
stock_move = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
for order_line in order_lines:
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
if order_line.move_dest_id and order_line.move_dest_id.state != 'done':
order_line.move_dest_id.write({'location_id': order.location_id.id})
todo_moves.append(move)
stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
return [picking_id]
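        # Hedged sketch (not part of this module) of how an extension could split the moves
        # over several pickings, as suggested in the docstring: override _create_pickings(),
        # partition ``order_lines`` and call super() once per subset, e.g.
        #   def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
        #       urgent = [l for l in order_lines if is_urgent(l)]   # is_urgent() is hypothetical
        #       rest = [l for l in order_lines if not is_urgent(l)]
        #       ids = super(my_po, self)._create_pickings(cr, uid, order, urgent, picking_id, context=context)
        #       ids += super(my_po, self)._create_pickings(cr, uid, order, rest, False, context=context)
        #       return ids
        # (my_po and is_urgent() are hypothetical names.)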
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'shipped':False,
'invoiced':False,
'invoice_ids': [],
'picking_ids': [],
'partner_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
})
return super(purchase_order, self).copy(cr, uid, id, default, context)
def do_merge(self, cr, uid, ids, context=None):
"""
        Merge purchase orders of a similar type.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
        * Purchase Orders have the same stock location and the same pricelist
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: new purchase order id
"""
        #TOFIX: merged order lines should be unlinked
wf_service = netsvc.LocalService("workflow")
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
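        # Illustrative key (hypothetical ids): for fields ('partner_id', 'location_id',
        # 'pricelist_id'), make_key() returns something like
        #   (('location_id', 12), ('partner_id', 7), ('pricelist_id', 1))
        # i.e. a sorted, hashable tuple, so orders/lines sharing the same key can be
        # grouped in a dict and merged together.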
# Compute what the new orders should contain
new_orders = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'warehouse_id': porder.warehouse_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
for order_line in porder.order_line:
line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
o_line = order_infos['order_line'].setdefault(line_key, {})
if o_line:
# merge the line with an existing line
o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
else:
# append a new "standalone" line
for field in ('product_qty', 'product_uom'):
field_val = getattr(order_line, field)
if isinstance(field_val, browse_record):
field_val = field_val.id
o_line[field] = field_val
o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
# create the new order
neworder_id = self.create(cr, uid, order_data)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
return orders_info
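        # The mapping returned above has the shape {new_po_id: [old_po_ids]}, e.g.
        # {53: [17, 21]} (hypothetical ids): one freshly created draft order per group of
        # mergeable orders, while the old orders have been cancelled and their workflow
        # triggers redirected to the new order.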
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
                                   help=' * The \'Draft\' status is set automatically when the purchase order is in the draft state. \
                                       \n* The \'Confirmed\' status is set automatically when the purchase order is confirmed. \
                                       \n* The \'Done\' status is set automatically when the purchase order is set as done. \
                                       \n* The \'Cancelled\' status is set automatically when the user cancels the purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.boolean('Invoiced', readonly=True),
'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, context=None):
procurement_ids_to_cancel = []
for line in self.browse(cr, uid, ids, context=context):
if line.move_dest_id:
procurement_ids_to_cancel.extend(procurement.id for procurement in line.move_dest_id.procurements)
if procurement_ids_to_cancel:
self.pool['procurement.order'].action_cancel(cr, uid, procurement_ids_to_cancel)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order, as a string in
DEFAULT_SERVER_DATE_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=supplier_delay)
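        # Illustrative example (hypothetical values): with supplier_info.delay = 5 and
        # date_order_str = '2013-05-21', the returned Schedule Date is datetime(2013, 5, 26, 0, 0).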
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_supplierinfo = self.pool.get('product.supplierinfo')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
        #call name_get() with partner in the context to possibly match the name and description in the seller_ids field
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.date.context_today(self,cr,uid,context=context)
supplierinfo = False
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
                if (qty or 0.0) < min_qty: # if the supplier's minimal quantity is greater than the quantity entered by the user, use the minimal quantity
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
# - determine price_unit and taxes_id
if pricelist_id:
price = product_pricelist.supplier_price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
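        # The dict returned above follows the standard onchange convention:
        #   'value'   -> field values to write back into the form,
        #   'domain'  -> dynamic domains (here, restrict product_uom to the product's UoM category),
        #   'warning' -> optional {'title': ..., 'message': ...} popup.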
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
purchase_order_line()
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
}
def check_buy(self, cr, uid, ids, context=None):
        ''' Return True if the supply method of the MTO (make-to-order) product is 'buy'
'''
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.product_id.supply_method <> 'buy':
return False
return True
def check_supplier_info(self, cr, uid, ids, context=None):
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
            partner = procurement.product_id.seller_id # take the main supplier of the procurement's product
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
                raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
return True
def action_po_assign(self, cr, uid, ids, context=None):
""" This is action which call from workflow to assign purchase order to procurements
@return: True
"""
res = self.make_po(cr, uid, ids, context=context)
res = res.values()
        return len(res) and res[0] or 0 #TO CHECK: why the workflow raises an error if the return value is not an integer
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
        :param procurement: the procurement object generating the purchase order
        :param dict po_vals: field values for the new purchase order (the
                             ``order_line`` field will be overwritten with one
                             single line, as passed in ``line_vals``).
        :param dict line_vals: field values of the single purchase order line that
                               the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
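        # Illustrative call (hypothetical values): the single line is injected into the order
        # with the one2many "create" command (0, 0, line_vals), e.g.
        #   po_vals = {'partner_id': 7, 'location_id': 12, 'pricelist_id': 1,
        #              'order_line': [(0, 0, {'product_id': 42, 'product_qty': 5.0, ...})]}
        # where '...' stands for the remaining line fields built in make_po().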
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
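        # Illustrative scheduling chain (hypothetical values): with
        # procurement.date_planned = '2013-06-10 12:00:00', company.po_lead = 3 and
        # seller_delay = 7, the PO line Schedule Date becomes 2013-06-07 12:00 and the
        # PO Order Date becomes 2013-05-31 12:00.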
def _get_warehouse(self, procurement, user_company):
"""
        Return the warehouse containing the procurement stock location (or one of its ancestors).
        If none matches, return the first warehouse of the company.
"""
# TODO refactor the domain once we implement the "parent_of" domain operator
# NOTE This method has been copied in the `purchase_requisition` module to ensure
# retro-compatibility. This code duplication will be deleted in next stable version.
        # Do not forget to update both versions in case of modification.
company_id = (procurement.company_id or user_company).id
domains = [
[
'&', ('company_id', '=', company_id),
'|', '&', ('lot_stock_id.parent_left', '<', procurement.location_id.parent_left),
('lot_stock_id.parent_right', '>', procurement.location_id.parent_right),
('lot_stock_id', '=', procurement.location_id.id)
],
[('company_id', '=', company_id)]
]
cr, uid = procurement._cr, procurement._uid
context = procurement._context
Warehouse = self.pool['stock.warehouse']
for domain in domains:
ids = Warehouse.search(cr, uid, domain, context=context)
if ids:
return ids[0]
return False
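        # The first domain above relies on the nested-set (parent_left/parent_right) columns
        # of stock.location: a warehouse matches if its lot_stock_id is an ancestor of the
        # procurement location, or is that location itself. Illustrative check with
        # hypothetical bounds: a stock location with parent_left=10 / parent_right=20
        # contains a child location with parent_left=13. If no warehouse matches, the second
        # domain simply picks any warehouse of the company.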
def make_po(self, cr, uid, ids, context=None):
""" Make purchase order from procurement
        @return: Newly created purchase order ids, keyed by procurement id
"""
res = {}
if context is None:
context = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
partner_obj = self.pool.get('res.partner')
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
seq_obj = self.pool.get('ir.sequence')
for procurement in self.browse(cr, uid, ids, context=context):
res_id = procurement.move_id.id
            partner = procurement.product_id.seller_id # take the main supplier of the procurement's product
seller_qty = procurement.product_id.seller_qty
partner_id = partner.id
address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
pricelist_id = partner.property_product_pricelist_purchase.id
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty,seller_qty)
price = pricelist_obj.supplier_price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
            # pass lang and partner_id in the context so the purchase order line name is rendered in the supplier's language
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner_id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
name = product.partner_ref
if product.description_purchase:
name += '\n'+ product.description_purchase
line_vals = {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'move_dest_id': res_id,
'taxes_id': [(6,0,taxes)],
}
name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner_id,
'location_id': procurement.location_id.id,
'warehouse_id': self._get_warehouse(procurement, company),
'pricelist_id': pricelist_id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
'payment_term_id': partner.property_supplier_payment_term.id or False,
}
res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
return res
def _product_virtual_get(self, cr, uid, order_point):
procurement = order_point.procurement_id
if procurement and procurement.state != 'exception' and procurement.purchase_id and procurement.purchase_id.state in ('draft', 'confirmed'):
return None
return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None):
if mail.model == 'purchase.order':
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', mail.res_id, 'send_rfq', cr)
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
_columns = {
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
}
_defaults = {
'purchase_ok': 1,
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', context['default_res_id'], 'send_rfq', cr)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
wf_service = netsvc.LocalService("workflow")
for po_id in po_ids:
# Signal purchase order workflow that an invoice has been validated.
wf_service.trg_write(uid, 'purchase.order', po_id, cr)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| eneldoserrata/marcos_openerp | addons/purchase/purchase.py | Python | agpl-3.0 | 67,788 | 0.007612 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sparsemat
else:
import _sparsemat
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _sparsemat.SWIG_PyInstanceMethod_New
_swig_new_static_method = _sparsemat.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._par.array
import mfem._par.mem_manager
import mfem._par.globals
import mfem._par.vector
import mfem._par.operators
import mfem._par.matrix
import mfem._par.densemat
def RAP_P(A, R, ORAP):
r"""RAP_P(SparseMatrix A, SparseMatrix R, SparseMatrix ORAP) -> SparseMatrix"""
return _sparsemat.RAP_P(A, R, ORAP)
RAP_P = _sparsemat.RAP_P
def RAP_R(Rt, A, P):
r"""RAP_R(SparseMatrix Rt, SparseMatrix A, SparseMatrix P) -> SparseMatrix"""
return _sparsemat.RAP_R(Rt, A, P)
RAP_R = _sparsemat.RAP_R
def OperatorPtr2SparseMatrix(op):
r"""OperatorPtr2SparseMatrix(mfem::OperatorPtr op) -> SparseMatrix"""
return _sparsemat.OperatorPtr2SparseMatrix(op)
OperatorPtr2SparseMatrix = _sparsemat.OperatorPtr2SparseMatrix
def OperatorHandle2SparseMatrix(op):
r"""OperatorHandle2SparseMatrix(mfem::OperatorHandle op) -> SparseMatrix"""
return _sparsemat.OperatorHandle2SparseMatrix(op)
OperatorHandle2SparseMatrix = _sparsemat.OperatorHandle2SparseMatrix
class RowNode(object):
r"""Proxy of C++ mfem::RowNode class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
Value = property(_sparsemat.RowNode_Value_get, _sparsemat.RowNode_Value_set, doc=r"""Value : double""")
Prev = property(_sparsemat.RowNode_Prev_get, _sparsemat.RowNode_Prev_set, doc=r"""Prev : p.mfem::RowNode""")
Column = property(_sparsemat.RowNode_Column_get, _sparsemat.RowNode_Column_set, doc=r"""Column : int""")
def __init__(self):
r"""__init__(RowNode self) -> RowNode"""
_sparsemat.RowNode_swiginit(self, _sparsemat.new_RowNode())
__swig_destroy__ = _sparsemat.delete_RowNode
# Register RowNode in _sparsemat:
_sparsemat.RowNode_swigregister(RowNode)
class SparseMatrix(mfem._par.matrix.AbstractSparseMatrix):
r"""Proxy of C++ mfem::SparseMatrix class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(SparseMatrix self) -> SparseMatrix
__init__(SparseMatrix self, int nrows, int ncols=-1) -> SparseMatrix
__init__(SparseMatrix self, int * i) -> SparseMatrix
__init__(SparseMatrix self, int * i, bool ownij, bool owna, bool issorted) -> SparseMatrix
__init__(SparseMatrix self, int nrows, int ncols, int rowsize) -> SparseMatrix
__init__(SparseMatrix self, SparseMatrix mat, bool copy_graph=True, mfem::MemoryType mt=PRESERVE) -> SparseMatrix
__init__(SparseMatrix self, Vector v) -> SparseMatrix
"""
import numpy as np
from scipy.sparse import csr_matrix
if len(args) == 1 and isinstance(args[0], csr_matrix):
csr = args[0]
if np.real(csr).dtype != 'float64':
csr = csr.astype('float64')
i = np.ascontiguousarray(csr.indptr)
j = np.ascontiguousarray(csr.indices)
data = np.ascontiguousarray(csr.data)
m, n = csr.shape
this = _sparsemat.new_SparseMatrix([i, j, data, m, n])
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
_sparsemat.SparseMatrix_SetGraphOwner(self, False)
_sparsemat.SparseMatrix_SetDataOwner(self, False)
self._i_data = i
self._j_data = j
self._d_data = data
return
_sparsemat.SparseMatrix_swiginit(self, _sparsemat.new_SparseMatrix(*args))
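        # Illustrative construction from a SciPy matrix, handled by the branch in __init__
        # above (hedged sketch; assumes the PyMFEM parallel wrappers are importable as mfem.par):
        #   import mfem.par as mfem
        #   from scipy.sparse import csr_matrix
        #   A = mfem.SparseMatrix(csr_matrix([[1.0, 0.0], [0.0, 2.0]]))
        # The wrapper keeps self._i_data/_j_data/_d_data referenced because graph and data
        # ownership remain on the Python side (SetGraphOwner/SetDataOwner are set to False above).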
def UseGPUSparse(self, useGPUSparse_=True):
r"""UseGPUSparse(SparseMatrix self, bool useGPUSparse_=True)"""
return _sparsemat.SparseMatrix_UseGPUSparse(self, useGPUSparse_)
UseGPUSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_UseGPUSparse)
def UseCuSparse(self, useCuSparse_=True):
r"""UseCuSparse(SparseMatrix self, bool useCuSparse_=True)"""
return _sparsemat.SparseMatrix_UseCuSparse(self, useCuSparse_)
UseCuSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_UseCuSparse)
def MakeRef(self, master):
r"""MakeRef(SparseMatrix self, SparseMatrix master)"""
return _sparsemat.SparseMatrix_MakeRef(self, master)
MakeRef = _swig_new_instance_method(_sparsemat.SparseMatrix_MakeRef)
def Size(self):
r"""Size(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_Size(self)
Size = _swig_new_instance_method(_sparsemat.SparseMatrix_Size)
def Clear(self):
r"""Clear(SparseMatrix self)"""
return _sparsemat.SparseMatrix_Clear(self)
Clear = _swig_new_instance_method(_sparsemat.SparseMatrix_Clear)
def ClearGPUSparse(self):
r"""ClearGPUSparse(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ClearGPUSparse(self)
ClearGPUSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_ClearGPUSparse)
def ClearCuSparse(self):
r"""ClearCuSparse(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ClearCuSparse(self)
ClearCuSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_ClearCuSparse)
def Empty(self):
r"""Empty(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_Empty(self)
Empty = _swig_new_instance_method(_sparsemat.SparseMatrix_Empty)
def GetI(self, *args):
r"""
GetI(SparseMatrix self) -> int
GetI(SparseMatrix self) -> int const *
"""
return _sparsemat.SparseMatrix_GetI(self, *args)
GetI = _swig_new_instance_method(_sparsemat.SparseMatrix_GetI)
def GetJ(self, *args):
r"""
GetJ(SparseMatrix self) -> int
GetJ(SparseMatrix self) -> int const *
"""
return _sparsemat.SparseMatrix_GetJ(self, *args)
GetJ = _swig_new_instance_method(_sparsemat.SparseMatrix_GetJ)
def GetData(self, *args):
r"""
GetData(SparseMatrix self) -> double
GetData(SparseMatrix self) -> double const *
"""
return _sparsemat.SparseMatrix_GetData(self, *args)
GetData = _swig_new_instance_method(_sparsemat.SparseMatrix_GetData)
def GetMemoryI(self, *args):
r"""
GetMemoryI(SparseMatrix self) -> mfem::Memory< int >
GetMemoryI(SparseMatrix self) -> mfem::Memory< int > const &
"""
return _sparsemat.SparseMatrix_GetMemoryI(self, *args)
GetMemoryI = _swig_new_instance_method(_sparsemat.SparseMatrix_GetMemoryI)
def ReadI(self, on_dev=True):
r"""ReadI(SparseMatrix self, bool on_dev=True) -> int const *"""
return _sparsemat.SparseMatrix_ReadI(self, on_dev)
ReadI = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadI)
def WriteI(self, on_dev=True):
r"""WriteI(SparseMatrix self, bool on_dev=True) -> int *"""
return _sparsemat.SparseMatrix_WriteI(self, on_dev)
WriteI = _swig_new_instance_method(_sparsemat.SparseMatrix_WriteI)
def ReadWriteI(self, on_dev=True):
r"""ReadWriteI(SparseMatrix self, bool on_dev=True) -> int *"""
return _sparsemat.SparseMatrix_ReadWriteI(self, on_dev)
ReadWriteI = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadWriteI)
def HostReadI(self):
r"""HostReadI(SparseMatrix self) -> int const *"""
return _sparsemat.SparseMatrix_HostReadI(self)
HostReadI = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadI)
def HostWriteI(self):
r"""HostWriteI(SparseMatrix self) -> int *"""
return _sparsemat.SparseMatrix_HostWriteI(self)
HostWriteI = _swig_new_instance_method(_sparsemat.SparseMatrix_HostWriteI)
def HostReadWriteI(self):
r"""HostReadWriteI(SparseMatrix self) -> int *"""
return _sparsemat.SparseMatrix_HostReadWriteI(self)
HostReadWriteI = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadWriteI)
def GetMemoryJ(self, *args):
r"""
GetMemoryJ(SparseMatrix self) -> mfem::Memory< int >
GetMemoryJ(SparseMatrix self) -> mfem::Memory< int > const &
"""
return _sparsemat.SparseMatrix_GetMemoryJ(self, *args)
GetMemoryJ = _swig_new_instance_method(_sparsemat.SparseMatrix_GetMemoryJ)
def ReadJ(self, on_dev=True):
r"""ReadJ(SparseMatrix self, bool on_dev=True) -> int const *"""
return _sparsemat.SparseMatrix_ReadJ(self, on_dev)
ReadJ = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadJ)
def WriteJ(self, on_dev=True):
r"""WriteJ(SparseMatrix self, bool on_dev=True) -> int *"""
return _sparsemat.SparseMatrix_WriteJ(self, on_dev)
WriteJ = _swig_new_instance_method(_sparsemat.SparseMatrix_WriteJ)
def ReadWriteJ(self, on_dev=True):
r"""ReadWriteJ(SparseMatrix self, bool on_dev=True) -> int *"""
return _sparsemat.SparseMatrix_ReadWriteJ(self, on_dev)
ReadWriteJ = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadWriteJ)
def HostReadJ(self):
r"""HostReadJ(SparseMatrix self) -> int const *"""
return _sparsemat.SparseMatrix_HostReadJ(self)
HostReadJ = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadJ)
def HostWriteJ(self):
r"""HostWriteJ(SparseMatrix self) -> int *"""
return _sparsemat.SparseMatrix_HostWriteJ(self)
HostWriteJ = _swig_new_instance_method(_sparsemat.SparseMatrix_HostWriteJ)
def HostReadWriteJ(self):
r"""HostReadWriteJ(SparseMatrix self) -> int *"""
return _sparsemat.SparseMatrix_HostReadWriteJ(self)
HostReadWriteJ = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadWriteJ)
def GetMemoryData(self, *args):
r"""
GetMemoryData(SparseMatrix self) -> mfem::Memory< double >
GetMemoryData(SparseMatrix self) -> mfem::Memory< double > const &
"""
return _sparsemat.SparseMatrix_GetMemoryData(self, *args)
GetMemoryData = _swig_new_instance_method(_sparsemat.SparseMatrix_GetMemoryData)
def ReadData(self, on_dev=True):
r"""ReadData(SparseMatrix self, bool on_dev=True) -> double const *"""
return _sparsemat.SparseMatrix_ReadData(self, on_dev)
ReadData = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadData)
def WriteData(self, on_dev=True):
r"""WriteData(SparseMatrix self, bool on_dev=True) -> double *"""
return _sparsemat.SparseMatrix_WriteData(self, on_dev)
WriteData = _swig_new_instance_method(_sparsemat.SparseMatrix_WriteData)
def ReadWriteData(self, on_dev=True):
r"""ReadWriteData(SparseMatrix self, bool on_dev=True) -> double *"""
return _sparsemat.SparseMatrix_ReadWriteData(self, on_dev)
ReadWriteData = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadWriteData)
def HostReadData(self):
r"""HostReadData(SparseMatrix self) -> double const *"""
return _sparsemat.SparseMatrix_HostReadData(self)
HostReadData = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadData)
def HostWriteData(self):
r"""HostWriteData(SparseMatrix self) -> double *"""
return _sparsemat.SparseMatrix_HostWriteData(self)
HostWriteData = _swig_new_instance_method(_sparsemat.SparseMatrix_HostWriteData)
def HostReadWriteData(self):
r"""HostReadWriteData(SparseMatrix self) -> double *"""
return _sparsemat.SparseMatrix_HostReadWriteData(self)
HostReadWriteData = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadWriteData)
def RowSize(self, i):
r"""RowSize(SparseMatrix self, int const i) -> int"""
return _sparsemat.SparseMatrix_RowSize(self, i)
RowSize = _swig_new_instance_method(_sparsemat.SparseMatrix_RowSize)
def MaxRowSize(self):
r"""MaxRowSize(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_MaxRowSize(self)
MaxRowSize = _swig_new_instance_method(_sparsemat.SparseMatrix_MaxRowSize)
def GetRowColumns(self, *args):
r"""
GetRowColumns(SparseMatrix self, int const row) -> int
GetRowColumns(SparseMatrix self, int const row) -> int const *
"""
return _sparsemat.SparseMatrix_GetRowColumns(self, *args)
GetRowColumns = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRowColumns)
def GetRowEntries(self, *args):
r"""
GetRowEntries(SparseMatrix self, int const row) -> double
GetRowEntries(SparseMatrix self, int const row) -> double const *
"""
return _sparsemat.SparseMatrix_GetRowEntries(self, *args)
GetRowEntries = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRowEntries)
def SetWidth(self, width_=-1):
r"""SetWidth(SparseMatrix self, int width_=-1)"""
return _sparsemat.SparseMatrix_SetWidth(self, width_)
SetWidth = _swig_new_instance_method(_sparsemat.SparseMatrix_SetWidth)
def ActualWidth(self):
r"""ActualWidth(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_ActualWidth(self)
ActualWidth = _swig_new_instance_method(_sparsemat.SparseMatrix_ActualWidth)
def SortColumnIndices(self):
r"""SortColumnIndices(SparseMatrix self)"""
return _sparsemat.SparseMatrix_SortColumnIndices(self)
SortColumnIndices = _swig_new_instance_method(_sparsemat.SparseMatrix_SortColumnIndices)
def MoveDiagonalFirst(self):
r"""MoveDiagonalFirst(SparseMatrix self)"""
return _sparsemat.SparseMatrix_MoveDiagonalFirst(self)
MoveDiagonalFirst = _swig_new_instance_method(_sparsemat.SparseMatrix_MoveDiagonalFirst)
def Elem(self, *args):
r"""
Elem(SparseMatrix self, int i, int j) -> double
Elem(SparseMatrix self, int i, int j) -> double const &
"""
return _sparsemat.SparseMatrix_Elem(self, *args)
Elem = _swig_new_instance_method(_sparsemat.SparseMatrix_Elem)
def __call__(self, *args):
r"""
__call__(SparseMatrix self, int i, int j) -> double
__call__(SparseMatrix self, int i, int j) -> double const &
"""
return _sparsemat.SparseMatrix___call__(self, *args)
__call__ = _swig_new_instance_method(_sparsemat.SparseMatrix___call__)
def GetDiag(self, d):
r"""GetDiag(SparseMatrix self, Vector d)"""
return _sparsemat.SparseMatrix_GetDiag(self, d)
GetDiag = _swig_new_instance_method(_sparsemat.SparseMatrix_GetDiag)
def ToDenseMatrix(self, *args):
r"""
ToDenseMatrix(SparseMatrix self) -> DenseMatrix
ToDenseMatrix(SparseMatrix self, DenseMatrix B)
"""
return _sparsemat.SparseMatrix_ToDenseMatrix(self, *args)
ToDenseMatrix = _swig_new_instance_method(_sparsemat.SparseMatrix_ToDenseMatrix)
def GetMemoryClass(self):
r"""GetMemoryClass(SparseMatrix self) -> mfem::MemoryClass"""
return _sparsemat.SparseMatrix_GetMemoryClass(self)
GetMemoryClass = _swig_new_instance_method(_sparsemat.SparseMatrix_GetMemoryClass)
def Mult(self, x, y):
r"""Mult(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_Mult(self, x, y)
Mult = _swig_new_instance_method(_sparsemat.SparseMatrix_Mult)
def AddMult(self, x, y, a=1.0):
r"""AddMult(SparseMatrix self, Vector x, Vector y, double const a=1.0)"""
return _sparsemat.SparseMatrix_AddMult(self, x, y, a)
AddMult = _swig_new_instance_method(_sparsemat.SparseMatrix_AddMult)
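    # Illustrative matrix-vector product using the wrappers above (hedged sketch; assumes
    # mfem.par exposes Vector with an Assign() setter as in PyMFEM):
    #   x = mfem.Vector(A.Width()); x.Assign(1.0)
    #   y = mfem.Vector(A.Height())
    #   A.Mult(x, y)          # y = A x
    #   A.AddMult(x, y, 0.5)  # y += 0.5 * A x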
def MultTranspose(self, x, y):
r"""MultTranspose(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_MultTranspose(self, x, y)
MultTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_MultTranspose)
def AddMultTranspose(self, x, y, a=1.0):
r"""AddMultTranspose(SparseMatrix self, Vector x, Vector y, double const a=1.0)"""
return _sparsemat.SparseMatrix_AddMultTranspose(self, x, y, a)
AddMultTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_AddMultTranspose)
def BuildTranspose(self):
r"""BuildTranspose(SparseMatrix self)"""
return _sparsemat.SparseMatrix_BuildTranspose(self)
BuildTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_BuildTranspose)
def ResetTranspose(self):
r"""ResetTranspose(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ResetTranspose(self)
ResetTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_ResetTranspose)
def PartMult(self, rows, x, y):
r"""PartMult(SparseMatrix self, intArray rows, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_PartMult(self, rows, x, y)
PartMult = _swig_new_instance_method(_sparsemat.SparseMatrix_PartMult)
def PartAddMult(self, rows, x, y, a=1.0):
r"""PartAddMult(SparseMatrix self, intArray rows, Vector x, Vector y, double const a=1.0)"""
return _sparsemat.SparseMatrix_PartAddMult(self, rows, x, y, a)
PartAddMult = _swig_new_instance_method(_sparsemat.SparseMatrix_PartAddMult)
def BooleanMult(self, x, y):
r"""BooleanMult(SparseMatrix self, intArray x, intArray y)"""
return _sparsemat.SparseMatrix_BooleanMult(self, x, y)
BooleanMult = _swig_new_instance_method(_sparsemat.SparseMatrix_BooleanMult)
def BooleanMultTranspose(self, x, y):
r"""BooleanMultTranspose(SparseMatrix self, intArray x, intArray y)"""
return _sparsemat.SparseMatrix_BooleanMultTranspose(self, x, y)
BooleanMultTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_BooleanMultTranspose)
def AbsMult(self, x, y):
r"""AbsMult(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_AbsMult(self, x, y)
AbsMult = _swig_new_instance_method(_sparsemat.SparseMatrix_AbsMult)
def AbsMultTranspose(self, x, y):
r"""AbsMultTranspose(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_AbsMultTranspose(self, x, y)
AbsMultTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_AbsMultTranspose)
def InnerProduct(self, x, y):
r"""InnerProduct(SparseMatrix self, Vector x, Vector y) -> double"""
return _sparsemat.SparseMatrix_InnerProduct(self, x, y)
InnerProduct = _swig_new_instance_method(_sparsemat.SparseMatrix_InnerProduct)
def GetRowSums(self, x):
r"""GetRowSums(SparseMatrix self, Vector x)"""
return _sparsemat.SparseMatrix_GetRowSums(self, x)
GetRowSums = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRowSums)
def GetRowNorml1(self, irow):
r"""GetRowNorml1(SparseMatrix self, int irow) -> double"""
return _sparsemat.SparseMatrix_GetRowNorml1(self, irow)
GetRowNorml1 = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRowNorml1)
def Inverse(self):
r"""Inverse(SparseMatrix self) -> MatrixInverse"""
return _sparsemat.SparseMatrix_Inverse(self)
Inverse = _swig_new_instance_method(_sparsemat.SparseMatrix_Inverse)
def EliminateRow(self, *args):
r"""
EliminateRow(SparseMatrix self, int row, double const sol, Vector rhs)
EliminateRow(SparseMatrix self, int row, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ZERO)
"""
return _sparsemat.SparseMatrix_EliminateRow(self, *args)
EliminateRow = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateRow)
def EliminateCol(self, *args, **kwargs):
r"""EliminateCol(SparseMatrix self, int col, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ZERO)"""
return _sparsemat.SparseMatrix_EliminateCol(self, *args, **kwargs)
EliminateCol = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateCol)
def EliminateCols(self, *args):
r"""
EliminateCols(SparseMatrix self, intArray cols, Vector x=None, Vector b=None)
EliminateCols(SparseMatrix self, intArray col_marker, SparseMatrix Ae)
"""
return _sparsemat.SparseMatrix_EliminateCols(self, *args)
EliminateCols = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateCols)
def EliminateRowColMultipleRHS(self, *args, **kwargs):
r"""EliminateRowColMultipleRHS(SparseMatrix self, int rc, Vector sol, DenseMatrix rhs, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ONE)"""
return _sparsemat.SparseMatrix_EliminateRowColMultipleRHS(self, *args, **kwargs)
EliminateRowColMultipleRHS = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateRowColMultipleRHS)
def EliminateRowColDiag(self, rc, value):
r"""EliminateRowColDiag(SparseMatrix self, int rc, double value)"""
return _sparsemat.SparseMatrix_EliminateRowColDiag(self, rc, value)
EliminateRowColDiag = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateRowColDiag)
def EliminateRowCol(self, *args):
r"""
EliminateRowCol(SparseMatrix self, int rc, double const sol, Vector rhs, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ONE)
EliminateRowCol(SparseMatrix self, int rc, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ONE)
EliminateRowCol(SparseMatrix self, int rc, SparseMatrix Ae, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ONE)
"""
return _sparsemat.SparseMatrix_EliminateRowCol(self, *args)
EliminateRowCol = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateRowCol)
def SetDiagIdentity(self):
r"""SetDiagIdentity(SparseMatrix self)"""
return _sparsemat.SparseMatrix_SetDiagIdentity(self)
SetDiagIdentity = _swig_new_instance_method(_sparsemat.SparseMatrix_SetDiagIdentity)
def EliminateZeroRows(self, threshold=1e-12):
r"""EliminateZeroRows(SparseMatrix self, double const threshold=1e-12)"""
return _sparsemat.SparseMatrix_EliminateZeroRows(self, threshold)
EliminateZeroRows = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateZeroRows)
def Gauss_Seidel_forw(self, x, y):
r"""Gauss_Seidel_forw(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_Gauss_Seidel_forw(self, x, y)
Gauss_Seidel_forw = _swig_new_instance_method(_sparsemat.SparseMatrix_Gauss_Seidel_forw)
def Gauss_Seidel_back(self, x, y):
r"""Gauss_Seidel_back(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_Gauss_Seidel_back(self, x, y)
Gauss_Seidel_back = _swig_new_instance_method(_sparsemat.SparseMatrix_Gauss_Seidel_back)
def GetJacobiScaling(self):
r"""GetJacobiScaling(SparseMatrix self) -> double"""
return _sparsemat.SparseMatrix_GetJacobiScaling(self)
GetJacobiScaling = _swig_new_instance_method(_sparsemat.SparseMatrix_GetJacobiScaling)
def Jacobi(self, b, x0, x1, sc, use_abs_diag=False):
r"""Jacobi(SparseMatrix self, Vector b, Vector x0, Vector x1, double sc, bool use_abs_diag=False)"""
return _sparsemat.SparseMatrix_Jacobi(self, b, x0, x1, sc, use_abs_diag)
Jacobi = _swig_new_instance_method(_sparsemat.SparseMatrix_Jacobi)
def DiagScale(self, b, x, sc=1.0, use_abs_diag=False):
r"""DiagScale(SparseMatrix self, Vector b, Vector x, double sc=1.0, bool use_abs_diag=False)"""
return _sparsemat.SparseMatrix_DiagScale(self, b, x, sc, use_abs_diag)
DiagScale = _swig_new_instance_method(_sparsemat.SparseMatrix_DiagScale)
def Jacobi2(self, b, x0, x1, sc=1.0):
r"""Jacobi2(SparseMatrix self, Vector b, Vector x0, Vector x1, double sc=1.0)"""
return _sparsemat.SparseMatrix_Jacobi2(self, b, x0, x1, sc)
Jacobi2 = _swig_new_instance_method(_sparsemat.SparseMatrix_Jacobi2)
def Jacobi3(self, b, x0, x1, sc=1.0):
r"""Jacobi3(SparseMatrix self, Vector b, Vector x0, Vector x1, double sc=1.0)"""
return _sparsemat.SparseMatrix_Jacobi3(self, b, x0, x1, sc)
Jacobi3 = _swig_new_instance_method(_sparsemat.SparseMatrix_Jacobi3)
def Finalize(self, *args):
r"""
Finalize(SparseMatrix self, int skip_zeros=1)
Finalize(SparseMatrix self, int skip_zeros, bool fix_empty_rows)
"""
return _sparsemat.SparseMatrix_Finalize(self, *args)
Finalize = _swig_new_instance_method(_sparsemat.SparseMatrix_Finalize)
def Finalized(self):
r"""Finalized(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_Finalized(self)
Finalized = _swig_new_instance_method(_sparsemat.SparseMatrix_Finalized)
def ColumnsAreSorted(self):
r"""ColumnsAreSorted(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_ColumnsAreSorted(self)
ColumnsAreSorted = _swig_new_instance_method(_sparsemat.SparseMatrix_ColumnsAreSorted)
def Threshold(self, tol, fix_empty_rows=False):
r"""Threshold(SparseMatrix self, double tol, bool fix_empty_rows=False)"""
return _sparsemat.SparseMatrix_Threshold(self, tol, fix_empty_rows)
Threshold = _swig_new_instance_method(_sparsemat.SparseMatrix_Threshold)
def GetBlocks(self, blocks):
r"""GetBlocks(SparseMatrix self, mfem::Array2D< mfem::SparseMatrix * > & blocks)"""
return _sparsemat.SparseMatrix_GetBlocks(self, blocks)
GetBlocks = _swig_new_instance_method(_sparsemat.SparseMatrix_GetBlocks)
def GetSubMatrix(self, rows, cols, subm):
r"""GetSubMatrix(SparseMatrix self, intArray rows, intArray cols, DenseMatrix subm)"""
return _sparsemat.SparseMatrix_GetSubMatrix(self, rows, cols, subm)
GetSubMatrix = _swig_new_instance_method(_sparsemat.SparseMatrix_GetSubMatrix)
def SetColPtr(self, row):
r"""SetColPtr(SparseMatrix self, int const row)"""
return _sparsemat.SparseMatrix_SetColPtr(self, row)
SetColPtr = _swig_new_instance_method(_sparsemat.SparseMatrix_SetColPtr)
def ClearColPtr(self):
r"""ClearColPtr(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ClearColPtr(self)
ClearColPtr = _swig_new_instance_method(_sparsemat.SparseMatrix_ClearColPtr)
def _Get_(self, col):
r"""_Get_(SparseMatrix self, int const col) -> double"""
return _sparsemat.SparseMatrix__Get_(self, col)
_Get_ = _swig_new_instance_method(_sparsemat.SparseMatrix__Get_)
def SearchRow(self, *args):
r"""
SearchRow(SparseMatrix self, int const col) -> double
SearchRow(SparseMatrix self, int const row, int const col) -> double &
"""
return _sparsemat.SparseMatrix_SearchRow(self, *args)
SearchRow = _swig_new_instance_method(_sparsemat.SparseMatrix_SearchRow)
def _Add_(self, *args):
r"""
_Add_(SparseMatrix self, int const col, double const a)
_Add_(SparseMatrix self, int const row, int const col, double const a)
"""
return _sparsemat.SparseMatrix__Add_(self, *args)
_Add_ = _swig_new_instance_method(_sparsemat.SparseMatrix__Add_)
def _Set_(self, *args):
r"""
_Set_(SparseMatrix self, int const col, double const a)
_Set_(SparseMatrix self, int const row, int const col, double const a)
"""
return _sparsemat.SparseMatrix__Set_(self, *args)
_Set_ = _swig_new_instance_method(_sparsemat.SparseMatrix__Set_)
def Set(self, i, j, a):
r"""Set(SparseMatrix self, int const i, int const j, double const a)"""
return _sparsemat.SparseMatrix_Set(self, i, j, a)
Set = _swig_new_instance_method(_sparsemat.SparseMatrix_Set)
def SetSubMatrix(self, rows, cols, subm, skip_zeros=1):
r"""SetSubMatrix(SparseMatrix self, intArray rows, intArray cols, DenseMatrix subm, int skip_zeros=1)"""
return _sparsemat.SparseMatrix_SetSubMatrix(self, rows, cols, subm, skip_zeros)
SetSubMatrix = _swig_new_instance_method(_sparsemat.SparseMatrix_SetSubMatrix)
def SetSubMatrixTranspose(self, rows, cols, subm, skip_zeros=1):
r"""SetSubMatrixTranspose(SparseMatrix self, intArray rows, intArray cols, DenseMatrix subm, int skip_zeros=1)"""
return _sparsemat.SparseMatrix_SetSubMatrixTranspose(self, rows, cols, subm, skip_zeros)
SetSubMatrixTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_SetSubMatrixTranspose)
def AddSubMatrix(self, rows, cols, subm, skip_zeros=1):
r"""AddSubMatrix(SparseMatrix self, intArray rows, intArray cols, DenseMatrix subm, int skip_zeros=1)"""
return _sparsemat.SparseMatrix_AddSubMatrix(self, rows, cols, subm, skip_zeros)
AddSubMatrix = _swig_new_instance_method(_sparsemat.SparseMatrix_AddSubMatrix)
def RowIsEmpty(self, row):
r"""RowIsEmpty(SparseMatrix self, int const row) -> bool"""
return _sparsemat.SparseMatrix_RowIsEmpty(self, row)
RowIsEmpty = _swig_new_instance_method(_sparsemat.SparseMatrix_RowIsEmpty)
def GetRow(self, row, cols, srow):
r"""GetRow(SparseMatrix self, int const row, intArray cols, Vector srow) -> int"""
return _sparsemat.SparseMatrix_GetRow(self, row, cols, srow)
GetRow = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRow)
def SetRow(self, row, cols, srow):
r"""SetRow(SparseMatrix self, int const row, intArray cols, Vector srow)"""
return _sparsemat.SparseMatrix_SetRow(self, row, cols, srow)
SetRow = _swig_new_instance_method(_sparsemat.SparseMatrix_SetRow)
def AddRow(self, row, cols, srow):
r"""AddRow(SparseMatrix self, int const row, intArray cols, Vector srow)"""
return _sparsemat.SparseMatrix_AddRow(self, row, cols, srow)
AddRow = _swig_new_instance_method(_sparsemat.SparseMatrix_AddRow)
def ScaleRow(self, row, scale):
r"""ScaleRow(SparseMatrix self, int const row, double const scale)"""
return _sparsemat.SparseMatrix_ScaleRow(self, row, scale)
ScaleRow = _swig_new_instance_method(_sparsemat.SparseMatrix_ScaleRow)
def ScaleRows(self, sl):
r"""ScaleRows(SparseMatrix self, Vector sl)"""
return _sparsemat.SparseMatrix_ScaleRows(self, sl)
ScaleRows = _swig_new_instance_method(_sparsemat.SparseMatrix_ScaleRows)
def ScaleColumns(self, sr):
r"""ScaleColumns(SparseMatrix self, Vector sr)"""
return _sparsemat.SparseMatrix_ScaleColumns(self, sr)
ScaleColumns = _swig_new_instance_method(_sparsemat.SparseMatrix_ScaleColumns)
def __iadd__(self, B):
r"""__iadd__(SparseMatrix self, SparseMatrix B) -> SparseMatrix"""
val = _sparsemat.SparseMatrix___iadd__(self, B)
val.thisown = 0
return self
def Add(self, *args):
r"""
Add(SparseMatrix self, int const i, int const j, double const a)
Add(SparseMatrix self, double const a, SparseMatrix B)
"""
return _sparsemat.SparseMatrix_Add(self, *args)
Add = _swig_new_instance_method(_sparsemat.SparseMatrix_Add)
def __imul__(self, a):
r"""__imul__(SparseMatrix self, double a) -> SparseMatrix"""
val = _sparsemat.SparseMatrix___imul__(self, a)
val.thisown = 0
return self
def IsSymmetric(self):
r"""IsSymmetric(SparseMatrix self) -> double"""
return _sparsemat.SparseMatrix_IsSymmetric(self)
IsSymmetric = _swig_new_instance_method(_sparsemat.SparseMatrix_IsSymmetric)
def Symmetrize(self):
r"""Symmetrize(SparseMatrix self)"""
return _sparsemat.SparseMatrix_Symmetrize(self)
Symmetrize = _swig_new_instance_method(_sparsemat.SparseMatrix_Symmetrize)
def NumNonZeroElems(self):
r"""NumNonZeroElems(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_NumNonZeroElems(self)
NumNonZeroElems = _swig_new_instance_method(_sparsemat.SparseMatrix_NumNonZeroElems)
def MaxNorm(self):
r"""MaxNorm(SparseMatrix self) -> double"""
return _sparsemat.SparseMatrix_MaxNorm(self)
MaxNorm = _swig_new_instance_method(_sparsemat.SparseMatrix_MaxNorm)
def CountSmallElems(self, tol):
r"""CountSmallElems(SparseMatrix self, double tol) -> int"""
return _sparsemat.SparseMatrix_CountSmallElems(self, tol)
CountSmallElems = _swig_new_instance_method(_sparsemat.SparseMatrix_CountSmallElems)
def CheckFinite(self):
r"""CheckFinite(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_CheckFinite(self)
CheckFinite = _swig_new_instance_method(_sparsemat.SparseMatrix_CheckFinite)
def SetGraphOwner(self, ownij):
r"""SetGraphOwner(SparseMatrix self, bool ownij)"""
return _sparsemat.SparseMatrix_SetGraphOwner(self, ownij)
SetGraphOwner = _swig_new_instance_method(_sparsemat.SparseMatrix_SetGraphOwner)
def SetDataOwner(self, owna):
r"""SetDataOwner(SparseMatrix self, bool owna)"""
return _sparsemat.SparseMatrix_SetDataOwner(self, owna)
SetDataOwner = _swig_new_instance_method(_sparsemat.SparseMatrix_SetDataOwner)
def OwnsGraph(self):
r"""OwnsGraph(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_OwnsGraph(self)
OwnsGraph = _swig_new_instance_method(_sparsemat.SparseMatrix_OwnsGraph)
def OwnsData(self):
r"""OwnsData(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_OwnsData(self)
OwnsData = _swig_new_instance_method(_sparsemat.SparseMatrix_OwnsData)
def LoseData(self):
r"""LoseData(SparseMatrix self)"""
return _sparsemat.SparseMatrix_LoseData(self)
LoseData = _swig_new_instance_method(_sparsemat.SparseMatrix_LoseData)
def Swap(self, other):
r"""Swap(SparseMatrix self, SparseMatrix other)"""
return _sparsemat.SparseMatrix_Swap(self, other)
Swap = _swig_new_instance_method(_sparsemat.SparseMatrix_Swap)
__swig_destroy__ = _sparsemat.delete_SparseMatrix
def GetType(self):
r"""GetType(SparseMatrix self) -> mfem::Operator::Type"""
return _sparsemat.SparseMatrix_GetType(self)
GetType = _swig_new_instance_method(_sparsemat.SparseMatrix_GetType)
def GetIArray(self):
r"""GetIArray(SparseMatrix self) -> PyObject *"""
return _sparsemat.SparseMatrix_GetIArray(self)
GetIArray = _swig_new_instance_method(_sparsemat.SparseMatrix_GetIArray)
def GetJArray(self):
r"""GetJArray(SparseMatrix self) -> PyObject *"""
return _sparsemat.SparseMatrix_GetJArray(self)
GetJArray = _swig_new_instance_method(_sparsemat.SparseMatrix_GetJArray)
def GetDataArray(self):
r"""GetDataArray(SparseMatrix self) -> PyObject *"""
return _sparsemat.SparseMatrix_GetDataArray(self)
GetDataArray = _swig_new_instance_method(_sparsemat.SparseMatrix_GetDataArray)
def Print(self, *args):
r"""
Print(SparseMatrix self, std::ostream & out=out, int width_=4)
Print(SparseMatrix self, char const * file, int precision=16)
"""
return _sparsemat.SparseMatrix_Print(self, *args)
Print = _swig_new_instance_method(_sparsemat.SparseMatrix_Print)
def PrintGZ(self, file, precision=16):
r"""PrintGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintGZ(self, file, precision)
PrintGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintGZ)
def PrintMatlab(self, *args):
r"""
PrintMatlab(SparseMatrix self, std::ostream & out=out)
PrintMatlab(SparseMatrix self, char const * file, int precision=16)
"""
return _sparsemat.SparseMatrix_PrintMatlab(self, *args)
PrintMatlab = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintMatlab)
def PrintMatlabGZ(self, file, precision=16):
r"""PrintMatlabGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintMatlabGZ(self, file, precision)
PrintMatlabGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintMatlabGZ)
def PrintMM(self, *args):
r"""
PrintMM(SparseMatrix self, std::ostream & out=out)
PrintMM(SparseMatrix self, char const * file, int precision=16)
"""
return _sparsemat.SparseMatrix_PrintMM(self, *args)
PrintMM = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintMM)
def PrintMMGZ(self, file, precision=16):
r"""PrintMMGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintMMGZ(self, file, precision)
PrintMMGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintMMGZ)
def PrintCSRGZ(self, file, precision=16):
r"""PrintCSRGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintCSRGZ(self, file, precision)
PrintCSRGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintCSRGZ)
def PrintCSR(self, *args):
r"""
PrintCSR(SparseMatrix self, std::ostream & out)
PrintCSR(SparseMatrix self, char const * file, int precision=16)
PrintCSR(SparseMatrix self)
"""
return _sparsemat.SparseMatrix_PrintCSR(self, *args)
PrintCSR = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintCSR)
def PrintCSR2GZ(self, file, precision=16):
r"""PrintCSR2GZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintCSR2GZ(self, file, precision)
PrintCSR2GZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintCSR2GZ)
def PrintCSR2(self, *args):
r"""
PrintCSR2(SparseMatrix self, std::ostream & out)
PrintCSR2(SparseMatrix self, char const * file, int precision=16)
PrintCSR2(SparseMatrix self)
"""
return _sparsemat.SparseMatrix_PrintCSR2(self, *args)
PrintCSR2 = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintCSR2)
def PrintInfoGZ(self, file, precision=16):
r"""PrintInfoGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintInfoGZ(self, file, precision)
PrintInfoGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintInfoGZ)
def PrintInfo(self, *args):
r"""
PrintInfo(SparseMatrix self, std::ostream & out)
PrintInfo(SparseMatrix self, char const * file, int precision=16)
PrintInfo(SparseMatrix self)
"""
return _sparsemat.SparseMatrix_PrintInfo(self, *args)
PrintInfo = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintInfo)
# Register SparseMatrix in _sparsemat:
_sparsemat.SparseMatrix_swigregister(SparseMatrix)
def __lshift__(os, mat):
r"""__lshift__(std::ostream & os, SparseMatrix mat) -> std::ostream &"""
return _sparsemat.__lshift__(os, mat)
__lshift__ = _sparsemat.__lshift__
def SparseMatrixFunction(S, f):
r"""SparseMatrixFunction(SparseMatrix S, double (*)(double) f)"""
return _sparsemat.SparseMatrixFunction(S, f)
SparseMatrixFunction = _sparsemat.SparseMatrixFunction
def TransposeAbstractSparseMatrix(A, useActualWidth):
r"""TransposeAbstractSparseMatrix(AbstractSparseMatrix A, int useActualWidth) -> SparseMatrix"""
return _sparsemat.TransposeAbstractSparseMatrix(A, useActualWidth)
TransposeAbstractSparseMatrix = _sparsemat.TransposeAbstractSparseMatrix
def TransposeMult(A, B):
r"""TransposeMult(SparseMatrix A, SparseMatrix B) -> SparseMatrix"""
return _sparsemat.TransposeMult(A, B)
TransposeMult = _sparsemat.TransposeMult
def MultAbstractSparseMatrix(A, B):
r"""MultAbstractSparseMatrix(AbstractSparseMatrix A, AbstractSparseMatrix B) -> SparseMatrix"""
return _sparsemat.MultAbstractSparseMatrix(A, B)
MultAbstractSparseMatrix = _sparsemat.MultAbstractSparseMatrix
def Mult_AtDA(A, D, OAtDA=None):
r"""Mult_AtDA(SparseMatrix A, Vector D, SparseMatrix OAtDA=None) -> SparseMatrix"""
return _sparsemat.Mult_AtDA(A, D, OAtDA)
Mult_AtDA = _sparsemat.Mult_AtDA
def OuterProduct(*args):
r"""
OuterProduct(DenseMatrix A, DenseMatrix B) -> DenseMatrix
OuterProduct(DenseMatrix A, SparseMatrix B) -> SparseMatrix
OuterProduct(SparseMatrix A, DenseMatrix B) -> SparseMatrix
OuterProduct(SparseMatrix A, SparseMatrix B) -> SparseMatrix
"""
return _sparsemat.OuterProduct(*args)
OuterProduct = _sparsemat.OuterProduct
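# ----------------------------------------------------------------------
# Editorial sketch (not part of the SWIG-generated wrapper): one way to view
# a finalized SparseMatrix as a SciPy CSR matrix through the GetIArray /
# GetJArray / GetDataArray accessors defined above.  Assumes SciPy is
# installed; the helper name itself is illustrative.
def sparse_matrix_to_scipy_csr(A):
    """Return a scipy.sparse.csr_matrix built from a finalized SparseMatrix."""
    from scipy.sparse import csr_matrix
    assert A.Finalized()         # the CSR arrays are only valid once Finalize() has run
    I = A.GetIArray()            # row pointer array, length Height() + 1
    J = A.GetJArray()            # column indices, one entry per stored nonzero
    data = A.GetDataArray()      # values, aligned with J
    return csr_matrix((data, J, I), shape=(A.Height(), A.Width()))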
|
mfem/PyMFEM
|
mfem/_par/sparsemat.py
|
Python
|
bsd-3-clause
| 42,261 | 0.004023 |
import json
from mockito import *
import os
import shutil
import tempfile
import unittest
from ice.history import ManagedROMArchive
class ManagedROMArchiveTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.temppath = os.path.join(self.tempdir, "tempfile")
self.mock_user = mock()
self.mock_user.user_id = 1234
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_previous_managed_ids_returns_none_for_missing_file(self):
missing_path = os.path.join("some", "stupid", "path")
self.assertFalse(os.path.exists(missing_path))
archive = ManagedROMArchive(missing_path)
self.assertIsNone(archive.previous_managed_ids(self.mock_user))
def test_previous_managed_ids_raises_exception_for_malformed_json(self):
with open(self.temppath, "w+") as f:
f.write("notrealjson")
with self.assertRaises(ValueError):
archive = ManagedROMArchive(self.temppath)
def test_previous_managed_ids_returns_empty_list_for_missing_user(self):
data = {
"1337": []
}
with open(self.temppath, "w+") as f:
f.write(json.dumps(data))
archive = ManagedROMArchive(self.temppath)
self.assertEquals(archive.previous_managed_ids(self.mock_user), [])
def test_previous_managed_ids_returns_list_from_json(self):
data = {
"1234": [
"1234567890",
"0987654321",
]
}
with open(self.temppath, "w+") as f:
f.write(json.dumps(data))
archive = ManagedROMArchive(self.temppath)
self.assertEquals(archive.previous_managed_ids(self.mock_user), ["1234567890","0987654321"])
def test_set_managed_ids_creates_new_file_if_needed(self):
self.assertFalse(os.path.exists(self.temppath))
archive = ManagedROMArchive(self.temppath)
archive.set_managed_ids(self.mock_user, ["1234567890"])
self.assertTrue(os.path.exists(self.temppath))
def test_previous_managed_ids_returns_new_value_after_set_managed_ids(self):
archive = ManagedROMArchive(self.temppath)
new_ids = ["1234567890"]
self.assertNotEqual(archive.previous_managed_ids(self.mock_user), new_ids)
archive.set_managed_ids(self.mock_user, ["1234567890"])
self.assertEqual(archive.previous_managed_ids(self.mock_user), new_ids)
def test_creating_new_archive_after_set_managed_ids_uses_new_ids(self):
archive = ManagedROMArchive(self.temppath)
new_ids = ["1234567890"]
self.assertNotEqual(archive.previous_managed_ids(self.mock_user), new_ids)
archive.set_managed_ids(self.mock_user, ["1234567890"])
new_archive = ManagedROMArchive(self.temppath)
self.assertEqual(new_archive.previous_managed_ids(self.mock_user), new_ids)
|
scottrice/Ice
|
tests/managed_rom_archive_tests.py
|
Python
|
mit
| 2,688 | 0.005952 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from swift.common import utils as swift_utils
from swift.common.http import is_success
from swift.common.middleware import acl as swift_acl
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.swob import HTTPNotFound, HTTPForbidden, HTTPUnauthorized
from swift.common.utils import config_read_reseller_options, list_from_csv
from swift.proxy.controllers.base import get_account_info
import functools
PROJECT_DOMAIN_ID_HEADER = 'x-account-project-domain-id'
PROJECT_DOMAIN_ID_SYSMETA_HEADER = \
get_sys_meta_prefix('account') + 'project-domain-id'
# a string that is unique w.r.t valid ids
UNKNOWN_ID = '_unknown'
class KeystoneAuth(object):
"""Swift middleware to Keystone authorization system.
In Swift's proxy-server.conf add this middleware to your pipeline::
[pipeline:main]
pipeline = catch_errors cache authtoken keystoneauth proxy-server
Make sure you have the authtoken middleware before the
keystoneauth middleware.
The authtoken middleware will take care of validating the user and
keystoneauth will authorize access.
The authtoken middleware is shipped with keystonemiddleware - it
does not have any other dependencies than itself so you can either
install it by copying the file directly in your python path or by
installing keystonemiddleware.
If support is required for unvalidated users (as with anonymous
access) or for formpost/staticweb/tempurl middleware, authtoken will
need to be configured with ``delay_auth_decision`` set to true. See
the Keystone documentation for more detail on how to configure the
authtoken middleware.
In proxy-server.conf you will need to have the setting account
auto creation to true::
[app:proxy-server]
account_autocreate = true
And add a swift authorization filter section, such as::
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin, swiftoperator
The user who is able to give ACL / create Containers permissions
will be the user with a role listed in the ``operator_roles``
setting which by default includes the admin and the swiftoperator
roles.
The keystoneauth middleware maps a Keystone project/tenant to an account
in Swift by adding a prefix (``AUTH_`` by default) to the tenant/project
    id. For example, if the project id is ``1234``, the path is
``/v1/AUTH_1234``.
If the ``is_admin`` option is ``true``, a user whose username is the same
as the project name and who has any role on the project will have access
rights elevated to be the same as if the user had one of the
``operator_roles``. Note that the condition compares names rather than
UUIDs. This option is deprecated. It is ``false`` by default.
If you need to have a different reseller_prefix to be able to
mix different auth servers you can configure the option
``reseller_prefix`` in your keystoneauth entry like this::
reseller_prefix = NEWAUTH
Don't forget to also update the Keystone service endpoint configuration to
use NEWAUTH in the path.
It is possible to have several accounts associated with the same project.
This is done by listing several prefixes as shown in the following
example:
reseller_prefix = AUTH, SERVICE
This means that for project id '1234', the paths '/v1/AUTH_1234' and
'/v1/SERVICE_1234' are associated with the project and are authorized
using roles that a user has with that project. The core use of this feature
is that it is possible to provide different rules for each account
prefix. The following parameters may be prefixed with the appropriate
prefix:
operator_roles
service_roles
For backward compatibility, no prefix implies the parameter
applies to all reseller_prefixes. Here is an example, using two
prefixes::
reseller_prefix = AUTH, SERVICE
# The next three lines have identical effects (since the first applies
# to both prefixes).
operator_roles = admin, swiftoperator
AUTH_operator_roles = admin, swiftoperator
SERVICE_operator_roles = admin, swiftoperator
# The next line only applies to accounts with the SERVICE prefix
SERVICE_operator_roles = admin, some_other_role
X-Service-Token tokens are supported by the inclusion of the service_roles
configuration option. When present, this option requires that the
X-Service-Token header supply a token from a user who has a role listed
in service_roles. Here is an example configuration::
reseller_prefix = AUTH, SERVICE
AUTH_operator_roles = admin, swiftoperator
SERVICE_operator_roles = admin, swiftoperator
SERVICE_service_roles = service
The keystoneauth middleware supports cross-tenant access control using
the syntax ``<tenant>:<user>`` to specify a grantee in container Access
Control Lists (ACLs). For a request to be granted by an ACL, the grantee
``<tenant>`` must match the UUID of the tenant to which the request
token is scoped and the grantee ``<user>`` must match the UUID of the
user authenticated by the request token.
Note that names must no longer be used in cross-tenant ACLs because with
the introduction of domains in keystone names are no longer globally
unique.
For backwards compatibility, ACLs using names will be granted by
keystoneauth when it can be established that the grantee tenant,
the grantee user and the tenant being accessed are either not yet in a
domain (e.g. the request token has been obtained via the keystone v2
API) or are all in the default domain to which legacy accounts would
have been migrated. The default domain is identified by its UUID,
which by default has the value ``default``. This can be changed by
setting the ``default_domain_id`` option in the keystoneauth
configuration::
default_domain_id = default
The backwards compatible behavior can be disabled by setting the config
option ``allow_names_in_acls`` to false::
allow_names_in_acls = false
To enable this backwards compatibility, keystoneauth will attempt to
determine the domain id of a tenant when any new account is created,
and persist this as account metadata. If an account is created for a tenant
using a token with reselleradmin role that is not scoped on that tenant,
keystoneauth is unable to determine the domain id of the tenant;
keystoneauth will assume that the tenant may not be in the default domain
and therefore not match names in ACLs for that account.
By default, middleware higher in the WSGI pipeline may override auth
processing, useful for middleware such as tempurl and formpost. If you know
you're not going to use such middleware and you want a bit of extra
security you can disable this behaviour by setting the ``allow_overrides``
option to ``false``::
allow_overrides = false
:param app: The next WSGI app in the pipeline
:param conf: The dict of configuration values
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = swift_utils.get_logger(conf, log_route='keystoneauth')
self.reseller_prefixes, self.account_rules = \
config_read_reseller_options(conf,
dict(operator_roles=['admin',
'swiftoperator'],
service_roles=[]))
self.reseller_admin_role = conf.get('reseller_admin_role',
'ResellerAdmin').lower()
config_is_admin = conf.get('is_admin', "false").lower()
self.is_admin = swift_utils.config_true_value(config_is_admin)
config_overrides = conf.get('allow_overrides', 't').lower()
self.allow_overrides = swift_utils.config_true_value(config_overrides)
self.default_domain_id = conf.get('default_domain_id', 'default')
self.allow_names_in_acls = swift_utils.config_true_value(
conf.get('allow_names_in_acls', 'true'))
def __call__(self, environ, start_response):
identity = self._keystone_identity(environ)
# Check if one of the middleware like tempurl or formpost have
# set the swift.authorize_override environ and want to control the
# authentication
if (self.allow_overrides and
environ.get('swift.authorize_override', False)):
msg = 'Authorizing from an overriding middleware'
self.logger.debug(msg)
return self.app(environ, start_response)
if identity:
self.logger.debug('Using identity: %r', identity)
environ['keystone.identity'] = identity
environ['REMOTE_USER'] = identity.get('tenant')
env_identity = self._integral_keystone_identity(environ)
environ['swift.authorize'] = functools.partial(
self.authorize, env_identity)
user_roles = (r.lower() for r in identity.get('roles', []))
if self.reseller_admin_role in user_roles:
environ['reseller_request'] = True
else:
self.logger.debug('Authorizing as anonymous')
environ['swift.authorize'] = self.authorize_anonymous
environ['swift.clean_acl'] = swift_acl.clean_acl
def keystone_start_response(status, response_headers, exc_info=None):
project_domain_id = None
for key, val in response_headers:
if key.lower() == PROJECT_DOMAIN_ID_SYSMETA_HEADER:
project_domain_id = val
break
if project_domain_id:
response_headers.append((PROJECT_DOMAIN_ID_HEADER,
project_domain_id))
return start_response(status, response_headers, exc_info)
return self.app(environ, keystone_start_response)
def _keystone_identity(self, environ):
"""Extract the identity from the Keystone auth component."""
# In next release, we would add user id in env['keystone.identity'] by
# using _integral_keystone_identity to replace current
# _keystone_identity. The purpose of keeping it in this release it for
# back compatibility.
if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
return
roles = []
if 'HTTP_X_ROLES' in environ:
roles = environ['HTTP_X_ROLES'].split(',')
identity = {'user': environ.get('HTTP_X_USER_NAME'),
'tenant': (environ.get('HTTP_X_TENANT_ID'),
environ.get('HTTP_X_TENANT_NAME')),
'roles': roles}
return identity
def _integral_keystone_identity(self, environ):
"""Extract the identity from the Keystone auth component."""
if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
return
roles = list_from_csv(environ.get('HTTP_X_ROLES', ''))
service_roles = list_from_csv(environ.get('HTTP_X_SERVICE_ROLES', ''))
identity = {'user': (environ.get('HTTP_X_USER_ID'),
environ.get('HTTP_X_USER_NAME')),
'tenant': (environ.get('HTTP_X_TENANT_ID'),
environ.get('HTTP_X_TENANT_NAME')),
'roles': roles,
'service_roles': service_roles}
token_info = environ.get('keystone.token_info', {})
auth_version = 0
user_domain = project_domain = (None, None)
if 'access' in token_info:
# ignore any domain id headers that authtoken may have set
auth_version = 2
elif 'token' in token_info:
auth_version = 3
user_domain = (environ.get('HTTP_X_USER_DOMAIN_ID'),
environ.get('HTTP_X_USER_DOMAIN_NAME'))
project_domain = (environ.get('HTTP_X_PROJECT_DOMAIN_ID'),
environ.get('HTTP_X_PROJECT_DOMAIN_NAME'))
identity['user_domain'] = user_domain
identity['project_domain'] = project_domain
identity['auth_version'] = auth_version
return identity
def _get_account_name(self, prefix, tenant_id):
return '%s%s' % (prefix, tenant_id)
def _account_matches_tenant(self, account, tenant_id):
"""Check if account belongs to a project/tenant"""
for prefix in self.reseller_prefixes:
if self._get_account_name(prefix, tenant_id) == account:
return True
return False
def _get_account_prefix(self, account):
"""Get the prefix of an account"""
# Empty prefix matches everything, so try to match others first
for prefix in [pre for pre in self.reseller_prefixes if pre != '']:
if account.startswith(prefix):
return prefix
if '' in self.reseller_prefixes:
return ''
return None
def _get_project_domain_id(self, environ):
info = get_account_info(environ, self.app, 'KS')
domain_id = info.get('sysmeta', {}).get('project-domain-id')
exists = is_success(info.get('status', 0))
return exists, domain_id
def _set_project_domain_id(self, req, path_parts, env_identity):
'''
Try to determine the project domain id and save it as
account metadata. Do this for a PUT or POST to the
account, and also for a container PUT in case that
causes the account to be auto-created.
'''
if PROJECT_DOMAIN_ID_SYSMETA_HEADER in req.headers:
return
version, account, container, obj = path_parts
method = req.method
if (obj or (container and method != 'PUT')
or method not in ['PUT', 'POST']):
return
tenant_id, tenant_name = env_identity['tenant']
exists, sysmeta_id = self._get_project_domain_id(req.environ)
req_has_id, req_id, new_id = False, None, None
if self._account_matches_tenant(account, tenant_id):
# domain id can be inferred from request (may be None)
req_has_id = True
req_id = env_identity['project_domain'][0]
if not exists:
# new account so set a domain id
new_id = req_id if req_has_id else UNKNOWN_ID
elif sysmeta_id is None and req_id == self.default_domain_id:
# legacy account, update if default domain id in req
new_id = req_id
elif sysmeta_id == UNKNOWN_ID and req_has_id:
# unknown domain, update if req confirms domain
new_id = req_id or ''
elif req_has_id and sysmeta_id != req_id:
self.logger.warn("Inconsistent project domain id: " +
"%s in token vs %s in account metadata."
% (req_id, sysmeta_id))
if new_id is not None:
req.headers[PROJECT_DOMAIN_ID_SYSMETA_HEADER] = new_id
def _is_name_allowed_in_acl(self, req, path_parts, identity):
if not self.allow_names_in_acls:
return False
user_domain_id = identity['user_domain'][0]
if user_domain_id and user_domain_id != self.default_domain_id:
return False
proj_domain_id = identity['project_domain'][0]
if proj_domain_id and proj_domain_id != self.default_domain_id:
return False
# request user and scoped project are both in default domain
tenant_id, tenant_name = identity['tenant']
version, account, container, obj = path_parts
if self._account_matches_tenant(account, tenant_id):
# account == scoped project, so account is also in default domain
allow = True
else:
# retrieve account project domain id from account sysmeta
exists, acc_domain_id = self._get_project_domain_id(req.environ)
allow = exists and acc_domain_id in [self.default_domain_id, None]
if allow:
self.logger.debug("Names allowed in acls.")
return allow
def _authorize_cross_tenant(self, user_id, user_name,
tenant_id, tenant_name, roles,
allow_names=True):
"""Check cross-tenant ACLs.
Match tenant:user, tenant and user could be its id, name or '*'
:param user_id: The user id from the identity token.
:param user_name: The user name from the identity token.
:param tenant_id: The tenant ID from the identity token.
:param tenant_name: The tenant name from the identity token.
:param roles: The given container ACL.
:param allow_names: If True then attempt to match tenant and user names
as well as id's.
:returns: matched string if tenant(name/id/*):user(name/id/*) matches
the given ACL.
None otherwise.
"""
tenant_match = [tenant_id, '*']
user_match = [user_id, '*']
if allow_names:
tenant_match = tenant_match + [tenant_name]
user_match = user_match + [user_name]
for tenant in tenant_match:
for user in user_match:
s = '%s:%s' % (tenant, user)
if s in roles:
return s
return None
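    # Editorial illustration (not part of the original middleware): with a
    # container ACL of ["1234:5678"], a token scoped to tenant id '1234' for
    # user id '5678' matches, e.g.
    #
    #     self._authorize_cross_tenant('5678', 'alice', '1234', 'acme',
    #                                  ['1234:5678'])   # -> '1234:5678'
    #
    # "1234:*" admits any user of that tenant and "*:*" any authenticated
    # user; name forms such as 'acme:alice' only match when allow_names is True.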
def authorize(self, env_identity, req):
tenant_id, tenant_name = env_identity['tenant']
user_id, user_name = env_identity['user']
referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
#allow OPTIONS requests to proceed as normal
if req.method == 'OPTIONS':
return
try:
part = req.split_path(1, 4, True)
version, account, container, obj = part
except ValueError:
return HTTPNotFound(request=req)
self._set_project_domain_id(req, part, env_identity)
user_roles = [r.lower() for r in env_identity.get('roles', [])]
user_service_roles = [r.lower() for r in env_identity.get(
'service_roles', [])]
# Give unconditional access to a user with the reseller_admin
# role.
if self.reseller_admin_role in user_roles:
msg = 'User %s has reseller admin authorizing'
self.logger.debug(msg, tenant_id)
req.environ['swift_owner'] = True
return
# If we are not reseller admin and user is trying to delete its own
# account then deny it.
if not container and not obj and req.method == 'DELETE':
# User is not allowed to issue a DELETE on its own account
msg = 'User %s:%s is not allowed to delete its own account'
self.logger.debug(msg, tenant_name, user_name)
return self.denied_response(req)
# cross-tenant authorization
matched_acl = None
if roles:
allow_names = self._is_name_allowed_in_acl(req, part, env_identity)
matched_acl = self._authorize_cross_tenant(user_id, user_name,
tenant_id, tenant_name,
roles, allow_names)
if matched_acl is not None:
log_msg = 'user %s allowed in ACL authorizing.'
self.logger.debug(log_msg, matched_acl)
return
acl_authorized = self._authorize_unconfirmed_identity(req, obj,
referrers,
roles)
if acl_authorized:
return
# Check if a user tries to access an account that does not match their
# token
if not self._account_matches_tenant(account, tenant_id):
log_msg = 'tenant mismatch: %s != %s'
self.logger.debug(log_msg, account, tenant_id)
return self.denied_response(req)
# Compare roles from tokens against the configuration options:
#
# X-Auth-Token role Has specified X-Service-Token role Grant
# in operator_roles? service_roles? in service_roles? swift_owner?
# ------------------ -------------- -------------------- ------------
# yes yes yes yes
# yes no don't care yes
# no don't care don't care no
# ------------------ -------------- -------------------- ------------
account_prefix = self._get_account_prefix(account)
operator_roles = self.account_rules[account_prefix]['operator_roles']
have_operator_role = set(operator_roles).intersection(
set(user_roles))
service_roles = self.account_rules[account_prefix]['service_roles']
have_service_role = set(service_roles).intersection(
set(user_service_roles))
if have_operator_role and (service_roles and have_service_role):
req.environ['swift_owner'] = True
elif have_operator_role and not service_roles:
req.environ['swift_owner'] = True
if req.environ.get('swift_owner'):
log_msg = 'allow user with role(s) %s as account admin'
self.logger.debug(log_msg, ','.join(have_operator_role.union(
have_service_role)))
return
# If user is of the same name of the tenant then make owner of it.
if self.is_admin and user_name == tenant_name:
self.logger.warning("the is_admin feature has been deprecated "
"and will be removed in the future "
"update your config file")
req.environ['swift_owner'] = True
return
if acl_authorized is not None:
return self.denied_response(req)
# Check if we have the role in the userroles and allow it
for user_role in user_roles:
if user_role in (r.lower() for r in roles):
log_msg = 'user %s:%s allowed in ACL: %s authorizing'
self.logger.debug(log_msg, tenant_name, user_name,
user_role)
return
return self.denied_response(req)
def authorize_anonymous(self, req):
"""
Authorize an anonymous request.
:returns: None if authorization is granted, an error page otherwise.
"""
try:
part = req.split_path(1, 4, True)
version, account, container, obj = part
except ValueError:
return HTTPNotFound(request=req)
#allow OPTIONS requests to proceed as normal
if req.method == 'OPTIONS':
return
is_authoritative_authz = (account and
(self._get_account_prefix(account) in
self.reseller_prefixes))
if not is_authoritative_authz:
return self.denied_response(req)
referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
authorized = self._authorize_unconfirmed_identity(req, obj, referrers,
roles)
if not authorized:
return self.denied_response(req)
def _authorize_unconfirmed_identity(self, req, obj, referrers, roles):
""""
Perform authorization for access that does not require a
confirmed identity.
:returns: A boolean if authorization is granted or denied. None if
a determination could not be made.
"""
# Allow container sync.
if (req.environ.get('swift_sync_key')
and (req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', None))
and 'x-timestamp' in req.headers):
log_msg = 'allowing proxy %s for container-sync'
self.logger.debug(log_msg, req.remote_addr)
return True
# Check if referrer is allowed.
if swift_acl.referrer_allowed(req.referer, referrers):
if obj or '.rlistings' in roles:
log_msg = 'authorizing %s via referer ACL'
self.logger.debug(log_msg, req.referrer)
return True
return False
def denied_response(self, req):
"""Deny WSGI Response.
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if req.remote_user:
return HTTPForbidden(request=req)
else:
return HTTPUnauthorized(request=req)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return KeystoneAuth(app, conf)
return auth_filter
|
dpgoetz/swift
|
swift/common/middleware/keystoneauth.py
|
Python
|
apache-2.0
| 25,905 | 0.000116 |
from flask.ext.wtf import Form
from wtforms import TextAreaField
from wtforms.validators import DataRequired
class RequestForm(Form):
inputText = TextAreaField('inputText', validators=[DataRequired()])
|
jarjun/EmergencyTextToVoiceCall
|
app/forms.py
|
Python
|
mit
| 204 | 0.009804 |
# Authors: David Goodger; Gunnar Schwant
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 21817 $
# Date: $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
'author': 'Autor',
'authors': 'Autoren',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Kontakt',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Datum',
'dedication': 'Widmung',
'copyright': 'Copyright',
'abstract': 'Zusammenfassung',
'attention': 'Achtung!',
'caution': 'Vorsicht!',
'danger': '!GEFAHR!',
'error': 'Fehler',
'hint': 'Hinweis',
'important': 'Wichtig',
'note': 'Bemerkung',
'tip': 'Tipp',
'warning': 'Warnung',
'contents': 'Inhalt'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
'autor': 'author',
'autoren': 'authors',
'organisation': 'organization',
'adresse': 'address',
'kontakt': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'datum': 'date',
'copyright': 'copyright',
'widmung': 'dedication',
'zusammenfassung': 'abstract'}
"""German (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
garinh/cs
|
docs/support/docutils/languages/de.py
|
Python
|
lgpl-2.1
| 1,814 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
########################################################################
# Solves problem 76 from projectEuler.net.
# Finds the number of different ways that 100 can be written as a sum of
# at least two positive integers.
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################
if __name__ == '__main__':
ways = [0 for x in range(101)]
ways[0] = 1
for coin in reversed(range(1, 100)):
for index in range(coin, 101):
ways[index] += ways[index - coin]
print("The result is:", ways[100])
|
salessandri/programming-contests
|
project-euler/problem076.py
|
Python
|
gpl-3.0
| 1,376 | 0.00218 |
import rdtest
class D3D11_Texture_Zoo(rdtest.TestCase):
slow_test = True
demos_test_name = 'D3D11_Texture_Zoo'
def __init__(self):
rdtest.TestCase.__init__(self)
self.zoo_helper = rdtest.Texture_Zoo()
def check_capture(self):
# This takes ownership of the controller and shuts it down when it's finished
self.zoo_helper.check_capture(self.capture_filename, self.controller)
self.controller = None
|
moradin/renderdoc
|
util/test/tests/D3D11/D3D11_Texture_Zoo.py
|
Python
|
mit
| 457 | 0.002188 |
# coding: utf-8
from geventwebsocket.handler import WebSocketHandler
from gevent import pywsgi, sleep
import json
import MySQLdb
class JPC:
    #
    # Initialization
    #
def __init__(self, filepath_config):
import hashlib
        # Load the configuration file
fp = open(filepath_config, 'r')
config = json.load(fp)
fp.close()
        # Store the settings in instance variables
self.host = config['host']
self.port = config['port']
self.langlist = json.load(open(config['langfile'], 'r'))
self.enckey = hashlib.md5(config['key']).digest()
self.db_host = config['db_host']
self.db_name = config['db_name']
self.db_username = config['db_username']
self.db_password = config['db_password']
return
    #
    # Check a submission
    #
def execute(self):
import codecs
import commands
import os
import pwd
        # Extract the submission details
code = self.packet['code']
lang = self.packet['lang']
script = self.langlist['compile'][lang]
extension = self.langlist['extension'][lang]
        # Generate the required file names and user name
filepath_in = self.randstr(8) + extension
filepath_out = self.randstr(8)
username = self.randstr(16)
        # Move to /tmp
os.chdir('/tmp/')
        # Create a dedicated user
try:
os.system("useradd -M {0}".format(username))
pwnam = pwd.getpwnam(username)
except Exception:
return
        # Write the submitted code to a file
fp = codecs.open(filepath_in, 'w', 'utf-8')
fp.write(code)
fp.close()
        # Compile
compile_result = commands.getoutput(
script.format(input=filepath_in, output=filepath_out)
)
        # Delete the source file
try:
os.remove(filepath_in)
except Exception:
pass
        # Send the compiler output
try:
self.ws.send(json.dumps({'compile': compile_result}))
except Exception:
pass
        # Did the compilation succeed?
        if not os.path.exists(filepath_out):
            print("[INFO] Compilation failed.")
            return
        # Change ownership and permissions of the executable
try:
os.chmod(filepath_out, 0500)
os.chown(filepath_out, pwnam.pw_uid, pwnam.pw_gid)
            # Also chown the expected-output generator, just in case
os.chown(self.record['output_code'], pwnam.pw_uid, pwnam.pw_gid)
except Exception:
try:
os.remove(filepath_out)
os.system("userdel -r {0}".format(username))
except Exception:
print("[ERROR] /tmp/{0}の削除に失敗しました。".format(filepath_out))
print("[ERROR] ユーザー{0}の削除に失敗しました。".format(username))
return
        # Run the checks
        clear = True
        for n in range(int(self.record['exec_time'])):
            print("[INFO] Starting attempt {0}.".format(n + 1))
            # Announce the start of this attempt
try:
self.ws.send(json.dumps({'attempt': n + 1}))
except Exception:
pass
            # Generate the input
self.input_data = commands.getoutput(
self.record['input_code'] + " " + str(n)
)
            # Generate the expected output
self.output_data = self.run_command(username, self.record['output_code'])
            # Run the submission and capture its output
result = self.run_command(username, './'+filepath_out)
#print "Input : ", self.input_data
#print "Answer : ", self.output_data
#print "Result : ", result
            # Timed out
            if result == False:
                self.ws.send(json.dumps({'failure': n + 1}))
                clear = False
                print("[INFO] The attempt timed out.")
                continue
            # Wrong answer
            if self.output_data.rstrip('\n') != result.rstrip('\n'):
                self.ws.send(json.dumps({'failure': n + 1}))
                clear = False
                print("[INFO] The output is incorrect.")
                continue
            # Announce the result of this attempt
            try:
                self.ws.send(json.dumps({'success': n + 1}))
                print("[INFO] The check succeeded.")
            except Exception:
                pass
        # Notify overall success or failure
if clear:
self.ws.send('{"complete":"success"}')
self.update_db()
else:
self.ws.send('{"complete":"failure"}')
        # Delete the executable and the temporary user
try:
os.remove(filepath_out)
os.system("userdel -r {0}".format(username))
except Exception:
print("[ERROR] /tmp/{0}の削除に失敗しました。".format(filepath_out))
print("[ERROR] ユーザー{0}の削除に失敗しました。".format(username))
return
    #
    # Run a command under resource limits
    #
def run_command(self, username, filepath):
import subprocess
import time
import sys
        # Spawn the process
proc = subprocess.Popen(
[
'su',
username,
'-c',
'ulimit -v {0}; {1}'.format(
str(self.record['limit_memory']),
filepath
)
],
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
stdin = subprocess.PIPE,
)
        # Feed the input
proc.stdin.write(self.input_data.rstrip('\n') + '\n')
proc.stdin.close()
        # Enforce the time limit
deadline = time.time() + float(self.record['limit_time']) / 1000.0
while time.time() < deadline and proc.poll() == None:
time.sleep(0.20)
        # Timed out
if proc.poll() == None:
if float(sys.version[:3]) >= 2.6:
proc.terminate()
return False
        # Normal termination
stdout = proc.stdout.read()
return stdout
    #
    # Record the score in the database
    #
def update_db(self):
import time
cursor = self.db.cursor(MySQLdb.cursors.DictCursor)
        # Add the score to the user's account
cursor.execute("UPDATE account SET score=score+{score} WHERE user='{user}';".format(score=int(self.record['score']), user=self.user))
        # Record the problem as solved
cursor.execute("UPDATE account SET solved=concat('{id},', solved) WHERE user='{user}';".format(id=self.record['id'], user=self.user))
        # Increment the solved count
cursor.execute("UPDATE problem SET solved=solved+1 WHERE id={id};".format(id=self.record['id']))
        # Update the last solver
cursor.execute("UPDATE problem SET solved_user='{user}' WHERE id={id};".format(user=self.user, id=self.record['id']))
        # Update the last-solved time
cursor.execute("UPDATE problem SET last_date='{date}' WHERE id={id};".format(date=time.strftime('%Y-%m-%d %H:%M:%S'), id=self.record['id']))
cursor.close()
self.db.commit()
return
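        # Editorial note (sketch, not in the original code): the string-formatted
        # statements above could equivalently use MySQLdb parameter binding, e.g.
        #
        #     cursor.execute("UPDATE account SET score = score + %s WHERE user = %s;",
        #                    (int(self.record['score']), self.user))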
    #
    # Handle a new request
    #
def handle(self, env, response):
self.ws = env['wsgi.websocket']
print("[INFO] 新しい要求を受信しました。")
        # Receive the request
self.packet = self.ws.receive()
if not self.analyse_packet(): return
        # Fetch the problem
self.get_problem()
        # Run the check
self.execute()
return
    #
    # Fetch the problem details
    #
def get_problem(self):
cursor = self.db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT * FROM problem WHERE id={id};".format(id=self.packet['id']))
self.record = cursor.fetchall()[0]
cursor.close()
return
    #
    # Parse the submitted data
    #
def analyse_packet(self):
from Crypto.Cipher import AES
        # Parse the packet as JSON
try:
self.packet = json.loads(self.packet)
except Exception:
print("[ERROR] JSONの展開に失敗しました。")
return False
        # Validate the data
        if not self.check_payload():
            print("[ERROR] The data was judged to be invalid.")
            self.ws.send('{"error":"Invalid data was sent."}')
            return False
        # Decrypt the user name
iv = self.packet['iv'].decode('base64')
enc_user = self.packet['user'].decode('base64')
aes = AES.new(self.enckey, AES.MODE_CBC, iv)
self.user = aes.decrypt(enc_user).replace('\x00', '')
print("[INFO] この試行のユーザーは{0}です。".format(self.user))
        # Escape
self.user = MySQLdb.escape_string(self.user)
self.packet['id'] = int(self.packet['id'])
return True
    #
    # Check whether the payload is valid
    #
def check_payload(self):
        # Does it contain the minimum required fields?
if 'lang' not in self.packet : return False
if 'code' not in self.packet : return False
if 'id' not in self.packet : return False
if 'iv' not in self.packet : return False
if 'user' not in self.packet : return False
        # Is the language available?
if 'compile' not in self.langlist : return False
if 'extension' not in self.langlist : return False
if self.packet['lang'] not in self.langlist['compile'] : return False
if self.packet['lang'] not in self.langlist['extension'] : return False
        # The data is valid
return True
    #
    # Generate a random string
    #
def randstr(self, length):
import random
import string
return ''.join([
random.choice(string.ascii_letters + string.digits)
for i in range(length)
])
    #
    # Accept an incoming request
    #
def procon(self, env, response):
path = env['PATH_INFO']
if path == "/":
return self.handle(env, response)
return
    #
    # Run the server
    #
def run(self):
        # Initialize the server
server = pywsgi.WSGIServer(
(self.host, self.port),
self.procon,
handler_class = WebSocketHandler
)
        # Connect to the SQL database
self.db = MySQLdb.connect(host = self.db_host,
db = self.db_name,
user = self.db_username,
passwd = self.db_password,
charset = 'utf8',
)
        # Start serving
server.serve_forever()
return
|
ptr-yudai/JokenPC
|
server/JPC.py
|
Python
|
mit
| 11,068 | 0.006781 |
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from click.testing import CliRunner
from tower_cli.cli.action import ActionSubcommand
from tests.compat import unittest
CATEGORIZED_OUTPUT = """Usage: foo [OPTIONS]
Field Options:
--bar TEXT foobar
Local Options:
--foo TEXT foobar
Global Options:
--tower-host TEXT foobar
Other Options:
--help Show this message and exit.
"""
class ActionCommandTests(unittest.TestCase):
"""A set of tests to ensure that the tower_cli Command class works
in the way we expect.
"""
def setUp(self):
self.runner = CliRunner()
def test_dash_dash_help(self):
"""Establish that no_args_is_help causes the help to be printed,
and an exit.
"""
# Create a command with which to test.
@click.command(no_args_is_help=True, cls=ActionSubcommand)
@click.argument('parrot')
def foo(parrot):
click.echo(parrot)
# Establish that this command echos if called with echo.
self.assertEqual(self.runner.invoke(foo, ['bar']).output, 'bar\n')
# Establish that this command sends help if called with nothing.
result = self.runner.invoke(foo)
self.assertIn('--help', result.output)
self.assertIn('Show this message and exit.\n', result.output)
def test_categorize_options(self):
"""Establish that options in help text are correctly categorized.
"""
@click.command(cls=ActionSubcommand)
@click.option('--foo', help='foobar')
@click.option('--bar', help='[FIELD]foobar')
@click.option('--tower-host', help='foobar')
def foo():
pass
result = self.runner.invoke(foo)
self.assertEqual(result.output, CATEGORIZED_OUTPUT)
@click.command(cls=ActionSubcommand, add_help_option=False)
def bar():
pass
result = self.runner.invoke(bar)
self.assertEqual(result.output, 'Usage: bar [OPTIONS]\n')
|
AlanCoding/tower-cli
|
tests/test_cli_action.py
|
Python
|
apache-2.0
| 2,582 | 0 |
"""
File format detector
"""
import logging, sys, os, csv, tempfile, shutil, re, zipfile
import registry
from galaxy import util
log = logging.getLogger(__name__)
def get_test_fname(fname):
"""Returns test data filename"""
path, name = os.path.split(__file__)
full_path = os.path.join(path, 'test', fname)
return full_path
def stream_to_file( stream, suffix='', prefix='', dir=None, text=False ):
"""Writes a stream to a temporary file, returns the temporary file's name"""
fd, temp_name = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=dir, text=text )
CHUNK_SIZE = 1048576
data_checked = False
is_compressed = False
is_binary = False
is_multi_byte = False
while 1:
chunk = stream.read( CHUNK_SIZE )
if not chunk:
break
if not data_checked:
# See if we're uploading a compressed file
if zipfile.is_zipfile( temp_name ):
is_compressed = True
else:
try:
if unicode( chunk[:2] ) == unicode( util.gzip_magic ):
is_compressed = True
except:
pass
if not is_compressed:
# See if we have a multi-byte character file
chars = chunk[:100]
is_multi_byte = util.is_multi_byte( chars )
if not is_multi_byte:
for char in chars:
if ord( char ) > 128:
is_binary = True
break
data_checked = True
if not is_compressed and not is_binary:
os.write( fd, chunk.encode( "utf-8" ) )
else:
# Compressed files must be encoded after they are uncompressed in the upload utility,
# while binary files should not be encoded at all.
os.write( fd, chunk )
os.close( fd )
return temp_name, is_multi_byte
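# Editorial sketch (not part of the original module): typical use of
# stream_to_file() when buffering an uploaded payload to disk; `upload_stream`
# is an illustrative name for any object exposing read().
#
#     temp_name, is_multi_byte = stream_to_file( upload_stream, prefix='upload_' )
#     if check_newlines( temp_name ):
#         convert_newlines( temp_name )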
def check_newlines( fname, bytes_to_read=52428800 ):
"""
Determines if there are any non-POSIX newlines in the first
number_of_bytes (by default, 50MB) of the file.
"""
CHUNK_SIZE = 2 ** 20
f = open( fname, 'r' )
    while f.tell() < bytes_to_read:
        chunk = f.read( CHUNK_SIZE )
        if not chunk:
            break
        if chunk.count( '\r' ):
            f.close()
            return True
f.close()
return False
def convert_newlines( fname ):
"""
Converts in place a file from universal line endings
to Posix line endings.
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\r3 4")
>>> convert_newlines(fname)
2
>>> file(fname).read()
'1 2\\n3 4\\n'
"""
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname, "U" ) ):
fp.write( "%s\n" % line.rstrip( "\r\n" ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def sep2tabs(fname, patt="\\s+"):
"""
Transforms in place a 'sep' separated file to a tab separated one
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\n3 4\\n")
>>> sep2tabs(fname)
2
>>> file(fname).read()
'1\\t2\\n3\\t4\\n'
"""
regexp = re.compile( patt )
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname ) ):
line = line.rstrip( '\r\n' )
elems = regexp.split( line )
fp.write( "%s\n" % '\t'.join( elems ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def convert_newlines_sep2tabs( fname, patt="\\s+" ):
"""
Combines above methods: convert_newlines() and sep2tabs()
so that files do not need to be read twice
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\r3 4")
>>> convert_newlines_sep2tabs(fname)
2
>>> file(fname).read()
'1\\t2\\n3\\t4\\n'
"""
regexp = re.compile( patt )
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname, "U" ) ):
line = line.rstrip( '\r\n' )
elems = regexp.split( line )
fp.write( "%s\n" % '\t'.join( elems ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def get_headers( fname, sep, count=60, is_multi_byte=False ):
"""
Returns a list with the first 'count' lines split by 'sep'
>>> fname = get_test_fname('complete.bed')
>>> get_headers(fname,'\\t')
[['chr7', '127475281', '127491632', 'NM_000230', '0', '+', '127486022', '127488767', '0', '3', '29,172,3225,', '0,10713,13126,'], ['chr7', '127486011', '127488900', 'D49487', '0', '+', '127486022', '127488767', '0', '2', '155,490,', '0,2399']]
"""
headers = []
for idx, line in enumerate(file(fname)):
line = line.rstrip('\n\r')
if is_multi_byte:
# TODO: fix this - sep is never found in line
line = unicode( line, 'utf-8' )
sep = sep.encode( 'utf-8' )
headers.append( line.split(sep) )
if idx == count:
break
return headers
def is_column_based( fname, sep='\t', skip=0, is_multi_byte=False ):
"""
Checks whether the file is column based with respect to a separator
(defaults to tab separator).
>>> fname = get_test_fname('test.gff')
>>> is_column_based(fname)
True
>>> fname = get_test_fname('test_tab.bed')
>>> is_column_based(fname)
True
>>> is_column_based(fname, sep=' ')
False
>>> fname = get_test_fname('test_space.txt')
>>> is_column_based(fname)
False
>>> is_column_based(fname, sep=' ')
True
>>> fname = get_test_fname('test_ensembl.tab')
>>> is_column_based(fname)
True
>>> fname = get_test_fname('test_tab1.tabular')
>>> is_column_based(fname, sep=' ', skip=0)
False
>>> fname = get_test_fname('test_tab1.tabular')
>>> is_column_based(fname)
True
"""
headers = get_headers( fname, sep, is_multi_byte=is_multi_byte )
count = 0
if not headers:
return False
for hdr in headers[skip:]:
if hdr and hdr[0] and not hdr[0].startswith('#'):
if len(hdr) > 1:
count = len(hdr)
break
if count < 2:
return False
for hdr in headers[skip:]:
if hdr and hdr[0] and not hdr[0].startswith('#'):
if len(hdr) != count:
return False
return True
def guess_ext( fname, sniff_order=None, is_multi_byte=False ):
"""
    Returns an extension that can be used in the datatype factory to
    generate a datatype for the 'fname' file
>>> fname = get_test_fname('megablast_xml_parser_test1.blastxml')
>>> guess_ext(fname)
'blastxml'
>>> fname = get_test_fname('interval.interval')
>>> guess_ext(fname)
'interval'
>>> fname = get_test_fname('interval1.bed')
>>> guess_ext(fname)
'bed'
>>> fname = get_test_fname('test_tab.bed')
>>> guess_ext(fname)
'bed'
>>> fname = get_test_fname('sequence.maf')
>>> guess_ext(fname)
'maf'
>>> fname = get_test_fname('sequence.fasta')
>>> guess_ext(fname)
'fasta'
>>> fname = get_test_fname('file.html')
>>> guess_ext(fname)
'html'
>>> fname = get_test_fname('test.gff')
>>> guess_ext(fname)
'gff'
>>> fname = get_test_fname('gff_version_3.gff')
>>> guess_ext(fname)
'gff3'
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("a\\t2\\nc\\t1\\nd\\t0")
>>> guess_ext(fname)
'tabular'
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("a 1 2 x\\nb 3 4 y\\nc 5 6 z")
>>> guess_ext(fname)
'txt'
>>> fname = get_test_fname('test_tab1.tabular')
>>> guess_ext(fname)
'tabular'
>>> fname = get_test_fname('alignment.lav')
>>> guess_ext(fname)
'lav'
"""
if sniff_order is None:
datatypes_registry = registry.Registry()
sniff_order = datatypes_registry.sniff_order
for datatype in sniff_order:
"""
Some classes may not have a sniff function, which is ok. In fact, the
Tabular and Text classes are 2 examples of classes that should never have
a sniff function. Since these classes are default classes, they contain
few rules to filter out data of other formats, so they should be called
from this function after all other datatypes in sniff_order have not been
successfully discovered.
"""
try:
if datatype.sniff( fname ):
return datatype.file_ext
except:
pass
headers = get_headers( fname, None )
is_binary = False
if is_multi_byte:
is_binary = False
else:
for hdr in headers:
for char in hdr:
if len( char ) > 1:
for c in char:
if ord( c ) > 128:
is_binary = True
break
elif ord( char ) > 128:
is_binary = True
break
if is_binary:
break
if is_binary:
break
if is_binary:
return 'data' #default binary data type file extension
if is_column_based( fname, '\t', 1, is_multi_byte=is_multi_byte ):
return 'tabular' #default tabular data type file extension
return 'txt' #default text data type file extension
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
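# Illustrative sketch: a typical caller would chain the helpers above roughly
# like this, assuming `fname` points at an uploaded plain-text file and the
# datatypes registry is configured.
#
#     if check_newlines( fname ):
#         convert_newlines( fname )
#     ext = guess_ext( fname )                    # e.g. 'tabular', 'bed', 'txt', ...
#     if ext == 'tabular':
#         assert is_column_based( fname, '\t' )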
|
dbcls/dbcls-galaxy
|
lib/galaxy/datatypes/sniff.py
|
Python
|
mit
| 9,773 | 0.014734 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualRouterPeeringsOperations(object):
"""VirtualRouterPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from a Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualRouterPeering"
"""Gets the specified Virtual Router Peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the Virtual Router Peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualRouterPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.VirtualRouterPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
parameters, # type: "_models.VirtualRouterPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualRouterPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualRouterPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
parameters, # type: "_models.VirtualRouterPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualRouterPeering"]
"""Creates or updates the specified Virtual Router Peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the Virtual Router Peering.
:type peering_name: str
:param parameters: Parameters supplied to the create or update Virtual Router Peering
operation.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.VirtualRouterPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualRouterPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.VirtualRouterPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualRouterPeeringListResult"]
"""Lists all Virtual Router Peerings in a Virtual Router resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualRouterPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.VirtualRouterPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualRouterPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings'} # type: ignore
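# Illustrative usage sketch: these operations are normally reached through a
# NetworkManagementClient rather than instantiated directly; the credential,
# subscription id and resource names below are placeholders.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.virtual_router_peerings.begin_create_or_update(
#         "my-rg", "my-virtual-router", "my-peering", peering_parameters)
#     peering = poller.result()
#     for item in client.virtual_router_peerings.list("my-rg", "my-virtual-router"):
#         print(item.name)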
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_virtual_router_peerings_operations.py
|
Python
|
mit
| 22,524 | 0.004972 |
"""
The collection of decorators for the ``color()`` method that each CA model
should have.
The method should be decorated by one of the classes below, otherwise
correct model behavior is not guaranteed.
All decorators take the ``(red, green, blue)`` tuple from the
``color()`` method, then process it to create some color effect.
A minimal example::
from xentica import core
from xentica.core import color_effects
class MyCA(core.CellularAutomaton):
state = core.IntegerProperty(max_val=1)
# ...
@color_effects.MovingAverage
def color(self):
red = self.main.state * 255
green = self.main.state * 255
blue = self.main.state * 255
return (red, green, blue)
"""
from xentica.core.variables import Constant
from xentica.core.mixins import BscaDetectorMixin
__all__ = ['ColorEffect', 'MovingAverage', ]
class ColorEffect(BscaDetectorMixin):
"""
The base class for other color effects.
    You may also use it as a standalone color effect decorator; it adds
    nothing, storing the calculated RGB value directly.
    To create your own class inherited from :class:`ColorEffect`, you
    should override the ``__call__`` method and place the color-processing
    code into ``self.effect``. The code should process the values of the
    ``new_r``, ``new_g``, ``new_b`` variables and store the results
    back into them.
An example::
class MyEffect(ColorEffect):
def __call__(self, *args):
self.effect = "new_r += 20;"
self.effect += "new_g += 15;"
self.effect += "new_b += 10;"
return super().__call__(*args)
"""
def __init__(self, func):
"""Initialize base attributes."""
self.func = func
self.effect = ""
def __call__(self):
"""
Implement the color decorator.
Sibling classes should override this method, and return
``super`` result, like shown in the example above.
"""
red, green, blue = self.func(self.bsca)
code = """
int new_r = %s;
int new_g = %s;
int new_b = %s;
%s
col[i] = make_int3(new_r, new_g, new_b);
""" % (red, green, blue, self.effect)
self.bsca.append_code(code)
class MovingAverage(ColorEffect):
"""
Apply the moving average to each color channel separately.
With this effect, 3 additional settings are available for you in
``Experiment`` classes:
fade_in
The maximum delta by which a channel could
*increase* its value in a single timestep.
fade_out
The maximum delta by which a channel could
*decrease* its value in a single timestep.
smooth_factor
The divisor for two previous settings, to make
the effect even smoother.
"""
def __call__(self):
"""Implement the effect."""
if not hasattr(self.bsca, "fade_in"):
self.bsca.fade_in = 255
if not hasattr(self.bsca, "fade_out"):
self.bsca.fade_out = 255
if not hasattr(self.bsca, "smooth_factor"):
self.bsca.smooth_factor = 1
self.bsca.define_constant(Constant("FADE_IN", self.bsca.fade_in))
self.bsca.define_constant(Constant("FADE_OUT", self.bsca.fade_out))
self.bsca.define_constant(Constant("SMOOTH_FACTOR",
self.bsca.smooth_factor))
self.effect = """
new_r *= SMOOTH_FACTOR;
new_g *= SMOOTH_FACTOR;
new_b *= SMOOTH_FACTOR;
int3 old_col = col[i];
new_r = max(min(new_r, old_col.x + FADE_IN),
old_col.x - FADE_OUT);
new_g = max(min(new_g, old_col.y + FADE_IN),
old_col.y - FADE_OUT);
new_b = max(min(new_b, old_col.z + FADE_IN),
old_col.z - FADE_OUT);
"""
return super().__call__()
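# Note on MovingAverage: the three attributes above are read from the model
# (``self.bsca``) with defaults of 255 / 255 / 1 and exposed to the kernel as
# the FADE_IN / FADE_OUT / SMOOTH_FACTOR constants; a model wanting a smoother
# trail defines smaller fade values (e.g. fade_in = 63, fade_out = 3), as the
# class docstring describes.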
|
a5kin/hecate
|
xentica/core/color_effects.py
|
Python
|
mit
| 4,034 | 0 |
# -*- coding: utf-8 -*-
import operator
from django.conf import settings
from moneyed import CURRENCIES, DEFAULT_CURRENCY, DEFAULT_CURRENCY_CODE
# The default currency, you can define this in your project's settings module
# This has to be a currency object imported from moneyed
DEFAULT_CURRENCY = getattr(settings, 'DEFAULT_CURRENCY', DEFAULT_CURRENCY)
# The default currency choices, you can define this in your project's
# settings module
PROJECT_CURRENCIES = getattr(settings, 'CURRENCIES', None)
CURRENCY_CHOICES = getattr(settings, 'CURRENCY_CHOICES', None)
if CURRENCY_CHOICES is None:
if PROJECT_CURRENCIES:
CURRENCY_CHOICES = [(code, CURRENCIES[code].name) for code in PROJECT_CURRENCIES]
else:
CURRENCY_CHOICES = [(c.code, c.name) for i, c in CURRENCIES.items() if
c.code != DEFAULT_CURRENCY_CODE]
CURRENCY_CHOICES.sort(key=operator.itemgetter(1, 0))
DECIMAL_PLACES = getattr(settings, 'CURRENCY_DECIMAL_PLACES', 2)
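# Illustrative usage sketch (values are examples only): a host project narrows
# these defaults by defining the corresponding names in its own settings module.
#
#     # project settings.py
#     from moneyed import USD
#     DEFAULT_CURRENCY = USD
#     CURRENCIES = ['USD', 'EUR', 'GBP']
#     CURRENCY_DECIMAL_PLACES = 2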
|
rescale/django-money
|
djmoney/settings.py
|
Python
|
bsd-3-clause
| 987 | 0.001013 |
# coding=utf-8
from _commandbase import RadianceCommand
from ..datatype import RadiancePath, RadianceTuple
from ..parameters.gensky import GenskyParameters
import os
class Gensky(RadianceCommand):
u"""
    gensky - Generate a RADIANCE description of the CIE sky.
The attributes for this class and their data descriptors are given below.
Please note that the first two inputs for each descriptor are for internal
naming purposes only.
Attributes:
outputName: An optional name for output file name (Default: 'untitled').
monthDayHour: A tuple containing inputs for month, day and hour.
genskyParameters: Radiance parameters for gensky. If None Default
parameters will be set. You can use self.genskyParameters to view,
add or remove the parameters before executing the command.
Usage:
from honeybee.radiance.parameters.gensky import GenSkyParameters
from honeybee.radiance.command.gensky import GenSky
        # create and modify genskyParameters. In this case a sunny sky with no sun
# will be generated.
gnskyParam = GenSkyParameters()
gnskyParam.sunnySkyNoSun = True
# create the gensky Command.
gnsky = GenSky(monthDayHour=(1,1,11), genskyParameters=gnskyParam,
outputName = r'd:/sunnyWSun_010111.sky' )
# run gensky
gnsky.execute()
>
"""
monthDayHour = RadianceTuple('monthDayHour', 'month day hour', tupleSize=3,
testType=False)
outputFile = RadiancePath('outputFile', descriptiveName='output sky file',
relativePath=None, checkExists=False)
def __init__(self, outputName='untitled', monthDayHour=None,
genskyParameters=None):
"""Init command."""
RadianceCommand.__init__(self)
self.outputFile = outputName if outputName.lower().endswith(".sky") \
else outputName + ".sky"
"""results file for sky (Default: untitled)"""
self.monthDayHour = monthDayHour
self.genskyParameters = genskyParameters
@classmethod
def fromSkyType(cls, outputName='untitled', monthDayHour=(1, 21, 12),
skyType=0, latitude=None, longitude=None, meridian=None):
"""Create a sky by sky type.
Args:
outputName: An optional name for output file name (Default: 'untitled').
monthDayHour: A tuple containing inputs for month, day and hour.
            skyType: An integer between 0-5 for CIE sky type.
0: [+s] Sunny with sun, 1: [-s] Sunny without sun,
2: [+i] Intermediate with sun, 3: [-i] Intermediate with no sun,
4: [-c] Cloudy overcast sky, 5: [-u] Uniform cloudy sky
            latitude: [-a] A float number to indicate site latitude. Negative
                angle indicates south latitude.
            longitude: [-o] A float number to indicate site longitude. Negative
                angle indicates east longitude.
meridian: [-m] A float number to indicate site meridian west of
Greenwich.
"""
_skyParameters = GenskyParameters(latitude=latitude, longitude=longitude,
meridian=meridian)
# modify parameters based on sky type
        try:
            skyType = int(skyType)
        except (TypeError, ValueError):
            raise ValueError("skyType should be an integer between 0-5.")
assert 0 <= skyType <= 5, "Sky type should be an integer between 0-5."
if skyType == 0:
_skyParameters.sunnySky = True
elif skyType == 1:
_skyParameters.sunnySky = False
elif skyType == 2:
_skyParameters.intermSky = True
elif skyType == 3:
_skyParameters.intermSky = False
elif skyType == 4:
_skyParameters.cloudySky = True
elif skyType == 5:
_skyParameters.uniformCloudySky = True
return cls(outputName=outputName, monthDayHour=monthDayHour,
genskyParameters=_skyParameters)
@classmethod
def createUniformSkyfromIlluminanceValue(cls, outputName="untitled",
illuminanceValue=10000):
"""Uniform CIE sky based on illuminance value.
Attributes:
outputName: An optional name for output file name (Default: 'untitled').
illuminanceValue: Desired illuminance value in lux
"""
        assert float(illuminanceValue) >= 0, "Illuminance value can't be negative."
_skyParameters = GenskyParameters(zenithBrightHorzDiff=illuminanceValue / 179.0)
return cls(outputName=outputName, genskyParameters=_skyParameters)
@classmethod
def fromRadiationValues(cls):
"""Create a sky based on sky radiation values."""
raise NotImplementedError()
@property
def genskyParameters(self):
"""Get and set genskyParameters."""
return self.__genskyParameters
@genskyParameters.setter
def genskyParameters(self, genskyParam):
self.__genskyParameters = genskyParam if genskyParam is not None \
else GenskyParameters()
assert hasattr(self.genskyParameters, "isRadianceParameters"), \
"input genskyParameters is not a valid parameters type."
def toRadString(self, relativePath=False):
"""Return full command as a string."""
# generate the name from self.weaFile
radString = "%s %s %s > %s" % (
self.normspace(os.path.join(self.radbinPath, 'gensky')),
self.monthDayHour.toRadString().replace("-monthDayHour ", ""),
self.genskyParameters.toRadString(),
self.normspace(self.outputFile.toRadString())
)
return radString
@property
def inputFiles(self):
"""Input files for this command."""
return None
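# Illustrative usage sketch (output name and site values are placeholders):
# generating a sunny CIE sky with sun via the class method above.
#
#     sky = Gensky.fromSkyType(outputName='sunny_21jun_noon',
#                              monthDayHour=(6, 21, 12), skyType=0,
#                              latitude=51.5, longitude=0.1)
#     print(sky.toRadString())
#     sky.execute()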
|
antonszilasi/honeybeex
|
honeybeex/honeybee/radiance/command/gensky.py
|
Python
|
gpl-3.0
| 5,946 | 0.001177 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from keystone.common.policies import base
DEPRECATED_REASON = (
"The domain API is now aware of system scope and default roles."
)
deprecated_list_domains = policy.DeprecatedRule(
name=base.IDENTITY % 'list_domains',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
deprecated_get_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'get_domain',
check_str=base.RULE_ADMIN_OR_TARGET_DOMAIN,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'update_domain',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'create_domain',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_domain',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
SYSTEM_USER_OR_DOMAIN_USER_OR_PROJECT_USER = (
'(role:reader and system_scope:all) or '
'token.domain.id:%(target.domain.id)s or '
'token.project.domain.id:%(target.domain.id)s'
)
domain_policies = [
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_domain',
# NOTE(lbragstad): This policy allows system, domain, and
# project-scoped tokens.
check_str=SYSTEM_USER_OR_DOMAIN_USER_OR_PROJECT_USER,
scope_types=['system', 'domain', 'project'],
description='Show domain details.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'GET'}],
deprecated_rule=deprecated_get_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_domains',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='List domains.',
operations=[{'path': '/v3/domains',
'method': 'GET'}],
deprecated_rule=deprecated_list_domains),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_domain',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Create domain.',
operations=[{'path': '/v3/domains',
'method': 'POST'}],
deprecated_rule=deprecated_create_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_domain',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Update domain.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'PATCH'}],
deprecated_rule=deprecated_update_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_domain',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Delete domain.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'DELETE'}],
deprecated_rule=deprecated_delete_domain),
]
def list_rules():
return domain_policies
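# Illustrative sketch of how these defaults are consumed via the standard
# oslo.policy API (keystone wires this up itself; target/creds are placeholders):
#
#     from oslo_config import cfg
#     from oslo_policy import policy as oslo_policy
#
#     enforcer = oslo_policy.Enforcer(cfg.CONF)
#     enforcer.register_defaults(list_rules())
#     # enforcer.enforce('identity:get_domain', target, creds, do_raise=True)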
|
openstack/keystone
|
keystone/common/policies/domain.py
|
Python
|
apache-2.0
| 3,937 | 0 |
""" process_model.py
Usage:
process_model.py <command> <project_code> <full_model_path> [options]
Arguments:
command action to be run on model, like: qc, audit or dwf
currently available: qc, audit, dwf
project_code unique project code consisting of 'projectnumber_projectModelPart'
like 456_11 , 416_T99 or 377_S
full_model_path revit model path including file name
use cfg shortcut if your full model path is already set in config.ini
Options:
-h, --help Show this help screen.
--viewer run revit in viewer mode (-> no transactions)
--html_path=<html> path to store html bokeh graphs, default in /commands/qc/*.html
--write_warn_ids write warning ids from warning command
--rvt_path=<rvt> full path to force specific rvt version other than detected
--rvt_ver=<rvtver> specify revit version and skip checking revit file version
(helpful if opening revit server files)
--audit activate open model with audit
--noworkshared open non-workshared model
--nodetach do not open workshared model detached
--notify choose to be notified with configured notify module(s)
--nofilecheck skips verifying model path actually exists
(helpful if opening revit server files)
--skip_hash_unchanged skips processing unchanged file
--timeout=<seconds> timeout in seconds before revit process gets terminated
"""
from docopt import docopt
import os
import pathlib
import hashlib
import subprocess
import psutil
import configparser
import time
import datetime
import logging
import colorful as col
import rvt_detector
from collections import defaultdict
from importlib import machinery
from tinydb import TinyDB, Query
from utils import rvt_journal_parser, rvt_journal_purger
from utils.win_utils import proc_open_files
from utils.rms_paths import get_paths
from notify.email import send_mail
from notify.slack import send_slack
from notify.req_post import send_post
def check_cfg_path(prj_number, cfg_str_or_path, cfg_path):
    """
    Resolves the model path: the literal "cfg" shortcut is looked up under the
    project code in config.ini, anything else is treated as a path.
    """
    config = configparser.ConfigParser()
    ini_file = cfg_path / "config.ini"
    if cfg_str_or_path == "cfg":
        if ini_file.exists():
            config.read(ini_file)
            if prj_number in config:
                config_path = config[prj_number]["path"]
                return pathlib.Path(config_path)
    return pathlib.Path(cfg_str_or_path)
def get_model_hash(rvt_model_path):
"""
Creates a hash of provided rvt model file
:param rvt_model_path:
:return: hash string
"""
BLOCKSIZE = 65536
hasher = hashlib.sha256()
with open(rvt_model_path, "rb") as rvt:
buf = rvt.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = rvt.read(BLOCKSIZE)
return hasher.hexdigest()
def check_hash_unchanged(hash_db, rvt_model_path, model_hash, date):
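    """
    Returns True if the stored hash for this model matches the current one;
    otherwise the new hash and date are upserted into the TinyDB hash table.
    """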
model_info = {"<full_model_path>": str(rvt_model_path),
">last_hash": model_hash,
">last_hash_date": date,
}
unchanged = hash_db.search((Query()["<full_model_path>"] == str(rvt_model_path)) &
(Query()[">last_hash"] == model_hash)
)
if unchanged:
return True
else:
hash_db.upsert(model_info, Query()["<full_model_path>"] == str(rvt_model_path)
)
def exit_with_log(message, severity=logging.warning, exit_return_code=1):
    """
    Ends the whole script after logging the message at the given severity.
    :param message: text appended to the log entry
    :param severity: logging function used for the entry (default: logging.warning)
    :param exit_return_code: return code written to the log entry
    :return:
    """
severity(f"{project_code};{current_proc_hash};{exit_return_code};;{message}")
exit()
def get_jrn_and_post_process(search_command, commands_dir):
"""
Searches command paths for register dict in __init__.py in command roots to
prepare appropriate command strings to be inserted into the journal file
:param search_command: command name to look up
:param commands_dir: commands directory
:return: command module, post process dict
"""
found_dir = False
module_rjm = None
post_proc_dict = defaultdict()
for directory in os.scandir(commands_dir):
command_name = directory.name
# print(command_name)
if search_command == command_name:
found_dir = True
print(f" found appropriate command directory {commands_dir / command_name}")
mod_init = commands_dir / command_name / "__init__.py"
if mod_init.exists():
mod = machinery.SourceFileLoader(command_name, str(mod_init)).load_module()
if "register" in dir(mod):
if mod.register["name"] == command_name:
if "rjm" in mod.register:
module_rjm = mod.register["rjm"]
if "post_process" in mod.register:
external_args = []
for arg in mod.register["post_process"]["args"]:
external_args.append(globals().get(arg))
post_proc_dict["func"] = mod.register["post_process"]["func"]
post_proc_dict["args"] = external_args
else:
exit_with_log('__init__.py in command directory not found')
if not found_dir:
print(col.bold_red(f" appropriate command directory for '{search_command}' not found - aborting."))
exit_with_log('command directory not found')
return module_rjm, post_proc_dict
def get_rvt_proc_journal(process, jrn_file_path):
open_files = process.open_files()
for proc_file in open_files:
file_name = pathlib.Path(proc_file.path).name
if file_name.startswith("journal"):
return proc_file.path
# if nothing found using the process.open_files
# dig deeper and get nasty
for proc_res in proc_open_files(process):
res_name = pathlib.Path(proc_res).name
if res_name.startswith("journal") and res_name.endswith("txt"):
return jrn_file_path / res_name
today_int = int(datetime.date.today().strftime("%Y%m%d"))
rms_paths = get_paths(__file__)
args = docopt(__doc__)
command = args["<command>"]
project_code = args["<project_code>"]
full_model_path = args["<full_model_path>"]
full_model_path = check_cfg_path(project_code, full_model_path, rms_paths.root)
model_path = full_model_path.parent
model_file_name = full_model_path.name
timeout = args["--timeout"]
html_path = args["--html_path"]
write_warn_ids = args["--write_warn_ids"]
rvt_override_path = args["--rvt_path"]
rvt_override_version = args["--rvt_ver"]
notify = args["--notify"]
disable_filecheck = args["--nofilecheck"]
disable_detach = args["--nodetach"]
disable_ws = args["--noworkshared"]
skip_hash_unchanged = args["--skip_hash_unchanged"]
audit = args["--audit"]
viewer = args["--viewer"]
if viewer:
viewer = "/viewer"
comma_concat_args = ",".join([f"{k}={v}" for k, v in args.items()])
print(col.bold_blue(f"+process model job control started with command: {command}"))
print(col.bold_orange(f"-detected following root path:"))
print(f" {rms_paths.root}")
format_json = {"sort_keys": True, "indent": 4, "separators": (',', ': ')}
hashes_db = TinyDB(rms_paths.db / "model_hashes.json", **format_json)
journal_file_path = rms_paths.journals / f"{project_code}.txt"
model_exists = full_model_path.exists()
timeout = int(timeout) if timeout else 60
if not html_path:
if command == "qc":
html_path = rms_paths.com_qc
elif command == "warnings":
html_path = rms_paths.com_warnings
elif not pathlib.Path(html_path).exists():
if command == "qc":
html_path = rms_paths.com_qc
print(f"your specified html path was not found - will export html graph to {rms_paths.com_qc} instead")
elif command == "warnings":
html_path = rms_paths.com_warnings
print(f"your specified html path was not found - will export html graph to {rms_paths.com_warnings} instead")
if write_warn_ids:
warn_ids_path = model_path / "RVT_fixme"
pathlib.Path(warn_ids_path).mkdir(exist_ok=True)
print(warn_ids_path)
else:
warn_ids_path = ""
job_logging = rms_paths.logs / "job_logging.csv"
header_logging = "time_stamp;level;project;process_hash;error_code;args;comments\n"
if not job_logging.exists():
with open(job_logging, "w") as logging_file:
logging_file.write(header_logging)
print(col.bold_blue(f"logging goes to: {job_logging}"))
logging.basicConfig(format='%(asctime)s;%(levelname)s;%(message)s',
datefmt="%Y%m%dT%H%M%SZ",
filename=job_logging,
level=logging.INFO)
logger = logging.getLogger(__name__)
logging.getLogger("bokeh").setLevel(logging.CRITICAL)
print(col.bold_orange('-detected following process structure:'))
current_proc_hash = hash(psutil.Process())
print(f" current process hash: {col.cyan(current_proc_hash)}")
logging.info(f"{project_code};{current_proc_hash};;{comma_concat_args};{'task_started'}")
if skip_hash_unchanged:
model_hash = get_model_hash(full_model_path)
print(f" model hash: {col.cyan(model_hash)}")
hash_unchanged = check_hash_unchanged(hashes_db, full_model_path, model_hash, today_int)
if hash_unchanged:
print(col.bold_red(f" model hash has not changed since last run!"))
print(col.bold_red(f" processing this model is skipped!!"))
time.sleep(1)
exit_with_log("unchanged_model", severity=logging.info, exit_return_code=0)
os.environ["RVT_QC_PRJ"] = project_code
os.environ["RVT_QC_PATH"] = str(full_model_path)
os.environ["RVT_LOG_PATH"] = str(rms_paths.logs)
if not rvt_override_version:
rvt_model_version = rvt_detector.get_rvt_file_version(full_model_path)
else:
rvt_model_version = rvt_override_version
if not rvt_override_path:
rvt_install_path = rvt_detector.installed_rvt_detection().get(rvt_model_version)
if not rvt_install_path:
print(f"no installed rvt versions for {rvt_model_version} detected - please use '--rvt_path' to specify path.")
logging.warning(f"{project_code};{current_proc_hash};1;;{'no rvt versions for {rvt_model_version} detected'}")
exit()
rvt_install_path = pathlib.Path(rvt_install_path)
else:
rvt_install_path = pathlib.Path(rvt_override_path)
mod_rjm, post_proc = get_jrn_and_post_process(command, rms_paths.commands)
if disable_filecheck or model_exists:
mod_rjm(project_code, full_model_path, journal_file_path, rms_paths.commands, rms_paths.logs)
proc_args = [arg for arg in [str(rvt_install_path), str(journal_file_path), viewer] if arg]
# print(proc_args)
run_proc = psutil.Popen(proc_args, cwd=str(rms_paths.root), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
run_proc_name = run_proc.name()
# let's wait half a second for rvt process to fire up
time.sleep(0.5)
if run_proc.name() == "Revit.exe":
proc_name_colored = col.bold_green(run_proc_name)
else:
proc_name_colored = col.bold_red(run_proc_name)
print(f" process info: {run_proc.pid} - {proc_name_colored}")
print(col.bold_orange("-detected revit:"))
print(f" version:{rvt_model_version} at path: {rvt_install_path}")
print(col.bold_orange("-process termination countdown:"))
# print(f" timeout until termination of process: {run_proc_id} - {proc_name_colored}:")
log_journal = get_rvt_proc_journal(run_proc, rms_paths.journals)
return_code = 9
return_logging = logging.info
# the main timeout loop
for sec in range(timeout):
time.sleep(1)
poll = run_proc.poll()
print(f" {str(timeout-sec).zfill(4)} seconds, proc poll: {poll}", end="\r")
if poll == 0:
print(col.bold_green(f" {poll} - revit finished!"))
return_code = "0"
return_logging = logging.info
break
elif timeout-sec-1 == 0:
print("\n")
print(col.bold_red(" timeout!!"))
if not poll:
print(col.bold_red(f" kill process now: {run_proc.pid}"))
run_proc.kill()
return_code = "1"
return_logging = logging.warning
# post loop processing, naively parsing journal files
print(col.bold_orange("-post process:"))
print(f" process open journal for post process parsing:\n {log_journal}")
log_journal_result = rvt_journal_parser.read_journal(log_journal)
log_journal_result = ",".join([f"{k}: {v}" for k, v in log_journal_result.items()])
if log_journal_result:
print(f" detected: {log_journal_result}")
if "corrupt" in log_journal_result:
return_logging = logging.critical
# run all notify modules
if notify:
notify_modules = [send_mail, send_slack, send_post]
for notify_function in notify_modules:
notify_function.notify(project_code, full_model_path, log_journal_result)
# getting post process funcs and args from command module for updating graphs and custom functionality
if post_proc:
post_proc["func"](*post_proc["args"])
# write log according to return code
logged_journal_excerpt = log_journal_result.strip('\n').strip('\r')
return_logging(f"{project_code};{current_proc_hash};{return_code};;{logged_journal_excerpt}")
# finally journal cleanup
rvt_journal_purger.purge(rms_paths.journals)
else:
print("model not found")
logging.warning(f"{project_code};{current_proc_hash};1;;{'model not found'}")
print(col.bold_blue("+process model job control script ended"))
|
hdm-dt-fb/rvt_model_services
|
process_model.py
|
Python
|
mit
| 14,344 | 0.00251 |
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
txt = Text("So long!\nThanks for all the fish.",
font = "Droid Serif",
fontsize = 20,
fontweight = BOLD,
lineheight = 1.2,
fill = color(0.25))
# Text.style() can be used to style individual characters in the text.
# It takes a start index, a stop index, and optional styling parameters:
txt.style(9, len(txt), fontsize=txt.fontsize/2, fontweight=NORMAL)
def draw(canvas):
canvas.clear()
x = (canvas.width - textwidth(txt)) / 2
y = 250
text(txt, x, y)
canvas.size = 500, 500
canvas.run(draw)
|
pepsipepsi/nodebox_opengl_python3
|
examples/04-text/02-style.py
|
Python
|
bsd-3-clause
| 687 | 0.039301 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Instance, Float
from traitsui.api import View, Item, UItem, VGroup
# ============= standard library imports ========================
from uncertainties import nominal_value
# ============= local library imports ==========================
from pychron.envisage.tasks.base_editor import BaseTraitsEditor
from pychron.graph.stacked_regression_graph import StackedRegressionGraph
class RatioEditor(BaseTraitsEditor):
"""
"""
graph = Instance(StackedRegressionGraph)
intercept_ratio = Float
time_zero_offset = Float(0, auto_set=False, enter_set=True)
ratio_intercept = Float
basename = ''
def _time_zero_offset_changed(self):
self.refresh_plot()
def setup(self):
self.data = self.analysis.isotopes
self.setup_graph()
def setup_graph(self):
cd = dict(padding=20,
spacing=5,
stack_order='top_to_bottom')
g = StackedRegressionGraph(container_dict=cd)
self.graph = g
self.refresh_plot()
def refresh_plot(self):
g = self.graph
d = self.data
g.clear()
for ni, di in [('Ar40', 'Ar39')]:
niso, diso = d[ni], d[di]
self.plot_ratio(g, niso, diso)
self.intercept_ratio = nominal_value(niso.uvalue / diso.uvalue)
def plot_ratio(self, g, niso, diso):
niso.time_zero_offset = self.time_zero_offset
diso.time_zero_offset = self.time_zero_offset
fd = {'filter_outliers': True, 'std_devs': 2, 'iterations': 1}
niso.filter_outliers_dict = fd
diso.filter_outliers_dict = fd
niso.dirty = True
diso.dirty = True
g.new_plot()
g.set_x_limits(min_=0, max_=100)
g.set_y_title(niso.name)
_,_,nl = g.new_series(niso.offset_xs, niso.ys, filter_outliers_dict=fd)
g.new_plot()
g.set_y_title(diso.name)
_,_,dl = g.new_series(diso.offset_xs, diso.ys, filter_outliers_dict=fd)
# g.new_plot()
# nreg = nl.regressor
# dreg = dl.regressor
#
# xs = nreg.xs
# ys = nreg.predict(xs)/dreg.predict(xs)
# _,_,l =g.new_series(xs, ys, fit='parabolic')
# reg = l.regressor
# self.regressed_ratio_intercept = reg.predict(0)
# xs = linspace(0, 100)
# rys = niso.regressor.predict(xs) / diso.regressor.predict(xs)
xs = niso.offset_xs
rys = niso.ys / diso.ys
g.new_plot()
g.set_y_title('{}/{}'.format(niso.name, diso.name))
g.set_x_title('Time (s)')
# p,s,l = g.new_series(xs, rys, fit='parabolic', filter_outliers_dict=fd)
fd = {'filter_outliers': True, 'std_devs': 2, 'iterations': 1}
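        # Fit the raw ratio with a rational function (p0*x + p1)/(p2*x + p3)
        # and read the ratio intercept off the regression at t = 0.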
fitfunc = lambda p, x: (p[0]*x+p[1])/(p[2]*x+p[3])
fit = ((fitfunc, [1,1,1,1]), None)
p,s,l = g.new_series(xs, rys, fit=fit,
use_error_envelope=False,
filter_outliers_dict=fd)
reg = l.regressor
self.ratio_intercept = reg.predict(0)
def traits_view(self):
v = View(UItem('graph', style='custom'),
VGroup(Item('time_zero_offset'),
Item('intercept_ratio', style='readonly'),
Item('ratio_intercept', style='readonly')))
return v
if __name__ == '__main__':
re = RatioEditor()
re.setup()
re.configure_traits()
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/processing/ratios/ratio_editor.py
|
Python
|
apache-2.0
| 4,353 | 0.003446 |
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='d;z^Q:0HDfCSKXQE|zp&U8)n)P7Y[E<r0nY*m)F&1`*t$>gf9N')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
hqpr/findyour3d
|
config/settings/local.py
|
Python
|
mit
| 1,853 | 0.001079 |
/usr/share/pyshared/gwibber/microblog/urlshorter/zima.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/pymodules/python2.7/gwibber/microblog/urlshorter/zima.py
|
Python
|
gpl-3.0
| 56 | 0.017857 |
import sys
import re
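# Assumed input format (inferred from the splits below, not documented in the
# original script): each line looks like "<prefix>.<oid>.out: ... val='<ncbi_id>' ...",
# so the OID is the second dot-separated token before ".out:" and the NCBI id
# is the value following "val=" once quotes are stripped.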
for lines in open(sys.argv[1], "rU"):
line = lines.strip()
lexemes = re.split(".out:", line)
oid = lexemes[0].split(".")[1]
ncbi = re.split("val=|'", lexemes[1])[2]
print oid + " \t" + ncbi
|
fandemonium/code
|
parsers/img_oid_to_ncbi_from_html.py
|
Python
|
mit
| 234 | 0.004274 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ClusterDestroyedEvent(vim, *args, **kwargs):
'''This event records when a cluster is destroyed.'''
obj = vim.client.factory.create('ns0:ClusterDestroyedEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % len(args))
required = [ 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
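# Illustrative usage sketch (argument values are placeholders): the four
# required fields are passed positionally after the vim connection, optional
# fields by keyword.
#
#     event = ClusterDestroyedEvent(vim, chain_id, created_time, key, user_name,
#                                   fullFormattedMessage="Cluster destroyed")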
|
xuru/pyvisdk
|
pyvisdk/do/cluster_destroyed_event.py
|
Python
|
mit
| 1,147 | 0.00959 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class interface_ref(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface-ref. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Reference to an interface or subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "interface-ref"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"subinterfaces",
"subinterface",
"ipv4",
"unnumbered",
"interface-ref",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config (container)
YANG Description: Configured reference to interface / subinterface
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configured reference to interface / subinterface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/state (container)
YANG Description: Operational state for interface-ref
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state for interface-ref
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/__init__.py
|
Python
|
apache-2.0
| 9,237 | 0.001191 |
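A minimal sketch of how the generated binding above is typically used, assuming the sibling `config`/`state` modules resolve as they do inside the napalm-yang package tree.
ref = interface_ref()
cfg = ref.config       # YANGDynClass-wrapped 'config' container
st = ref.state         # YANGDynClass-wrapped 'state' container
print(ref._path())     # ['interfaces', 'interface', ..., 'unnumbered', 'interface-ref']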
#!/usr/bin/env python
""" Calculate the Julian Date """
import time
import math
t = time.time()
""" Technically, we should be adding 2440587.5,
however, since we are trying to stick to the stardate
concept, we add only 40587.5"""
jd = (t / 86400.0 + 40587.5)
# Use the idea that 10 Julian days is equal to 1 stardate
print "%05.2f" % jd
|
casep/Molido
|
sdate.py
|
Python
|
gpl-2.0
| 350 | 0.002857 |
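An equivalent Python 3 sketch of the calculation above; the 40587.5 offset is the conventional Unix-epoch Julian Date offset 2440587.5 with 2,400,000 dropped for the script's stardate convention.
import time

t = time.time()
jd = t / 86400.0 + 40587.5
print("%05.2f" % jd)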
# -*- coding: utf-8 -*-
import sys
import time
import json
import datetime
import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
import robot
from robot.items import RobotItem
from robot.models.base import Base
from robot.settings import logger
class EladiedSinaComCnItem(RobotItem):
title = scrapy.Field()  # title
cl = scrapy.Field()  # category
picmsg = scrapy.Field()  # picture info
time = scrapy.Field()  # creation time
@property
def module(self):
return 'news'
class Process(Base):
def __init__(self):
pass
def process(self, item):
@robot.utils.checkHave
def havePicGirl():
"""检测是否存在条目"""
sql = """SELECT * FROM girlpic WHERE picmsg=%s"""
return sql, self.db, [item['picmsg']]
if not havePicGirl():
sql = """INSERT INTO girlpic (title, cl, picmsg, createtime) values (%s, %s, %s, %s)"""
self.db.insert(sql, item['title'], item['cl'], item['picmsg'], datetime.date.today())
class EladiedSinaComCn(scrapy.Spider):
"""
Scrape the front page of the Sina eLadies photo channel (eladies.sina.com.cn/photo/).
"""
name = "eladies_sina_com_cn"
allowed_domains = ["sina.com.cn", ]
start_urls = ["http://eladies.sina.com.cn/photo/", ]
def parse(self, response):
logger.info('[%s] %s' % (datetime.date.today(), response.url))
hxs = HtmlXPathSelector(response)
l = [
# Photo spreads
{'id': 'SI_Scroll_2_Cont', 'cl': 'photograph_gallery'},
# Gossip
{'id': 'SI_Scroll_3_Cont', 'cl': 'gossip'},
# Fashion and styling
{'id': 'SI_Scroll_4_Cont', 'cl': 'style'},
# Body and slimming
{'id': 'SI_Scroll_5_Cont', 'cl': 'body'},
# Makeup and hair
{'id': 'SI_Scroll_6_Cont', 'cl': 'beauty'},
]
for d in l:
sites = hxs.select('//div[@id="%s"]/div/div/a/@href' % d['id']).extract()
for site in sites:
cl = d['cl']
request = Request(site, callback=self.deepParse, meta={'cl': cl},)
yield request
def deepParse(self, response):
hxs = HtmlXPathSelector(response)
item = EladiedSinaComCnItem()
item['title'] = hxs.select('//div[@id="eData"]/dl[1]/dt/text()').extract()[0]
picl = hxs.select('//div[@id="eData"]/dl/dd[1]/text()').extract()
descl = hxs.select('//div[@id="eData"]/dl/dd[5]/text()').extract()
item['time'] = time.strftime("%Y-%m-%d", time.localtime(int(time.time())))
item['cl'] = response.meta['cl']
item['picmsg'] = json.dumps([{'pic': pic, 'desc': desc} for (pic, desc) in zip(picl, descl)])
yield item
|
li282886931/apistore
|
robot/robot/news/eladies_sina_com_cn.py
|
Python
|
gpl-2.0
| 2,757 | 0.005261 |
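A small sketch of how `picmsg` is assembled in `deepParse` above: the picture URLs and captions scraped from the page are zipped into dicts and JSON-encoded. The URLs and captions below are placeholders.
import json

picl = ['http://example.invalid/1.jpg', 'http://example.invalid/2.jpg']
descl = ['first caption', 'second caption']
picmsg = json.dumps([{'pic': pic, 'desc': desc} for pic, desc in zip(picl, descl)])
print(picmsg)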
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class AvailabilitySetsOperations(object):
"""AvailabilitySetsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-03-30".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-30"
self.config = config
def create_or_update(
self, resource_group_name, availability_set_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set
operation.
:type parameters:
~azure.mgmt.compute.v2017_03_30.models.AvailabilitySet
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AvailabilitySet or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2017_03_30.models.AvailabilitySet or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'AvailabilitySet')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AvailabilitySet', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config):
"""Delete an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationStatusResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves information about an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AvailabilitySet or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2017_03_30.models.AvailabilitySet or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AvailabilitySet', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all availability sets in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AvailabilitySet
:rtype:
~azure.mgmt.compute.v2017_03_30.models.AvailabilitySetPaged[~azure.mgmt.compute.v2017_03_30.models.AvailabilitySet]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.AvailabilitySetPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AvailabilitySetPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_available_sizes(
self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config):
"""Lists all available virtual machine sizes that can be used to create a
new virtual machine in an existing availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualMachineSize
:rtype:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineSizePaged[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineSize]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/availability_sets_operations.py
|
Python
|
mit
| 17,248 | 0.002609 |
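These operations are normally reached through the service client rather than instantiated directly; a hedged sketch, assuming an azure-mgmt-compute release of the same vintage and placeholder credentials and resource names.
from azure.mgmt.compute import ComputeManagementClient

def list_availability_set_names(credentials, subscription_id, resource_group):
    client = ComputeManagementClient(credentials, subscription_id)
    # client.availability_sets is backed by the AvailabilitySetsOperations class
    # above; list() returns the paged iterator built by internal_paging().
    return [a.name for a in client.availability_sets.list(resource_group)]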
from __future__ import division
|
laowantong/mocodo
|
mocodo/tests/__init__.py
|
Python
|
mit
| 33 | 0 |
# -*- coding: utf-8 -*-
# gthnk (c) Ian Dennis Miller
import os
import flask
import logging
from flaskext.markdown import Markdown
from mdx_linkify.mdx_linkify import LinkifyExtension
from mdx_journal import JournalExtension
from . import db, login_manager, bcrypt
from .models.day import Day
from .models.entry import Entry
from .models.page import Page
from .models.user import User
def create_app():
app = flask.Flask(__name__)
try:
app.config.from_envvar('SETTINGS')
except RuntimeError:
default_filename = os.path.expanduser('~/.gthnk/gthnk.conf')
if os.path.isfile(default_filename):
print("WARN: using default configuration file ~/.gthnk/gthnk.conf")
app.config.from_pyfile(default_filename)
logging.basicConfig(
format='%(asctime)s %(module)-16s %(levelname)-8s %(message)s',
filename=app.config["LOG"],
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
logging.info("Server: Start")
logging.info("Database: {}".format(app.config['SQLALCHEMY_DATABASE_URI']))
from .blueprints.root import root
app.register_blueprint(root)
from .blueprints.auth import auth
app.register_blueprint(auth)
from .blueprints.day import day
app.register_blueprint(day)
# from .blueprints.attachments import attachments
# app.register_blueprint(attachments)
db.init_app(app)
login_manager.init_app(app)
bcrypt.init_app(app)
app.markdown = Markdown(app, extensions=[
LinkifyExtension(),
JournalExtension()
])
return app
app = create_app()
|
iandennismiller/gthnk
|
src/gthnk/server.py
|
Python
|
mit
| 1,615 | 0.000619 |
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import sys
from datetime import datetime, timedelta, date, tzinfo
from decimal import Decimal as D
from uuid import uuid4, uuid1
from cassandra import InvalidRequest
from cassandra.cqlengine.columns import TimeUUID
from cassandra.cqlengine.columns import Ascii
from cassandra.cqlengine.columns import Text
from cassandra.cqlengine.columns import Integer
from cassandra.cqlengine.columns import BigInt
from cassandra.cqlengine.columns import VarInt
from cassandra.cqlengine.columns import DateTime
from cassandra.cqlengine.columns import Date
from cassandra.cqlengine.columns import UUID
from cassandra.cqlengine.columns import Boolean
from cassandra.cqlengine.columns import Decimal
from cassandra.cqlengine.columns import Inet
from cassandra.cqlengine.connection import execute
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model, ValidationError
from cassandra import util
from tests.integration import PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
class TestDatetime(BaseCassEngTestCase):
class DatetimeTest(Model):
test_id = Integer(primary_key=True)
created_at = DateTime()
@classmethod
def setUpClass(cls):
sync_table(cls.DatetimeTest)
@classmethod
def tearDownClass(cls):
drop_table(cls.DatetimeTest)
def test_datetime_io(self):
now = datetime.now()
self.DatetimeTest.objects.create(test_id=0, created_at=now)
dt2 = self.DatetimeTest.objects(test_id=0).first()
assert dt2.created_at.timetuple()[:6] == now.timetuple()[:6]
def test_datetime_tzinfo_io(self):
class TZ(tzinfo):
def utcoffset(self, date_time):
return timedelta(hours=-1)
def dst(self, date_time):
return None
now = datetime(1982, 1, 1, tzinfo=TZ())
dt = self.DatetimeTest.objects.create(test_id=1, created_at=now)
dt2 = self.DatetimeTest.objects(test_id=1).first()
assert dt2.created_at.timetuple()[:6] == (now + timedelta(hours=1)).timetuple()[:6]
def test_datetime_date_support(self):
today = date.today()
self.DatetimeTest.objects.create(test_id=2, created_at=today)
dt2 = self.DatetimeTest.objects(test_id=2).first()
assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat()
def test_datetime_none(self):
dt = self.DatetimeTest.objects.create(test_id=3, created_at=None)
dt2 = self.DatetimeTest.objects(test_id=3).first()
assert dt2.created_at is None
dts = self.DatetimeTest.objects.filter(test_id=3).values_list('created_at')
assert dts[0][0] is None
def test_datetime_invalid(self):
dt_value= 'INVALID'
with self.assertRaises(TypeError):
self.DatetimeTest.objects.create(test_id=4, created_at=dt_value)
def test_datetime_timestamp(self):
dt_value = 1454520554
self.DatetimeTest.objects.create(test_id=5, created_at=dt_value)
dt2 = self.DatetimeTest.objects(test_id=5).first()
assert dt2.created_at == datetime.utcfromtimestamp(dt_value)
def test_datetime_large(self):
dt_value = datetime(2038, 12, 31, 10, 10, 10, 123000)
self.DatetimeTest.objects.create(test_id=6, created_at=dt_value)
dt2 = self.DatetimeTest.objects(test_id=6).first()
assert dt2.created_at == dt_value
def test_datetime_truncate_microseconds(self):
"""
Test to ensure that truncate microseconds works as expected.
This will be default behavior in the future and we will need to modify the tests to comply
with new behavior
@since 3.2
@jira_ticket PYTHON-273
@expected_result microseconds should be to the nearest thousand when truncate is set.
@test_category object_mapper
"""
DateTime.truncate_microseconds = True
try:
dt_value = datetime(2024, 12, 31, 10, 10, 10, 923567)
dt_truncated = datetime(2024, 12, 31, 10, 10, 10, 923000)
self.DatetimeTest.objects.create(test_id=6, created_at=dt_value)
dt2 = self.DatetimeTest.objects(test_id=6).first()
self.assertEqual(dt2.created_at,dt_truncated)
finally:
# We need to always return behavior to default
DateTime.truncate_microseconds = False
class TestBoolDefault(BaseCassEngTestCase):
class BoolDefaultValueTest(Model):
test_id = Integer(primary_key=True)
stuff = Boolean(default=True)
@classmethod
def setUpClass(cls):
sync_table(cls.BoolDefaultValueTest)
def test_default_is_set(self):
tmp = self.BoolDefaultValueTest.create(test_id=1)
self.assertEqual(True, tmp.stuff)
tmp2 = self.BoolDefaultValueTest.get(test_id=1)
self.assertEqual(True, tmp2.stuff)
class TestBoolValidation(BaseCassEngTestCase):
class BoolValidationTest(Model):
test_id = Integer(primary_key=True)
bool_column = Boolean()
@classmethod
def setUpClass(cls):
sync_table(cls.BoolValidationTest)
def test_validation_preserves_none(self):
test_obj = self.BoolValidationTest(test_id=1)
test_obj.validate()
self.assertIsNone(test_obj.bool_column)
class TestVarInt(BaseCassEngTestCase):
class VarIntTest(Model):
test_id = Integer(primary_key=True)
bignum = VarInt(primary_key=True)
@classmethod
def setUpClass(cls):
sync_table(cls.VarIntTest)
@classmethod
def tearDownClass(cls):
sync_table(cls.VarIntTest)
def test_varint_io(self):
# TODO: this is a weird test. I changed the number from sys.maxint (which doesn't exist in Python 3)
# to the giant number below and it broke between runs.
long_int = 92834902384092834092384028340283048239048203480234823048230482304820348239
int1 = self.VarIntTest.objects.create(test_id=0, bignum=long_int)
int2 = self.VarIntTest.objects(test_id=0).first()
self.assertEqual(int1.bignum, int2.bignum)
class TestDate(BaseCassEngTestCase):
class DateTest(Model):
test_id = Integer(primary_key=True)
created_at = Date()
@classmethod
def setUpClass(cls):
if PROTOCOL_VERSION < 4:
return
sync_table(cls.DateTest)
@classmethod
def tearDownClass(cls):
if PROTOCOL_VERSION < 4:
return
drop_table(cls.DateTest)
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
def test_date_io(self):
today = date.today()
self.DateTest.objects.create(test_id=0, created_at=today)
result = self.DateTest.objects(test_id=0).first()
self.assertEqual(result.created_at, util.Date(today))
def test_date_io_using_datetime(self):
now = datetime.utcnow()
self.DateTest.objects.create(test_id=0, created_at=now)
result = self.DateTest.objects(test_id=0).first()
self.assertIsInstance(result.created_at, util.Date)
self.assertEqual(result.created_at, util.Date(now))
def test_date_none(self):
self.DateTest.objects.create(test_id=1, created_at=None)
dt2 = self.DateTest.objects(test_id=1).first()
assert dt2.created_at is None
dts = self.DateTest.objects(test_id=1).values_list('created_at')
assert dts[0][0] is None
class TestDecimal(BaseCassEngTestCase):
class DecimalTest(Model):
test_id = Integer(primary_key=True)
dec_val = Decimal()
@classmethod
def setUpClass(cls):
sync_table(cls.DecimalTest)
@classmethod
def tearDownClass(cls):
drop_table(cls.DecimalTest)
def test_decimal_io(self):
dt = self.DecimalTest.objects.create(test_id=0, dec_val=D('0.00'))
dt2 = self.DecimalTest.objects(test_id=0).first()
assert dt2.dec_val == dt.dec_val
dt = self.DecimalTest.objects.create(test_id=0, dec_val=5)
dt2 = self.DecimalTest.objects(test_id=0).first()
assert dt2.dec_val == D('5')
class TestUUID(BaseCassEngTestCase):
class UUIDTest(Model):
test_id = Integer(primary_key=True)
a_uuid = UUID(default=uuid4())
@classmethod
def setUpClass(cls):
sync_table(cls.UUIDTest)
@classmethod
def tearDownClass(cls):
drop_table(cls.UUIDTest)
def test_uuid_str_with_dashes(self):
a_uuid = uuid4()
t0 = self.UUIDTest.create(test_id=0, a_uuid=str(a_uuid))
t1 = self.UUIDTest.get(test_id=0)
assert a_uuid == t1.a_uuid
def test_uuid_str_no_dashes(self):
a_uuid = uuid4()
t0 = self.UUIDTest.create(test_id=1, a_uuid=a_uuid.hex)
t1 = self.UUIDTest.get(test_id=1)
assert a_uuid == t1.a_uuid
def test_uuid_with_upcase(self):
a_uuid = uuid4()
val = str(a_uuid).upper()
t0 = self.UUIDTest.create(test_id=0, a_uuid=val)
t1 = self.UUIDTest.get(test_id=0)
assert a_uuid == t1.a_uuid
class TestTimeUUID(BaseCassEngTestCase):
class TimeUUIDTest(Model):
test_id = Integer(primary_key=True)
timeuuid = TimeUUID(default=uuid1())
@classmethod
def setUpClass(cls):
sync_table(cls.TimeUUIDTest)
@classmethod
def tearDownClass(cls):
drop_table(cls.TimeUUIDTest)
def test_timeuuid_io(self):
"""
Ensures that the TimeUUID default value written at create time is read back intact.
"""
t0 = self.TimeUUIDTest.create(test_id=0)
t1 = self.TimeUUIDTest.get(test_id=0)
assert t0.timeuuid.time == t1.timeuuid.time
class TestInteger(BaseCassEngTestCase):
class IntegerTest(Model):
test_id = UUID(primary_key=True, default=lambda:uuid4())
value = Integer(default=0, required=True)
def test_default_zero_fields_validate(self):
""" Tests that integer columns with a default value of 0 validate """
it = self.IntegerTest()
it.validate()
class TestBigInt(BaseCassEngTestCase):
class BigIntTest(Model):
test_id = UUID(primary_key=True, default=lambda:uuid4())
value = BigInt(default=0, required=True)
def test_default_zero_fields_validate(self):
""" Tests that bigint columns with a default value of 0 validate """
it = self.BigIntTest()
it.validate()
class TestAscii(BaseCassEngTestCase):
def test_min_length(self):
""" Test arbitrary minimal lengths requirements. """
Ascii(min_length=0).validate('')
Ascii(min_length=0).validate(None)
Ascii(min_length=0).validate('kevin')
Ascii(min_length=1).validate('k')
Ascii(min_length=5).validate('kevin')
Ascii(min_length=5).validate('kevintastic')
with self.assertRaises(ValidationError):
Ascii(min_length=1).validate('')
with self.assertRaises(ValidationError):
Ascii(min_length=1).validate(None)
with self.assertRaises(ValidationError):
Ascii(min_length=6).validate('')
with self.assertRaises(ValidationError):
Ascii(min_length=6).validate(None)
with self.assertRaises(ValidationError):
Ascii(min_length=6).validate('kevin')
with self.assertRaises(ValueError):
Ascii(min_length=-1)
def test_max_length(self):
""" Test arbitrary maximal lengths requirements. """
Ascii(max_length=0).validate('')
Ascii(max_length=0).validate(None)
Ascii(max_length=1).validate('')
Ascii(max_length=1).validate(None)
Ascii(max_length=1).validate('b')
Ascii(max_length=5).validate('')
Ascii(max_length=5).validate(None)
Ascii(max_length=5).validate('b')
Ascii(max_length=5).validate('blake')
with self.assertRaises(ValidationError):
Ascii(max_length=0).validate('b')
with self.assertRaises(ValidationError):
Ascii(max_length=5).validate('blaketastic')
with self.assertRaises(ValueError):
Ascii(max_length=-1)
def test_length_range(self):
Ascii(min_length=0, max_length=0)
Ascii(min_length=0, max_length=1)
Ascii(min_length=10, max_length=10)
Ascii(min_length=10, max_length=11)
with self.assertRaises(ValueError):
Ascii(min_length=10, max_length=9)
with self.assertRaises(ValueError):
Ascii(min_length=1, max_length=0)
def test_type_checking(self):
Ascii().validate('string')
Ascii().validate(u'unicode')
Ascii().validate(bytearray('bytearray', encoding='ascii'))
with self.assertRaises(ValidationError):
Ascii().validate(5)
with self.assertRaises(ValidationError):
Ascii().validate(True)
Ascii().validate("!#$%&\'()*+,-./")
with self.assertRaises(ValidationError):
Ascii().validate('Beyonc' + chr(233))
if sys.version_info < (3, 1):
with self.assertRaises(ValidationError):
Ascii().validate(u'Beyonc' + unichr(233))
def test_unaltering_validation(self):
""" Test the validation step doesn't re-interpret values. """
self.assertEqual(Ascii().validate(''), '')
self.assertEqual(Ascii().validate(None), None)
self.assertEqual(Ascii().validate('yo'), 'yo')
def test_non_required_validation(self):
""" Tests that validation is ok on none and blank values if required is False. """
Ascii().validate('')
Ascii().validate(None)
def test_required_validation(self):
""" Tests that validation raise on none and blank values if value required. """
Ascii(required=True).validate('k')
with self.assertRaises(ValidationError):
Ascii(required=True).validate('')
with self.assertRaises(ValidationError):
Ascii(required=True).validate(None)
# With min_length set.
Ascii(required=True, min_length=0).validate('k')
Ascii(required=True, min_length=1).validate('k')
with self.assertRaises(ValidationError):
Ascii(required=True, min_length=2).validate('k')
# With max_length set.
Ascii(required=True, max_length=1).validate('k')
with self.assertRaises(ValidationError):
Ascii(required=True, max_length=2).validate('kevin')
with self.assertRaises(ValueError):
Ascii(required=True, max_length=0)
class TestText(BaseCassEngTestCase):
def test_min_length(self):
""" Test arbitrary minimal lengths requirements. """
Text(min_length=0).validate('')
Text(min_length=0).validate(None)
Text(min_length=0).validate('blake')
Text(min_length=1).validate('b')
Text(min_length=5).validate('blake')
Text(min_length=5).validate('blaketastic')
with self.assertRaises(ValidationError):
Text(min_length=1).validate('')
with self.assertRaises(ValidationError):
Text(min_length=1).validate(None)
with self.assertRaises(ValidationError):
Text(min_length=6).validate('')
with self.assertRaises(ValidationError):
Text(min_length=6).validate(None)
with self.assertRaises(ValidationError):
Text(min_length=6).validate('blake')
with self.assertRaises(ValueError):
Text(min_length=-1)
def test_max_length(self):
""" Test arbitrary maximal lengths requirements. """
Text(max_length=0).validate('')
Text(max_length=0).validate(None)
Text(max_length=1).validate('')
Text(max_length=1).validate(None)
Text(max_length=1).validate('b')
Text(max_length=5).validate('')
Text(max_length=5).validate(None)
Text(max_length=5).validate('b')
Text(max_length=5).validate('blake')
with self.assertRaises(ValidationError):
Text(max_length=0).validate('b')
with self.assertRaises(ValidationError):
Text(max_length=5).validate('blaketastic')
with self.assertRaises(ValueError):
Text(max_length=-1)
def test_length_range(self):
Text(min_length=0, max_length=0)
Text(min_length=0, max_length=1)
Text(min_length=10, max_length=10)
Text(min_length=10, max_length=11)
with self.assertRaises(ValueError):
Text(min_length=10, max_length=9)
with self.assertRaises(ValueError):
Text(min_length=1, max_length=0)
def test_type_checking(self):
Text().validate('string')
Text().validate(u'unicode')
Text().validate(bytearray('bytearray', encoding='ascii'))
with self.assertRaises(ValidationError):
Text().validate(5)
with self.assertRaises(ValidationError):
Text().validate(True)
Text().validate("!#$%&\'()*+,-./")
Text().validate('Beyonc' + chr(233))
if sys.version_info < (3, 1):
Text().validate(u'Beyonc' + unichr(233))
def test_unaltering_validation(self):
""" Test the validation step doesn't re-interpret values. """
self.assertEqual(Text().validate(''), '')
self.assertEqual(Text().validate(None), None)
self.assertEqual(Text().validate('yo'), 'yo')
def test_non_required_validation(self):
""" Tests that validation is ok on none and blank values if required is False """
Text().validate('')
Text().validate(None)
def test_required_validation(self):
""" Tests that validation raise on none and blank values if value required. """
Text(required=True).validate('b')
with self.assertRaises(ValidationError):
Text(required=True).validate('')
with self.assertRaises(ValidationError):
Text(required=True).validate(None)
# With min_length set.
Text(required=True, min_length=0).validate('b')
Text(required=True, min_length=1).validate('b')
with self.assertRaises(ValidationError):
Text(required=True, min_length=2).validate('b')
# With max_length set.
Text(required=True, max_length=1).validate('b')
with self.assertRaises(ValidationError):
Text(required=True, max_length=2).validate('blake')
with self.assertRaises(ValueError):
Text(required=True, max_length=0)
class TestExtraFieldsRaiseException(BaseCassEngTestCase):
class TestModel(Model):
id = UUID(primary_key=True, default=uuid4)
def test_extra_field(self):
with self.assertRaises(ValidationError):
self.TestModel.create(bacon=5000)
class TestPythonDoesntDieWhenExtraFieldIsInCassandra(BaseCassEngTestCase):
class TestModel(Model):
__table_name__ = 'alter_doesnt_break_running_app'
id = UUID(primary_key=True, default=uuid4)
def test_extra_field(self):
drop_table(self.TestModel)
sync_table(self.TestModel)
self.TestModel.create()
execute("ALTER TABLE {0} add blah int".format(self.TestModel.column_family_name(include_keyspace=True)))
self.TestModel.objects().all()
class TestTimeUUIDFromDatetime(BaseCassEngTestCase):
def test_conversion_specific_date(self):
dt = datetime(1981, 7, 11, microsecond=555000)
uuid = util.uuid_from_time(dt)
from uuid import UUID
assert isinstance(uuid, UUID)
ts = (uuid.time - 0x01b21dd213814000) / 1e7 # back to a timestamp
new_dt = datetime.utcfromtimestamp(ts)
# checks that we created a UUID1 with the proper timestamp
assert new_dt == dt
class TestInet(BaseCassEngTestCase):
class InetTestModel(Model):
id = UUID(primary_key=True, default=uuid4)
address = Inet()
def setUp(self):
drop_table(self.InetTestModel)
sync_table(self.InetTestModel)
def test_inet_saves(self):
tmp = self.InetTestModel.create(address="192.168.1.1")
m = self.InetTestModel.get(id=tmp.id)
assert m.address == "192.168.1.1"
def test_non_address_fails(self):
# TODO: presently this only tests that the server blows it up. Is there supposed to be local validation?
with self.assertRaises(InvalidRequest):
self.InetTestModel.create(address="what is going on here?")
|
Richard-Mathie/cassandra_benchmark
|
vendor/github.com/datastax/python-driver/tests/integration/cqlengine/columns/test_validation.py
|
Python
|
apache-2.0
| 21,274 | 0.001081 |
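A minimal cqlengine sketch mirroring the patterns exercised by the tests above: define a model, sync its table, and round-trip a row. The model name is illustrative, and connection setup (contact points, default keyspace) is assumed to happen elsewhere, as it does in the test harness.
from uuid import uuid4
from cassandra.cqlengine.columns import UUID, Text
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.management import sync_table

class Note(Model):
    id = UUID(primary_key=True, default=uuid4)
    body = Text(max_length=200)

# With a connection registered:
#   sync_table(Note)
#   note = Note.create(body='hello')
#   assert Note.get(id=note.id).body == 'hello'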
# Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handling for privileges for grant and deny clauses in ACEs
according to WebDAV ACP specification.
"""
from webdav import Constants
from webdav.Connection import WebdavError
__version__ = "$LastChangedRevision$"
class Privilege(object):
"""This class provides functionality for handling privileges for ACEs.
@ivar name: Name of the privilege.
@type name: C{string}
@cvar __privileges: List of allowed XML tags for privileges.
@type __privileges: C{tuple} of C{string}s
"""
__privileges = list()
def __init__(self, privilege=None, domroot=None):
"""
Constructor should be called either with no parameters (to create a blank Privilege)
or with one parameter (a DOM tree or a privilege name to initialize it directly).
@param domroot: A DOM tree (default: None).
@type domroot: L{webdav.WebdavResponse.Element} object
@param privilege: The valid name of a privilege (default: None).
@type privilege: C{string}
@raise WebdavError: When non-valid parameters or sets of parameters are
passed a L{WebdavError} is raised.
"""
self.name = None
if domroot:
if len(domroot.children) != 1:
raise WebdavError('Wrong number of elements for Privilege constructor, we have: %i' \
% (len(domroot.children)))
else:
child = domroot.children[0]
if child.ns == Constants.NS_DAV and child.name in self.__privileges:
self.name = child.name
else:
raise WebdavError('Not a valid privilege tag, we have: %s%s' \
% (child.ns, child.name))
elif privilege:
if privilege in self.__privileges:
self.name = privilege
else:
raise WebdavError('Not a valid privilege tag, we have: %s.' % str(privilege))
@classmethod
def registerPrivileges(cls, privileges):
"""
Registers supported privilege tags.
@param privileges: List of privilege tags.
@type privileges: C{list} of C{unicode}
"""
for privilege in privileges:
cls.__privileges.append(privilege)
def __cmp__(self, other):
""" Compares two Privilege instances. """
if not isinstance(other, Privilege):
return 1
if self.name != other.name:
return 1
else:
return 0
def __repr__(self):
""" Returns the string representation of an instance. """
return '<class Privilege: name: "%s">' % (self.name)
def copy(self, other):
"""
Copy Privilege object.
@param other: Another privilege to copy.
@type other: L{Privilege} object
@raise WebdavError: When an object that is not a L{Privilege} is passed
a L{WebdavError} is raised.
"""
if not isinstance(other, Privilege):
raise WebdavError('Non-Privilege object passed to copy method: %s' % other.__class__)
self.name = other.name
def toXML(self):
"""
Returns privilege content as string in valid XML as described in WebDAV ACP.
"""
assert self.name is not None, "privilege is not initialized or does not contain valid content!"
privilege = 'D:' + Constants.TAG_PRIVILEGE
return '<%s><D:%s/></%s>' % (privilege, self.name, privilege)
|
khertan/ownNotes
|
python/webdav/acp/Privilege.py
|
Python
|
gpl-3.0
| 4,423 | 0.007009 |
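A usage sketch drawn from the class's own docstring; it assumes 'read' is among the privilege tags the package registers and that Constants.TAG_PRIVILEGE is the literal 'privilege'.
Privilege.registerPrivileges(['read', 'write'])
p = Privilege(privilege='read')
print(p.toXML())   # expected: <D:privilege><D:read/></D:privilege>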
# Copyright (c) 2017 Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================================================================
VERSION = '0.1.0'
|
domin1101/malmo-challenge
|
malmopy/version.py
|
Python
|
mit
| 1,232 | 0.007305 |
from django.db import models
from django.contrib.auth.models import User
from albaproject.settings import MEDIA_ROOT
import pdb
def _upload_to_generic(prefix_path=None, instance=None, field=None, filename=None):
#pdb.set_trace()
if not instance.pk: # generate DB PK if not present
instance.save()
if not prefix_path:
if not filename:
return '{0}/job_{1}/{2}'.format(instance.user.username, instance.pk,
field)
return '{0}/job_{1}/{2}/{3}'.format(instance.user.username, instance.pk,
field, filename)
return '{0}/{1}/job_{2}/{3}'.format(prefix_path, instance.user.username,
instance.pk, field)
class Job(models.Model):
def __unicode__(self):
return str(self.id)
def save(self, *args, **kwargs):
#pdb.set_trace()
_input = self.file_input
_job = self.mapred_job
_output = self.file_output
self.file_input = None
self.mapred_job = None
self.file_output = None
super(Job, self).save(*args,**kwargs)
self.save = super(Job, self).save
self.file_input = _input
self.mapred_job = _job
self.file_output = _output
self.save() #super.save
def input_dest(self, filename):
return _upload_to_generic(None, self, 'input', filename)
def mapred_dest(self, filename):
return _upload_to_generic(None, self, 'mapred', filename)
def output_dest(self, filename):
return _upload_to_generic(None, self, 'output', filename)
def output_path(self):
return _upload_to_generic(MEDIA_ROOT, self, 'output', None)
user = models.ForeignKey(User)
file_input = models.FileField(upload_to=input_dest, null=True)
mapred_job = models.FileField(upload_to=mapred_dest, null=True)
fully_qualified_job_impl_class = models.CharField(max_length=200, null=True)
file_output = models.FileField(upload_to=output_dest, null=True)
submission_date = models.DateTimeField(auto_now_add=True)
class Server(models.Model):
job = models.ForeignKey(Job)
openstack_id = models.CharField(max_length=200)
server_name = models.CharField(max_length=200)
vcpus = models.PositiveSmallIntegerField()
ram = models.PositiveIntegerField()
disk = models.PositiveIntegerField()
|
marcos-sb/quick-openstacked-hadoop
|
Alba/albaproject/mapred/models.py
|
Python
|
apache-2.0
| 2,416 | 0.009934 |
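A sketch of the path scheme produced by `_upload_to_generic` above, using a stand-in object so no database row is needed; the username and pk are illustrative values.
from types import SimpleNamespace

fake_job = SimpleNamespace(pk=7, user=SimpleNamespace(username='alice'))
print(_upload_to_generic(None, fake_job, 'input', 'data.csv'))      # alice/job_7/input/data.csv
print(_upload_to_generic('/srv/media', fake_job, 'output', None))   # /srv/media/alice/job_7/output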
import collections
from django import forms
from django.forms.util import ErrorDict
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
from amo import helpers
from applications.models import AppVersion
sort_by = (
('', _lazy(u'Keyword Match')),
('updated', _lazy(u'Updated', 'advanced_search_form_updated')),
('newest', _lazy(u'Created', 'advanced_search_form_newest')),
('weeklydownloads', _lazy(u'Downloads')),
('users', _lazy(u'Users')),
('averagerating', _lazy(u'Rating', 'advanced_search_form_rating')),
)
collection_sort_by = (
('weekly', _lazy(u'Most popular this week')),
('monthly', _lazy(u'Most popular this month')),
('all', _lazy(u'Most popular all time')),
('rating', _lazy(u'Highest Rated')),
('newest', _lazy(u'Newest')),
)
per_page = (20, 50, )
tuplize = lambda x: divmod(int(x * 10), 10)
# These releases were so minor that we don't want to search for them.
skip_versions = collections.defaultdict(list)
skip_versions[amo.FIREFOX] = [tuplize(v) for v in amo.FIREFOX.exclude_versions]
min_version = collections.defaultdict(lambda: (0, 0))
min_version.update({
amo.FIREFOX: tuplize(amo.FIREFOX.min_display_version),
amo.THUNDERBIRD: tuplize(amo.THUNDERBIRD.min_display_version),
amo.SEAMONKEY: tuplize(amo.SEAMONKEY.min_display_version),
amo.SUNBIRD: tuplize(amo.SUNBIRD.min_display_version),
})
def get_app_versions(app):
appversions = AppVersion.objects.filter(application=app.id)
min_ver, skip = min_version[app], skip_versions[app]
versions = [(a.major, a.minor1) for a in appversions]
strings = ['%s.%s' % v for v in sorted(set(versions), reverse=True)
if v >= min_ver and v not in skip]
return [('any', _('Any'))] + zip(strings, strings)
# Fake categories to slip some add-on types into the search groups.
_Cat = collections.namedtuple('Cat', 'id name weight type')
def get_search_groups(app):
sub = []
types_ = [t for t in (amo.ADDON_DICT, amo.ADDON_SEARCH, amo.ADDON_THEME)
if t in app.types]
for type_ in types_:
sub.append(_Cat(0, amo.ADDON_TYPES[type_], 0, type_))
sub.extend(helpers.sidebar(app)[0])
sub = [('%s,%s' % (a.type, a.id), a.name) for a in
sorted(sub, key=lambda x: (x.weight, x.name))]
top_level = [('all', _('all add-ons')),
('collections', _('all collections')), ]
if amo.ADDON_PERSONA in app.types:
top_level += (('personas', _('all personas')),)
return top_level[:1] + sub + top_level[1:], top_level
SEARCH_CHOICES = (
('all', _lazy('search for add-ons')),
('collections', _lazy('search for collections')),
('personas', _lazy('search for personas')),
('apps', _lazy('search for apps')))
class SimpleSearchForm(forms.Form):
"""Powers the search box on every page."""
q = forms.CharField(required=False)
cat = forms.CharField(required=False, widget=forms.HiddenInput)
appver = forms.CharField(required=False, widget=forms.HiddenInput)
platform = forms.CharField(required=False, widget=forms.HiddenInput)
choices = dict(SEARCH_CHOICES)
def clean_cat(self):
self.data = dict(self.data.items())
return self.data.setdefault('cat', 'all')
def placeholder(self):
val = self.clean_cat()
return self.choices.get(val, self.choices['all'])
def SearchForm(request):
current_app = request.APP or amo.FIREFOX
search_groups, top_level = get_search_groups(current_app)
class _SearchForm(SimpleSearchForm):
cat = forms.ChoiceField(choices=search_groups, required=False)
# This gets replaced by a <select> with js.
lver = forms.ChoiceField(
label=_(u'{0} Version').format(unicode(current_app.pretty)),
choices=get_app_versions(current_app), required=False)
appver = forms.CharField(required=False)
atype = forms.TypedChoiceField(label=_('Type'),
choices=[(t, amo.ADDON_TYPE[t]) for t in amo.ADDON_SEARCH_TYPES],
required=False, coerce=int, empty_value=amo.ADDON_ANY)
pid = forms.TypedChoiceField(label=_('Platform'),
choices=[(p[0], p[1].name) for p in amo.PLATFORMS.iteritems()
if p[1] != amo.PLATFORM_ANY], required=False,
coerce=int, empty_value=amo.PLATFORM_ANY.id)
platform = forms.ChoiceField(required=False,
choices=[[p.shortname, p.id] for p in amo.PLATFORMS.values()])
sort = forms.ChoiceField(label=_('Sort By'), choices=sort_by,
required=False)
pp = forms.TypedChoiceField(label=_('Per Page'),
choices=zip(per_page, per_page), required=False, coerce=int,
empty_value=per_page[0])
advanced = forms.BooleanField(widget=forms.HiddenInput, required=False)
tag = forms.CharField(widget=forms.HiddenInput, required=False)
page = forms.IntegerField(widget=forms.HiddenInput, required=False)
# Attach these to the form for usage in the template.
top_level_cat = dict(top_level)
def clean_platform(self):
p = self.cleaned_data.get('platform')
choices = dict(self.fields['platform'].choices)
return choices.get(p)
# TODO(jbalogh): when we start using this form for zamboni search, it
# should check that the appid and lver match up using app_versions.
def clean(self):
d = self.cleaned_data
raw = self.data
# Set some defaults
if not d.get('appid'):
d['appid'] = request.APP.id
# Since not all categories are listed in this form, we use the raw
# data.
if 'cat' in raw:
if ',' in raw['cat']:
try:
d['atype'], d['cat'] = map(int, raw['cat'].split(','))
except ValueError:
d['cat'] = None
elif raw['cat'] == 'all':
d['cat'] = None
if 'page' not in d or not d['page'] or d['page'] < 1:
d['page'] = 1
return d
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
Does not remove cleaned_data if there are errors.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data
# has changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
d = request.GET.copy()
return _SearchForm(d)
class SecondarySearchForm(forms.Form):
q = forms.CharField(widget=forms.HiddenInput, required=False)
cat = forms.CharField(widget=forms.HiddenInput)
pp = forms.CharField(widget=forms.HiddenInput, required=False)
sortby = forms.ChoiceField(label=_lazy(u'Sort By'),
choices=collection_sort_by,
initial='weekly', required=False)
page = forms.IntegerField(widget=forms.HiddenInput, required=False)
def clean_pp(self):
d = self.cleaned_data['pp']
try:
return int(d)
except:
return per_page[0]
def clean(self):
d = self.cleaned_data
if not d.get('pp'):
d['pp'] = per_page[0]
return d
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
Does not remove cleaned_data if there are errors.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data
# has changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
SORT_CHOICES = (
(None, _lazy(u'Relevance')),
('users', _lazy(u'Most Users')),
('rating', _lazy(u'Top Rated')),
('created', _lazy(u'Newest')),
# --
('name', _lazy(u'Name')),
('downloads', _lazy(u'Weekly Downloads')),
('updated', _lazy(u'Recently Updated')),
('hotness', _lazy(u'Up & Coming')),
)
APP_SORT_CHOICES = (
(None, _lazy(u'Relevance')),
('downloads', _lazy(u'Weekly Downloads')),
('rating', _lazy(u'Top Rated')),
('created', _lazy(u'Newest')),
# --
('name', _lazy(u'Name')),
('hotness', _lazy(u'Up & Coming')),
)
class ESSearchForm(forms.Form):
q = forms.CharField(required=False)
tag = forms.CharField(required=False)
platform = forms.ChoiceField(required=False,
choices=[(p.shortname, p.id) for p in amo.PLATFORMS.values()])
appver = forms.CharField(required=False)
atype = forms.TypedChoiceField(required=False, coerce=int,
choices=[(t, amo.ADDON_TYPE[t]) for t in amo.ADDON_SEARCH_TYPES])
cat = forms.CharField(required=False)
sort = forms.ChoiceField(required=False, choices=SORT_CHOICES)
def __init__(self, *args, **kw):
addon_type = kw.pop('type', None)
super(ESSearchForm, self).__init__(*args, **kw)
if addon_type == amo.ADDON_WEBAPP:
self.fields['sort'].choices = APP_SORT_CHOICES
def clean_appver(self):
appver = self.cleaned_data.get('appver')
if appver:
major = appver.split('.')[0]
if major.isdigit():
appver = major + '.0'
return appver
def clean_sort(self):
sort = self.cleaned_data.get('sort')
return sort if sort in dict(SORT_CHOICES) else None
def clean_cat(self):
cat = self.cleaned_data.get('cat')
if ',' in cat:
try:
self.cleaned_data['atype'], cat = map(int, cat.split(','))
except ValueError:
return None
else:
try:
return int(cat)
except ValueError:
return None
def full_clean(self):
"""
Cleans self.data and populates self._errors and self.cleaned_data.
Does not remove cleaned_data if there are errors.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data
# has changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
|
jbalogh/zamboni
|
apps/search/forms.py
|
Python
|
bsd-3-clause
| 11,040 | 0.000815 |
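As a quick illustration of the ESSearchForm defined in apps/search/forms.py above, the sketch below binds it to some made-up query parameters; it assumes a configured Django settings module and the zamboni amo constants, so it is illustrative rather than something to run verbatim.
# Illustrative only -- the query values are invented for this example.
form = ESSearchForm({'q': 'adblock', 'appver': '4.0b7', 'sort': 'rating', 'cat': '1,71'})
form.is_valid()
data = form.cleaned_data
# clean_appver() keeps only the major version: '4.0b7' -> '4.0'
# clean_sort() falls back to None (relevance) for sort keys not in SORT_CHOICES
# clean_cat() splits 'atype,cat' pairs, so cleaned_data['atype'] becomes 1 here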
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Calin Pavel <calin.pavel@codemart.ro>
"""
import os
import logging
from logging.handlers import MemoryHandler
from tvb.basic.profile import TvbProfile
from tvb.basic.logger.simple_handler import SimpleTimedRotatingFileHandler
class ClusterTimedRotatingFileHandler(MemoryHandler):
"""
This is a custom rotating file handler which computes the name of the file depending on the
execution environment (web node or cluster node)
"""
# Name of the log file where code from Web application will be stored
WEB_LOG_FILE = "web_application.log"
# Name of the file where to write logs from the code executed on cluster nodes
CLUSTER_NODES_LOG_FILE = "operations_executions.log"
    # Size of the buffer which stores log entries in memory,
    # in number of lines
BUFFER_CAPACITY = 20
def __init__(self, when='h', interval=1, backupCount=0):
"""
        Constructor for the logging handler.
"""
# Formatting string
format_str = '%(asctime)s - %(levelname)s'
if TvbProfile.current.cluster.IN_OPERATION_EXECUTION_PROCESS:
log_file = self.CLUSTER_NODES_LOG_FILE
if TvbProfile.current.cluster.IS_RUNNING_ON_CLUSTER_NODE:
node_name = TvbProfile.current.cluster.CLUSTER_NODE_NAME
if node_name is not None:
format_str += ' [node:' + str(node_name) + '] '
else:
format_str += ' [proc:' + str(os.getpid()) + '] '
else:
log_file = self.WEB_LOG_FILE
format_str += ' - %(name)s - %(message)s'
rotating_file_handler = SimpleTimedRotatingFileHandler(log_file, when, interval, backupCount)
rotating_file_handler.setFormatter(logging.Formatter(format_str))
MemoryHandler.__init__(self, capacity=self.BUFFER_CAPACITY, target=rotating_file_handler)
|
rajul/tvb-framework
|
tvb/config/logger/cluster_handler.py
|
Python
|
gpl-2.0
| 3,331 | 0.006304 |
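A minimal sketch of how the ClusterTimedRotatingFileHandler above might be attached to a logger inside a TVB deployment; the logger name is made up and the constructor arguments are simply the defaults, so treat this as illustrative only.
import logging
# Illustrative only: assumes a working TvbProfile configuration is in place.
handler = ClusterTimedRotatingFileHandler(when='h', interval=1, backupCount=0)
log = logging.getLogger('tvb.example')   # hypothetical logger name
log.addHandler(handler)
log.info("records are buffered in memory and flushed to the rotating file "
         "once BUFFER_CAPACITY entries accumulate")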
"""
===============================================
Demonstration for filling a histogram in a loop
===============================================
A simple, one-dimensional histogram is filled in a loop with random
values. The result is then plotted with the built-in plot command.
"""
from pyhistogram import Hist
import numpy as np
import matplotlib.pyplot as plt
h = Hist(20, -5, 5)
sample = np.random.normal(size=500)
for v in sample:
h.fill(v)
h.plot()
plt.show()
|
chrisboo/pyhistogram
|
examples/plot_simple_1D_hist_example.py
|
Python
|
gpl-3.0
| 478 | 0 |
import logging
from django.core.management.base import BaseCommand
from waldur_rancher.utils import SyncUser
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = """Sync users from Waldur to Rancher."""
def handle(self, *args, **options):
def print_message(count, action, name='user'):
if count == 1:
self.stdout.write(
self.style.SUCCESS('%s %s has been %s.' % (count, name, action))
)
else:
self.stdout.write(
self.style.SUCCESS('%s %ss have been %s.' % (count, name, action))
)
result = SyncUser.run()
for action in ['blocked', 'created', 'activated', 'updated']:
print_message(result.get(action, 0), action)
print_message(result.get('project roles deleted', 0), 'deleted', 'project role')
        print_message(result.get('project roles created', 0), 'created', 'project role')
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_rancher/management/commands/sync_users.py
|
Python
|
mit
| 986 | 0.004057 |
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from contextlib import contextmanager
# this is stolen from python 3.4 :)
@contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
|
didrocks/cupstream2distro
|
cupstream2distro/utils.py
|
Python
|
gpl-3.0
| 904 | 0 |
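A minimal usage sketch for the ignored() helper above (it mirrors contextlib.suppress from later Python versions); the file name here is hypothetical:
import os
# Remove a file if it exists, silently ignoring the error when it does not.
with ignored(OSError):
    os.remove('stale.lock')   # hypothetical path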
from pythonthegathering import ManaPool, spell
pool = ManaPool()
@spell('WBB')
def boop(x):
print(x)
pool.tap('plains').tap('swamp').tap('swamp')
boop('boop', mana_pool=pool, mana_pay={'W': 1, 'B': 2})
|
linky00/pythonthegathering
|
test.py
|
Python
|
mit
| 209 | 0.009569 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-28 04:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('atlas_doc', '0007_page_version'),
]
operations = [
migrations.AlterField(
model_name='collection',
name='prev_rev',
field=models.UUIDField(blank=True, null=True),
),
]
|
robertdown/atlas_docs
|
atlas_doc/migrations/0008_auto_20170828_0043.py
|
Python
|
gpl-3.0
| 461 | 0 |
#!/usr/bin/env python
import math
fin = open('figs/single-rod-in-water.dat', 'r')
fout = open('figs/single-rods-calculated-density.dat', 'w')
kB = 3.16681539628059e-6 # This is Boltzmann's constant in Hartree/Kelvin
first = 1
nm = 18.8972613
for line in fin:
current = str(line)
pieces = current.split('\t')
if first:
r2 = float(pieces[0])/2*nm
E2 = float(pieces[1])
first = 0
else:
if ((float(pieces[0])/2*nm - r2) > 0.25):
r1 = r2
r2 = float(pieces[0])/2*nm
E1 = E2
E2 = float(pieces[1]) # actually it's energy per unit length!
length = 1 # arbitrary
r = (r1 + r2)/2
dEdR = (E2-E1)/(r2-r1)*length
area = 2*math.pi*r*length
force = dEdR
pressure = force/area
kT = kB*298 # about this
ncontact = pressure/kT
fout.write(str(r)+'\t'+str(ncontact)+'\n')
fin.close()
fout.close()
|
droundy/deft
|
papers/hughes-saft/figs/density_calc.py
|
Python
|
gpl-2.0
| 986 | 0.004057 |
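The loop above computes the contact density from the energy per unit length via n = p/kT with p = (dE/dr)/(2*pi*r). Below is a vectorized NumPy sketch of the same arithmetic; it assumes the same two-column input (rod diameter in nm, energy per unit length) and skips the 0.25-Bohr spacing filter used in the original loop.
import numpy as np

nm = 18.8972613                      # nm -> Bohr radii, as above
kT = 3.16681539628059e-6 * 298       # Boltzmann constant (Hartree/K) times ~298 K
data = np.loadtxt('figs/single-rod-in-water.dat')   # same input file as above
r = data[:, 0] / 2 * nm              # rod radius in atomic units
E = data[:, 1]                       # energy per unit length
r_mid = (r[:-1] + r[1:]) / 2
dEdR = np.diff(E) / np.diff(r)       # force per unit length of rod
n_contact = dEdR / (2 * np.pi * r_mid) / kT   # pressure / kT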
from flask_user import login_required
from app.API_Rest.Services.BaseService import BaseService
from app.models.generadorJSON.respuestas_encuestas_generadorJSON import generarJSON_encuesta_alumno
from app.models.respuestas_encuesta_models import EncuestaAlumno, RespuestaEncuestaTematica, RespuestaEncuestaTags
from app.models.palabras_clave_models import PalabrasClaveParaMateria, TematicaPorMateria
from app.models.alumno_models import MateriasAlumno
from app.models.horarios_models import Curso
from app.API_Rest.codes import *
from app.models.respuestas_encuesta_models import RespuestaEncuestaAlumno, RespuestaEncuestaEstrellas
from app.DAO.EncuestasDAO import *
class EncuestaAlumnoService(BaseService):
def getNombreClaseServicio(self):
return "Encuesta Alumno Service"
##########################################
## Servicios ##
##########################################
@login_required
def get(self, idEncuestaAlumno):
self.logg_parametros_recibidos()
parametros_son_validos, msj, codigo = self.validar_parametros(dict([
("idEncuestaAlumno", {
self.PARAMETRO: idEncuestaAlumno,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.id_es_valido, []),
(self.existe_id, [EncuestaAlumno]),
(self.encuesta_pertenece_al_alumno, [])
]
})
]))
if not parametros_son_validos:
self.logg_error(msj)
return {'Error': msj}, codigo
encuesta = EncuestaAlumno.query.get(idEncuestaAlumno)
result = (generarJSON_encuesta_alumno(encuesta), SUCCESS_OK)
self.logg_resultado(result)
return result
@login_required
def post(self, idEncuestaAlumno):
self.logg_parametros_recibidos()
alumno = self.obtener_alumno_usuario_actual()
if not alumno:
msj = "El usuario no tiene ningun alumno asociado"
self.logg_error(msj)
return {'Error': msj}, CLIENT_ERROR_NOT_FOUND
finalizada = self.obtener_booleano("finalizada")
parametros_son_validos, msj, codigo = self.validar_parametros(dict([
("idEncuestaAlumno", {
self.PARAMETRO: idEncuestaAlumno,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.id_es_valido, []),
(self.existe_id, [EncuestaAlumno]),
(self.encuesta_pertenece_al_alumno, []),
(self.encuesta_no_esta_finalizada, [])
]
}),
("finalizada", {
self.PARAMETRO: finalizada,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.booleano_es_valido, [])
]
})
]))
if not parametros_son_validos:
self.logg_error(msj)
return {'Error': msj}, codigo
encuesta = EncuestaAlumno.query.get(idEncuestaAlumno)
encuesta.finalizada = finalizada
db.session.commit()
materiaAlumno = MateriasAlumno.query.get(encuesta.materia_alumno_id)
self.agregarPalabrasClavesALasMaterias(encuesta, materiaAlumno.materia_id)
self.agregarTematicasALasMaterias(encuesta, materiaAlumno.materia_id)
self.actualizar_puntaje_y_cantidad_encuestas_curso(encuesta, materiaAlumno.curso_id)
result = SUCCESS_NO_CONTENT
self.logg_resultado(result)
return result
def actualizar_puntaje_y_cantidad_encuestas_curso(self, encuesta, id_curso):
curso = Curso.query.get(id_curso)
curso.puntaje_total_encuestas += encuesta.obtener_cantidad_estrellas_elegidas()
curso.cantidad_encuestas_completas += 1
db.session.commit()
def agregarPalabrasClavesALasMaterias(self, encuesta, id_materia):
respuestas = RespuestaEncuestaTags.query\
.filter(RespuestaEncuestaTags.rta_encuesta_alumno_id.in_(
RespuestaEncuestaAlumno.query.with_entities(RespuestaEncuestaAlumno.id)
.filter_by(encuesta_alumno_id=encuesta.id)
)).all()
for respuesta in respuestas:
entrada = PalabrasClaveParaMateria.query.filter_by(materia_id=id_materia)\
.filter_by(palabra_clave_id=respuesta.palabra_clave_id).first()
if not entrada:
entrada = PalabrasClaveParaMateria(
materia_id=id_materia,
palabra_clave_id=respuesta.palabra_clave_id,
cantidad_encuestas_asociadas=0
)
db.session.add(entrada)
entrada.cantidad_encuestas_asociadas += 1
db.session.commit()
def agregarTematicasALasMaterias(self, encuesta, id_materia):
respuestas = RespuestaEncuestaTematica.query \
.filter(RespuestaEncuestaTematica.rta_encuesta_alumno_id.in_(
RespuestaEncuestaAlumno.query.with_entities(RespuestaEncuestaAlumno.id)
.filter_by(encuesta_alumno_id=encuesta.id)
)).all()
for respuesta in respuestas:
entrada = TematicaPorMateria.query.filter_by(materia_id=id_materia).\
filter_by(tematica_id=respuesta.tematica_id).first()
if not entrada:
entrada = TematicaPorMateria(
materia_id=id_materia,
tematica_id=respuesta.tematica_id,
cantidad_encuestas_asociadas=0
)
db.session.add(entrada)
entrada.cantidad_encuestas_asociadas += 1
db.session.commit()
def encuesta_no_esta_finalizada(self, nombre_parametro, valor, esObligatorio):
encuesta = EncuestaAlumno.query.get(valor)
return self.mensaje_OK(nombre_parametro) if not encuesta.finalizada \
else (False, 'La encuesta ya se encuentra finalizada', CLIENT_ERROR_METHOD_NOT_ALLOWED)
#########################################
CLASE = EncuestaAlumnoService
URLS_SERVICIOS = (
'/api/alumno/encuesta/<int:idEncuestaAlumno>',
)
#########################################
|
jennywoites/MUSSA
|
MUSSA_Flask/app/API_Rest/Services/AlumnoServices/EncuestaAlumnoService.py
|
Python
|
gpl-3.0
| 6,276 | 0.002709 |
#!/usr/bin/env python
from nipype.interfaces.io import FreeSurferSource, DataSink
from nipype.interfaces.utility import IdentityInterface
from nipype import Workflow, Node, MapNode, JoinNode, Function
import nibabel as nib
import numpy as np
import os
import surfdist as sd
import csv
def trimming(itemz, phrase):
item = [x for x in itemz if phrase in x][0]
return item
def genfname(hemi, source, target):
fname = hemi + '_' + source + '_' + target
return fname
def calc_surfdist(surface, labels, annot, reg, origin, target):
import nibabel as nib
import numpy as np
import os
from surfdist import load, utils, surfdist
import csv
""" inputs:
surface - surface file (e.g. lh.pial, with full path)
labels - label file (e.g. lh.cortex.label, with full path)
annot - annot file (e.g. lh.aparc.a2009s.annot, with full path)
reg - registration file (lh.sphere.reg)
origin - the label from which we calculate distances
target - target surface (e.g. fsaverage4)
"""
# Load stuff
surf = nib.freesurfer.read_geometry(surface)
cort = np.sort(nib.freesurfer.read_label(labels))
src = load.load_freesurfer_label(annot, origin, cort)
# Calculate distances
dist = surfdist.dist_calc(surf, cort, src)
# Project distances to target
trg = nib.freesurfer.read_geometry(target)[0]
native = nib.freesurfer.read_geometry(reg)[0]
idx_trg_to_native = utils.find_node_match(trg, native)[0]
# Get indices in trg space
distt = dist[idx_trg_to_native]
# Write to file and return file handle
filename = os.path.join(os.getcwd(),'distances.csv')
distt.tofile(filename,sep=",")
return filename
def stack_files(files, hemi, source, target):
"""
This function takes a list of files as input and vstacks them
"""
import csv
import os
import numpy as np
fname = "sdist_%s_%s_%s.csv" % (hemi, source, target)
filename = os.path.join(os.getcwd(),fname)
alldist = []
for dfile in files:
alldist.append(np.genfromtxt(dfile, delimiter=','))
alldist = np.array(alldist)
alldist.tofile(filename,",")
return filename
def create_surfdist_workflow(subjects_dir,
subject_list,
sources,
target,
hemi,
atlas,
labs,
name):
sd = Workflow(name=name)
# Run a separate tree for each template, hemisphere and source structure
infosource = Node(IdentityInterface(fields=['template','hemi','source']), name="infosource")
infosource.iterables = [('template', target),('hemi', hemi),('source',sources)]
# Get template files
fsst = Node(FreeSurferSource(),name='FS_Source_template')
fsst.inputs.subjects_dir = subjects_dir
sd.connect(infosource,'template',fsst,'subject_id')
sd.connect(infosource,'hemi',fsst,'hemi')
# Generate folder name for output
genfoldname = Node(Function(input_names=['hemi','source','target'],
output_names=['cname'], function=genfname),
name='genfoldname')
sd.connect(infosource,'hemi',genfoldname,'hemi')
sd.connect(infosource,'source',genfoldname,'source')
sd.connect(infosource,'template',genfoldname,'target')
# Get subjects
fss = Node(FreeSurferSource(),name='FS_Source')
fss.iterables = ('subject_id', subject_list)
fss.inputs.subjects_dir = subjects_dir
fss.inputs.subject_id = subject_list
sd.connect(infosource,'hemi',fss,'hemi')
# Trim labels
tlab = Node(Function(input_names=['itemz','phrase'],
output_names=['item'], function=trimming),
name='tlab')
tlab.inputs.phrase = labs
sd.connect(fss,'label',tlab,'itemz')
# Trim annotations
tannot = Node(Function(input_names=['itemz','phrase'],
output_names=['item'], function=trimming),
name='tannot')
tannot.inputs.phrase = atlas
sd.connect(fss,'annot',tannot,'itemz')
# Calculate distances for each hemi
sdist = Node(Function(input_names=['surface','labels','annot','reg','origin','target'],
output_names=['distances'], function=calc_surfdist),
name='sdist')
sd.connect(infosource,'source',sdist,'origin')
sd.connect(fss,'pial',sdist,'surface')
sd.connect(tlab,'item',sdist,'labels')
sd.connect(tannot,'item',sdist,'annot')
sd.connect(fss,'sphere_reg',sdist,'reg')
sd.connect(fsst,'sphere_reg',sdist,'target')
# Gather data for each hemi from all subjects
bucket = JoinNode(Function(input_names=['files','hemi','source','target'],output_names=['group_dist'],
function=stack_files), joinsource = fss, joinfield = 'files', name='bucket')
sd.connect(infosource,'source',bucket,'source')
sd.connect(infosource,'template',bucket,'target')
sd.connect(infosource,'hemi',bucket,'hemi')
sd.connect(sdist,'distances',bucket,'files')
# Sink the data
datasink = Node(DataSink(), name='sinker')
datasink.inputs.parameterization = False
datasink.inputs.base_directory = os.path.abspath(args.sink)
sd.connect(genfoldname,'cname',datasink,'container')
sd.connect(bucket,'group_dist',datasink,'group_distances')
return sd
def create_workflow(args, name=None):
with open(args.subject_file) as f:
subject_list = f.read().splitlines()
if name is None:
name = 'surfdist'
kwargs = dict(subjects_dir = args.subjects_dir,
subject_list = subject_list,
sources = args.sources,
target = args.target_surfs,
hemi = args.hemi,
atlas = args.annot,
labs = args.labels,
name=name)
wf = create_surfdist_workflow(**kwargs)
return wf
if __name__ == "__main__":
from argparse import ArgumentParser, RawTextHelpFormatter
import os
defstr = ' (default %(default)s)'
parser = ArgumentParser(description='''This script generates and runs a nipype pipeline for calculating distances from source label(s)
on a Freesurfer surface. After calculating the distances in native space it transforms
the distances into selected target space and creates a CSV file containing data for all
subjects. This table can be used for permutation testing in PALM.''',
formatter_class=RawTextHelpFormatter)
parser.add_argument("-s", "--subject_ids", dest="subject_file",
help="Subject list file", required=True)
parser.add_argument("-sd", "--subjects_dir", dest="subjects_dir",
help="FreeSurfer subject directory", required=True)
parser.add_argument("-t", "--target_surfaces", dest="target_surfs", nargs="+",
default=['fsaverage5'],
help="FreeSurfer target surfaces" + defstr)
parser.add_argument("-a", "--annot", dest="annot",
default='aparc.a2009s',
help="Annotation for source label(s)" + defstr)
parser.add_argument("-l", "--label", dest="labels",
default='cortex',
help="Label(s)" + defstr)
parser.add_argument("-src", "--source", dest="sources", nargs = "+",
default=['S_central'],
help="Label(s) to calculate distances from" + defstr)
parser.add_argument("-hemi", "--hemi", dest="hemi", nargs = "+",
default=['lh','rh'],
help="Hemisphere(s) for distance calculation" + defstr)
parser.add_argument("-o", "--output_dir", dest="sink",
default=os.path.join(os.getcwd(),'geodesic_distances'),
help="Output directory base")
parser.add_argument("-w", "--work_dir", dest="work_dir",
help="Output directory base")
parser.add_argument("-p", "--plugin", dest="plugin",
default='Linear',
help="Plugin to use")
parser.add_argument("--plugin_args", dest="plugin_args",
help="Plugin arguments")
args = parser.parse_args()
wf = create_workflow(args)
if args.work_dir:
work_dir = os.path.abspath(args.work_dir)
else:
work_dir = os.getcwd()
wf.base_dir = work_dir
if args.plugin_args:
wf.run(args.plugin, plugin_args=eval(args.plugin_args))
else:
wf.run(args.plugin)
wf.write_graph(dotfilename='func_preproc.dot', graph2use='exec', format='pdf', simple_form=False)
|
margulies/surfdist
|
nipype/surfdist_nipype.py
|
Python
|
mit
| 8,586 | 0.02737 |
# -*- test-case-name: twisted.test.test_ftp -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An FTP protocol implementation
@author: Itamar Shtull-Trauring
@author: Jp Calderone
@author: Andrew Bennetts
"""
# System Imports
import os
import time
import re
import operator
import stat
import errno
import fnmatch
import warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
from zope.interface import Interface, implements
# Twisted Imports
from twisted import copyright
from twisted.internet import reactor, interfaces, protocol, error, defer
from twisted.protocols import basic, policies
from twisted.python import log, failure, filepath
from twisted.python.compat import reduce
from twisted.cred import error as cred_error, portal, credentials, checkers
# constants
# response codes
RESTART_MARKER_REPLY = "100"
SERVICE_READY_IN_N_MINUTES = "120"
DATA_CNX_ALREADY_OPEN_START_XFR = "125"
FILE_STATUS_OK_OPEN_DATA_CNX = "150"
CMD_OK = "200.1"
TYPE_SET_OK = "200.2"
ENTERING_PORT_MODE = "200.3"
CMD_NOT_IMPLMNTD_SUPERFLUOUS = "202"
SYS_STATUS_OR_HELP_REPLY = "211"
DIR_STATUS = "212"
FILE_STATUS = "213"
HELP_MSG = "214"
NAME_SYS_TYPE = "215"
SVC_READY_FOR_NEW_USER = "220.1"
WELCOME_MSG = "220.2"
SVC_CLOSING_CTRL_CNX = "221"
GOODBYE_MSG = "221"
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS = "225"
CLOSING_DATA_CNX = "226"
TXFR_COMPLETE_OK = "226"
ENTERING_PASV_MODE = "227"
ENTERING_EPSV_MODE = "229"
USR_LOGGED_IN_PROCEED = "230.1" # v1 of code 230
GUEST_LOGGED_IN_PROCEED = "230.2" # v2 of code 230
REQ_FILE_ACTN_COMPLETED_OK = "250"
PWD_REPLY = "257.1"
MKD_REPLY = "257.2"
USR_NAME_OK_NEED_PASS = "331.1" # v1 of Code 331
GUEST_NAME_OK_NEED_EMAIL = "331.2" # v2 of code 331
NEED_ACCT_FOR_LOGIN = "332"
REQ_FILE_ACTN_PENDING_FURTHER_INFO = "350"
SVC_NOT_AVAIL_CLOSING_CTRL_CNX = "421.1"
TOO_MANY_CONNECTIONS = "421.2"
CANT_OPEN_DATA_CNX = "425"
CNX_CLOSED_TXFR_ABORTED = "426"
REQ_ACTN_ABRTD_FILE_UNAVAIL = "450"
REQ_ACTN_ABRTD_LOCAL_ERR = "451"
REQ_ACTN_ABRTD_INSUFF_STORAGE = "452"
SYNTAX_ERR = "500"
SYNTAX_ERR_IN_ARGS = "501"
CMD_NOT_IMPLMNTD = "502"
BAD_CMD_SEQ = "503"
CMD_NOT_IMPLMNTD_FOR_PARAM = "504"
NOT_LOGGED_IN = "530.1" # v1 of code 530 - please log in
AUTH_FAILURE = "530.2" # v2 of code 530 - authorization failure
NEED_ACCT_FOR_STOR = "532"
FILE_NOT_FOUND = "550.1" # no such file or directory
PERMISSION_DENIED = "550.2" # permission denied
ANON_USER_DENIED = "550.3" # anonymous users can't alter filesystem
IS_NOT_A_DIR = "550.4" # rmd called on a path that is not a directory
REQ_ACTN_NOT_TAKEN = "550.5"
FILE_EXISTS = "550.6"
IS_A_DIR = "550.7"
PAGE_TYPE_UNK = "551"
EXCEEDED_STORAGE_ALLOC = "552"
FILENAME_NOT_ALLOWED = "553"
RESPONSE = {
# -- 100's --
RESTART_MARKER_REPLY: '110 MARK yyyy-mmmm', # TODO: this must be fixed
SERVICE_READY_IN_N_MINUTES: '120 service ready in %s minutes',
DATA_CNX_ALREADY_OPEN_START_XFR: '125 Data connection already open, starting transfer',
FILE_STATUS_OK_OPEN_DATA_CNX: '150 File status okay; about to open data connection.',
# -- 200's --
CMD_OK: '200 Command OK',
TYPE_SET_OK: '200 Type set to %s.',
ENTERING_PORT_MODE: '200 PORT OK',
CMD_NOT_IMPLMNTD_SUPERFLUOUS: '202 Command not implemented, superfluous at this site',
SYS_STATUS_OR_HELP_REPLY: '211 System status reply',
DIR_STATUS: '212 %s',
FILE_STATUS: '213 %s',
HELP_MSG: '214 help: %s',
NAME_SYS_TYPE: '215 UNIX Type: L8',
WELCOME_MSG: "220 %s",
SVC_READY_FOR_NEW_USER: '220 Service ready',
GOODBYE_MSG: '221 Goodbye.',
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS: '225 data connection open, no transfer in progress',
CLOSING_DATA_CNX: '226 Abort successful',
TXFR_COMPLETE_OK: '226 Transfer Complete.',
ENTERING_PASV_MODE: '227 Entering Passive Mode (%s).',
ENTERING_EPSV_MODE: '229 Entering Extended Passive Mode (|||%s|).', # where is epsv defined in the rfc's?
USR_LOGGED_IN_PROCEED: '230 User logged in, proceed',
GUEST_LOGGED_IN_PROCEED: '230 Anonymous login ok, access restrictions apply.',
REQ_FILE_ACTN_COMPLETED_OK: '250 Requested File Action Completed OK', #i.e. CWD completed ok
PWD_REPLY: '257 "%s"',
MKD_REPLY: '257 "%s" created',
# -- 300's --
'userotp': '331 Response to %s.', # ???
USR_NAME_OK_NEED_PASS: '331 Password required for %s.',
GUEST_NAME_OK_NEED_EMAIL: '331 Guest login ok, type your email address as password.',
REQ_FILE_ACTN_PENDING_FURTHER_INFO: '350 Requested file action pending further information.',
# -- 400's --
SVC_NOT_AVAIL_CLOSING_CTRL_CNX: '421 Service not available, closing control connection.',
TOO_MANY_CONNECTIONS: '421 Too many users right now, try again in a few minutes.',
CANT_OPEN_DATA_CNX: "425 Can't open data connection.",
CNX_CLOSED_TXFR_ABORTED: '426 Transfer aborted. Data connection closed.',
REQ_ACTN_ABRTD_LOCAL_ERR: '451 Requested action aborted. Local error in processing.',
# -- 500's --
SYNTAX_ERR: "500 Syntax error: %s",
SYNTAX_ERR_IN_ARGS: '501 syntax error in argument(s) %s.',
CMD_NOT_IMPLMNTD: "502 Command '%s' not implemented",
BAD_CMD_SEQ: '503 Incorrect sequence of commands: %s',
CMD_NOT_IMPLMNTD_FOR_PARAM: "504 Not implemented for parameter '%s'.",
NOT_LOGGED_IN: '530 Please login with USER and PASS.',
AUTH_FAILURE: '530 Sorry, Authentication failed.',
NEED_ACCT_FOR_STOR: '532 Need an account for storing files',
FILE_NOT_FOUND: '550 %s: No such file or directory.',
PERMISSION_DENIED: '550 %s: Permission denied.',
ANON_USER_DENIED: '550 Anonymous users are forbidden to change the filesystem',
IS_NOT_A_DIR: '550 Cannot rmd, %s is not a directory',
FILE_EXISTS: '550 %s: File exists',
IS_A_DIR: '550 %s: is a directory',
REQ_ACTN_NOT_TAKEN: '550 Requested action not taken: %s',
EXCEEDED_STORAGE_ALLOC: '552 Requested file action aborted, exceeded file storage allocation',
FILENAME_NOT_ALLOWED: '553 Requested action not taken, file name not allowed'
}
class InvalidPath(Exception):
"""
Internal exception used to signify an error during parsing a path.
"""
def toSegments(cwd, path):
"""
Normalize a path, as represented by a list of strings each
representing one segment of the path.
"""
if path.startswith('/'):
segs = []
else:
segs = cwd[:]
for s in path.split('/'):
if s == '.' or s == '':
continue
elif s == '..':
if segs:
segs.pop()
else:
raise InvalidPath(cwd, path)
elif '\0' in s or '/' in s:
raise InvalidPath(cwd, path)
else:
segs.append(s)
return segs
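# Illustrative behaviour of toSegments (comment added for clarity, not part of the original):
#   toSegments(['pub'], 'a/b/../c')  ->  ['pub', 'a', 'c']
#   toSegments([], '../etc')         ->  raises InvalidPath (cannot ascend past the root)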
def errnoToFailure(e, path):
"""
Map C{OSError} and C{IOError} to standard FTP errors.
"""
if e == errno.ENOENT:
return defer.fail(FileNotFoundError(path))
elif e == errno.EACCES or e == errno.EPERM:
return defer.fail(PermissionDeniedError(path))
elif e == errno.ENOTDIR:
return defer.fail(IsNotADirectoryError(path))
elif e == errno.EEXIST:
return defer.fail(FileExistsError(path))
elif e == errno.EISDIR:
return defer.fail(IsADirectoryError(path))
else:
return defer.fail()
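# Example (illustrative): errnoToFailure(errno.ENOENT, '/pub/missing.txt') returns a
# Deferred that has already failed with FileNotFoundError('/pub/missing.txt').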
class FTPCmdError(Exception):
"""
Generic exception for FTP commands.
"""
def __init__(self, *msg):
Exception.__init__(self, *msg)
self.errorMessage = msg
def response(self):
"""
Generate a FTP response message for this error.
"""
return RESPONSE[self.errorCode] % self.errorMessage
class FileNotFoundError(FTPCmdError):
"""
Raised when trying to access a non existent file or directory.
"""
errorCode = FILE_NOT_FOUND
class AnonUserDeniedError(FTPCmdError):
"""
Raised when an anonymous user issues a command that will alter the
filesystem
"""
def __init__(self):
# No message
FTPCmdError.__init__(self, None)
errorCode = ANON_USER_DENIED
class PermissionDeniedError(FTPCmdError):
"""
Raised when access is attempted to a resource to which access is
not allowed.
"""
errorCode = PERMISSION_DENIED
class IsNotADirectoryError(FTPCmdError):
"""
Raised when RMD is called on a path that isn't a directory.
"""
errorCode = IS_NOT_A_DIR
class FileExistsError(FTPCmdError):
"""
Raised when attempted to override an existing resource.
"""
errorCode = FILE_EXISTS
class IsADirectoryError(FTPCmdError):
"""
Raised when DELE is called on a path that is a directory.
"""
errorCode = IS_A_DIR
class CmdSyntaxError(FTPCmdError):
"""
Raised when a command syntax is wrong.
"""
errorCode = SYNTAX_ERR
class CmdArgSyntaxError(FTPCmdError):
"""
Raised when a command is called with wrong value or a wrong number of
arguments.
"""
errorCode = SYNTAX_ERR_IN_ARGS
class CmdNotImplementedError(FTPCmdError):
"""
Raised when an unimplemented command is given to the server.
"""
errorCode = CMD_NOT_IMPLMNTD
class CmdNotImplementedForArgError(FTPCmdError):
"""
Raised when the handling of a parameter for a command is not implemented by
the server.
"""
errorCode = CMD_NOT_IMPLMNTD_FOR_PARAM
class FTPError(Exception):
pass
class PortConnectionError(Exception):
pass
class BadCmdSequenceError(FTPCmdError):
"""
Raised when a client sends a series of commands in an illogical sequence.
"""
errorCode = BAD_CMD_SEQ
class AuthorizationError(FTPCmdError):
"""
Raised when client authentication fails.
"""
errorCode = AUTH_FAILURE
def debugDeferred(self, *_):
log.msg('debugDeferred(): %s' % str(_), debug=True)
# -- DTP Protocol --
_months = [
None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class DTP(object, protocol.Protocol):
implements(interfaces.IConsumer)
isConnected = False
_cons = None
_onConnLost = None
_buffer = None
def connectionMade(self):
self.isConnected = True
self.factory.deferred.callback(None)
self._buffer = []
def connectionLost(self, reason):
self.isConnected = False
if self._onConnLost is not None:
self._onConnLost.callback(None)
def sendLine(self, line):
self.transport.write(line + '\r\n')
def _formatOneListResponse(self, name, size, directory, permissions, hardlinks, modified, owner, group):
def formatMode(mode):
return ''.join([mode & (256 >> n) and 'rwx'[n % 3] or '-' for n in range(9)])
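        # e.g. formatMode(0755) yields 'rwxr-xr-x' (illustrative comment, not in the original)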
def formatDate(mtime):
now = time.gmtime()
info = {
'month': _months[mtime.tm_mon],
'day': mtime.tm_mday,
'year': mtime.tm_year,
'hour': mtime.tm_hour,
'minute': mtime.tm_min
}
if now.tm_year != mtime.tm_year:
return '%(month)s %(day)02d %(year)5d' % info
else:
return '%(month)s %(day)02d %(hour)02d:%(minute)02d' % info
format = ('%(directory)s%(permissions)s%(hardlinks)4d '
'%(owner)-9s %(group)-9s %(size)15d %(date)12s '
'%(name)s')
return format % {
'directory': directory and 'd' or '-',
'permissions': formatMode(permissions),
'hardlinks': hardlinks,
'owner': owner[:8],
'group': group[:8],
'size': size,
'date': formatDate(time.gmtime(modified)),
'name': name}
def sendListResponse(self, name, response):
self.sendLine(self._formatOneListResponse(name, *response))
# Proxy IConsumer to our transport
def registerProducer(self, producer, streaming):
return self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
self.transport.unregisterProducer()
self.transport.loseConnection()
def write(self, data):
if self.isConnected:
return self.transport.write(data)
raise Exception("Crap damn crap damn crap damn")
# Pretend to be a producer, too.
def _conswrite(self, bytes):
try:
self._cons.write(bytes)
except:
self._onConnLost.errback()
def dataReceived(self, bytes):
if self._cons is not None:
self._conswrite(bytes)
else:
self._buffer.append(bytes)
def _unregConsumer(self, ignored):
self._cons.unregisterProducer()
self._cons = None
del self._onConnLost
return ignored
def registerConsumer(self, cons):
assert self._cons is None
self._cons = cons
self._cons.registerProducer(self, True)
for chunk in self._buffer:
self._conswrite(chunk)
self._buffer = None
if self.isConnected:
self._onConnLost = d = defer.Deferred()
d.addBoth(self._unregConsumer)
return d
else:
self._cons.unregisterProducer()
self._cons = None
return defer.succeed(None)
def resumeProducing(self):
self.transport.resumeProducing()
def pauseProducing(self):
self.transport.pauseProducing()
def stopProducing(self):
self.transport.stopProducing()
class DTPFactory(protocol.ClientFactory):
"""
Client factory for I{data transfer process} protocols.
@ivar peerCheck: perform checks to make sure the ftp-pi's peer is the same
as the dtp's
@ivar pi: a reference to this factory's protocol interpreter
@ivar _state: Indicates the current state of the DTPFactory. Initially,
this is L{_IN_PROGRESS}. If the connection fails or times out, it is
L{_FAILED}. If the connection succeeds before the timeout, it is
L{_FINISHED}.
"""
_IN_PROGRESS = object()
_FAILED = object()
_FINISHED = object()
_state = _IN_PROGRESS
# -- configuration variables --
peerCheck = False
# -- class variables --
def __init__(self, pi, peerHost=None, reactor=None):
"""Constructor
@param pi: this factory's protocol interpreter
@param peerHost: if peerCheck is True, this is the tuple that the
generated instance will use to perform security checks
"""
self.pi = pi # the protocol interpreter that is using this factory
self.peerHost = peerHost # the from FTP.transport.peerHost()
self.deferred = defer.Deferred() # deferred will fire when instance is connected
self.delayedCall = None
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
def buildProtocol(self, addr):
log.msg('DTPFactory.buildProtocol', debug=True)
if self._state is not self._IN_PROGRESS:
return None
self._state = self._FINISHED
self.cancelTimeout()
p = DTP()
p.factory = self
p.pi = self.pi
self.pi.dtpInstance = p
return p
def stopFactory(self):
log.msg('dtpFactory.stopFactory', debug=True)
self.cancelTimeout()
def timeoutFactory(self):
log.msg('timed out waiting for DTP connection')
if self._state is not self._IN_PROGRESS:
return
self._state = self._FAILED
d = self.deferred
self.deferred = None
d.errback(
PortConnectionError(defer.TimeoutError("DTPFactory timeout")))
def cancelTimeout(self):
if self.delayedCall is not None and self.delayedCall.active():
log.msg('cancelling DTP timeout', debug=True)
self.delayedCall.cancel()
def setTimeout(self, seconds):
log.msg('DTPFactory.setTimeout set to %s seconds' % seconds)
self.delayedCall = self._reactor.callLater(seconds, self.timeoutFactory)
def clientConnectionFailed(self, connector, reason):
if self._state is not self._IN_PROGRESS:
return
self._state = self._FAILED
d = self.deferred
self.deferred = None
d.errback(PortConnectionError(reason))
# -- FTP-PI (Protocol Interpreter) --
class ASCIIConsumerWrapper(object):
def __init__(self, cons):
self.cons = cons
self.registerProducer = cons.registerProducer
self.unregisterProducer = cons.unregisterProducer
assert os.linesep == "\r\n" or len(os.linesep) == 1, "Unsupported platform (yea right like this even exists)"
if os.linesep == "\r\n":
self.write = cons.write
def write(self, bytes):
return self.cons.write(bytes.replace(os.linesep, "\r\n"))
class FileConsumer(object):
"""
A consumer for FTP input that writes data to a file.
@ivar fObj: a file object opened for writing, used to write data received.
@type fObj: C{file}
"""
implements(interfaces.IConsumer)
def __init__(self, fObj):
self.fObj = fObj
def registerProducer(self, producer, streaming):
self.producer = producer
assert streaming
def unregisterProducer(self):
self.producer = None
self.fObj.close()
def write(self, bytes):
self.fObj.write(bytes)
class FTPOverflowProtocol(basic.LineReceiver):
"""FTP mini-protocol for when there are too many connections."""
def connectionMade(self):
self.sendLine(RESPONSE[TOO_MANY_CONNECTIONS])
self.transport.loseConnection()
class FTP(object, basic.LineReceiver, policies.TimeoutMixin):
"""
Protocol Interpreter for the File Transfer Protocol
@ivar state: The current server state. One of L{UNAUTH},
L{INAUTH}, L{AUTHED}, L{RENAMING}.
@ivar shell: The connected avatar
@ivar binary: The transfer mode. If false, ASCII.
@ivar dtpFactory: Generates a single DTP for this session
@ivar dtpPort: Port returned from listenTCP
@ivar listenFactory: A callable with the signature of
L{twisted.internet.interfaces.IReactorTCP.listenTCP} which will be used
to create Ports for passive connections (mainly for testing).
@ivar passivePortRange: iterator used as source of passive port numbers.
@type passivePortRange: C{iterator}
"""
disconnected = False
# States an FTP can be in
UNAUTH, INAUTH, AUTHED, RENAMING = range(4)
# how long the DTP waits for a connection
dtpTimeout = 10
portal = None
shell = None
dtpFactory = None
dtpPort = None
dtpInstance = None
binary = True
passivePortRange = xrange(0, 1)
listenFactory = reactor.listenTCP
def reply(self, key, *args):
msg = RESPONSE[key] % args
self.sendLine(msg)
def connectionMade(self):
self.state = self.UNAUTH
self.setTimeout(self.timeOut)
self.reply(WELCOME_MSG, self.factory.welcomeMessage)
def connectionLost(self, reason):
# if we have a DTP protocol instance running and
# we lose connection to the client's PI, kill the
# DTP connection and close the port
if self.dtpFactory:
self.cleanupDTP()
self.setTimeout(None)
if hasattr(self.shell, 'logout') and self.shell.logout is not None:
self.shell.logout()
self.shell = None
self.transport = None
def timeoutConnection(self):
self.transport.loseConnection()
def lineReceived(self, line):
self.resetTimeout()
self.pauseProducing()
def processFailed(err):
if err.check(FTPCmdError):
self.sendLine(err.value.response())
elif (err.check(TypeError) and
err.value.args[0].find('takes exactly') != -1):
self.reply(SYNTAX_ERR, "%s requires an argument." % (cmd,))
else:
log.msg("Unexpected FTP error")
log.err(err)
self.reply(REQ_ACTN_NOT_TAKEN, "internal server error")
def processSucceeded(result):
if isinstance(result, tuple):
self.reply(*result)
elif result is not None:
self.reply(result)
def allDone(ignored):
if not self.disconnected:
self.resumeProducing()
spaceIndex = line.find(' ')
if spaceIndex != -1:
cmd = line[:spaceIndex]
args = (line[spaceIndex + 1:],)
else:
cmd = line
args = ()
d = defer.maybeDeferred(self.processCommand, cmd, *args)
d.addCallbacks(processSucceeded, processFailed)
d.addErrback(log.err)
# XXX It burnsss
# LineReceiver doesn't let you resumeProducing inside
# lineReceived atm
from twisted.internet import reactor
reactor.callLater(0, d.addBoth, allDone)
def processCommand(self, cmd, *params):
cmd = cmd.upper()
if self.state == self.UNAUTH:
if cmd == 'USER':
return self.ftp_USER(*params)
elif cmd == 'PASS':
return BAD_CMD_SEQ, "USER required before PASS"
else:
return NOT_LOGGED_IN
elif self.state == self.INAUTH:
if cmd == 'PASS':
return self.ftp_PASS(*params)
else:
return BAD_CMD_SEQ, "PASS required after USER"
elif self.state == self.AUTHED:
method = getattr(self, "ftp_" + cmd, None)
if method is not None:
return method(*params)
return defer.fail(CmdNotImplementedError(cmd))
elif self.state == self.RENAMING:
if cmd == 'RNTO':
return self.ftp_RNTO(*params)
else:
return BAD_CMD_SEQ, "RNTO required after RNFR"
def getDTPPort(self, factory):
"""
Return a port for passive access, using C{self.passivePortRange}
attribute.
"""
for portn in self.passivePortRange:
try:
dtpPort = self.listenFactory(portn, factory)
except error.CannotListenError:
continue
else:
return dtpPort
raise error.CannotListenError('', portn,
"No port available in range %s" %
(self.passivePortRange,))
def ftp_USER(self, username):
"""
First part of login. Get the username the peer wants to
authenticate as.
"""
if not username:
return defer.fail(CmdSyntaxError('USER requires an argument'))
self._user = username
self.state = self.INAUTH
if self.factory.allowAnonymous and self._user == self.factory.userAnonymous:
return GUEST_NAME_OK_NEED_EMAIL
else:
return (USR_NAME_OK_NEED_PASS, username)
# TODO: add max auth try before timeout from ip...
# TODO: need to implement minimal ABOR command
def ftp_PASS(self, password):
"""
Second part of login. Get the password the peer wants to
authenticate with.
"""
if self.factory.allowAnonymous and self._user == self.factory.userAnonymous:
# anonymous login
creds = credentials.Anonymous()
reply = GUEST_LOGGED_IN_PROCEED
else:
# user login
creds = credentials.UsernamePassword(self._user, password)
reply = USR_LOGGED_IN_PROCEED
del self._user
def _cbLogin((interface, avatar, logout)):
assert interface is IFTPShell, "The realm is busted, jerk."
self.shell = avatar
self.logout = logout
self.workingDirectory = []
self.state = self.AUTHED
return reply
def _ebLogin(failure):
failure.trap(cred_error.UnauthorizedLogin, cred_error.UnhandledCredentials)
self.state = self.UNAUTH
raise AuthorizationError
d = self.portal.login(creds, None, IFTPShell)
d.addCallbacks(_cbLogin, _ebLogin)
return d
def ftp_PASV(self):
"""Request for a passive connection
from the rfc::
This command requests the server-DTP to \"listen\" on a data port
(which is not its default data port) and to wait for a connection
rather than initiate one upon receipt of a transfer command. The
response to this command includes the host and port address this
server is listening on.
"""
# if we have a DTP port set up, lose it.
if self.dtpFactory is not None:
# cleanupDTP sets dtpFactory to none. Later we'll do
# cleanup here or something.
self.cleanupDTP()
self.dtpFactory = DTPFactory(pi=self)
self.dtpFactory.setTimeout(self.dtpTimeout)
self.dtpPort = self.getDTPPort(self.dtpFactory)
host = self.transport.getHost().host
port = self.dtpPort.getHost().port
self.reply(ENTERING_PASV_MODE, encodeHostPort(host, port))
return self.dtpFactory.deferred.addCallback(lambda ign: None)
def ftp_PORT(self, address):
addr = map(int, address.split(','))
ip = '%d.%d.%d.%d' % tuple(addr[:4])
port = addr[4] << 8 | addr[5]
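        # e.g. (illustrative) address '192,168,1,2,19,136' decodes to ip '192.168.1.2'
        # and port 19 * 256 + 136 = 5000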
# if we have a DTP port set up, lose it.
if self.dtpFactory is not None:
self.cleanupDTP()
self.dtpFactory = DTPFactory(pi=self, peerHost=self.transport.getPeer().host)
self.dtpFactory.setTimeout(self.dtpTimeout)
self.dtpPort = reactor.connectTCP(ip, port, self.dtpFactory)
def connected(ignored):
return ENTERING_PORT_MODE
def connFailed(err):
err.trap(PortConnectionError)
return CANT_OPEN_DATA_CNX
return self.dtpFactory.deferred.addCallbacks(connected, connFailed)
def ftp_LIST(self, path=''):
""" This command causes a list to be sent from the server to the
passive DTP. If the pathname specifies a directory or other
group of files, the server should transfer a list of files
in the specified directory. If the pathname specifies a
file then the server should send current information on the
file. A null argument implies the user's current working or
default directory.
"""
# Uh, for now, do this retarded thing.
if self.dtpInstance is None or not self.dtpInstance.isConnected:
return defer.fail(BadCmdSequenceError('must send PORT or PASV before RETR'))
# bug in konqueror
if path == "-a":
path = ''
# bug in gFTP 2.0.15
if path == "-aL":
path = ''
# bug in Nautilus 2.10.0
if path == "-L":
path = ''
# bug in ange-ftp
if path == "-la":
path = ''
def gotListing(results):
self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
for (name, attrs) in results:
self.dtpInstance.sendListResponse(name, attrs)
self.dtpInstance.transport.loseConnection()
return (TXFR_COMPLETE_OK,)
try:
segments = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
d = self.shell.list(
segments,
('size', 'directory', 'permissions', 'hardlinks',
'modified', 'owner', 'group'))
d.addCallback(gotListing)
return d
def ftp_NLST(self, path):
"""
This command causes a directory listing to be sent from the server to
the client. The pathname should specify a directory or other
system-specific file group descriptor. An empty path implies the current
working directory. If the path is non-existent, send nothing. If the
path is to a file, send only the file name.
@type path: C{str}
@param path: The path for which a directory listing should be returned.
@rtype: L{Deferred}
@return: a L{Deferred} which will be fired when the listing request
is finished.
"""
# XXX: why is this check different from ftp_RETR/ftp_STOR? See #4180
if self.dtpInstance is None or not self.dtpInstance.isConnected:
return defer.fail(
BadCmdSequenceError('must send PORT or PASV before RETR'))
try:
segments = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
def cbList(results):
"""
Send, line by line, each file in the directory listing, and then
close the connection.
@type results: A C{list} of C{tuple}. The first element of each
C{tuple} is a C{str} and the second element is a C{list}.
@param results: The names of the files in the directory.
@rtype: C{tuple}
@return: A C{tuple} containing the status code for a successful
transfer.
"""
self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
for (name, ignored) in results:
self.dtpInstance.sendLine(name)
self.dtpInstance.transport.loseConnection()
return (TXFR_COMPLETE_OK,)
def cbGlob(results):
self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
for (name, ignored) in results:
if fnmatch.fnmatch(name, segments[-1]):
self.dtpInstance.sendLine(name)
self.dtpInstance.transport.loseConnection()
return (TXFR_COMPLETE_OK,)
def listErr(results):
"""
RFC 959 specifies that an NLST request may only return directory
listings. Thus, send nothing and just close the connection.
@type results: L{Failure}
@param results: The L{Failure} wrapping a L{FileNotFoundError} that
occurred while trying to list the contents of a nonexistent
directory.
@rtype: C{tuple}
@returns: A C{tuple} containing the status code for a successful
transfer.
"""
self.dtpInstance.transport.loseConnection()
return (TXFR_COMPLETE_OK,)
# XXX This globbing may be incomplete: see #4181
if segments and (
'*' in segments[-1] or '?' in segments[-1] or
('[' in segments[-1] and ']' in segments[-1])):
d = self.shell.list(segments[:-1])
d.addCallback(cbGlob)
else:
d = self.shell.list(segments)
d.addCallback(cbList)
# self.shell.list will generate an error if the path is invalid
d.addErrback(listErr)
return d
def ftp_CWD(self, path):
try:
segments = toSegments(self.workingDirectory, path)
except InvalidPath:
# XXX Eh, what to fail with here?
return defer.fail(FileNotFoundError(path))
def accessGranted(result):
self.workingDirectory = segments
return (REQ_FILE_ACTN_COMPLETED_OK,)
return self.shell.access(segments).addCallback(accessGranted)
def ftp_CDUP(self):
return self.ftp_CWD('..')
def ftp_PWD(self):
return (PWD_REPLY, '/' + '/'.join(self.workingDirectory))
def ftp_RETR(self, path):
if self.dtpInstance is None:
raise BadCmdSequenceError('PORT or PASV required before RETR')
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
# XXX For now, just disable the timeout. Later we'll want to
# leave it active and have the DTP connection reset it
# periodically.
self.setTimeout(None)
# Put it back later
def enableTimeout(result):
self.setTimeout(self.factory.timeOut)
return result
# And away she goes
if not self.binary:
cons = ASCIIConsumerWrapper(self.dtpInstance)
else:
cons = self.dtpInstance
def cbSent(result):
return (TXFR_COMPLETE_OK,)
def ebSent(err):
log.msg("Unexpected error attempting to transmit file to client:")
log.err(err)
return (CNX_CLOSED_TXFR_ABORTED,)
def cbOpened(file):
# Tell them what to doooo
if self.dtpInstance.isConnected:
self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
else:
self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
d = file.send(cons)
d.addCallbacks(cbSent, ebSent)
return d
def ebOpened(err):
if not err.check(PermissionDeniedError, FileNotFoundError, IsNotADirectoryError):
log.msg("Unexpected error attempting to open file for transmission:")
log.err(err)
if err.check(FTPCmdError):
return (err.value.errorCode, '/'.join(newsegs))
return (FILE_NOT_FOUND, '/'.join(newsegs))
d = self.shell.openForReading(newsegs)
d.addCallbacks(cbOpened, ebOpened)
d.addBoth(enableTimeout)
# Pass back Deferred that fires when the transfer is done
return d
def ftp_STOR(self, path):
if self.dtpInstance is None:
raise BadCmdSequenceError('PORT or PASV required before STOR')
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
# XXX For now, just disable the timeout. Later we'll want to
# leave it active and have the DTP connection reset it
# periodically.
self.setTimeout(None)
# Put it back later
def enableTimeout(result):
self.setTimeout(self.factory.timeOut)
return result
def cbSent(result):
return (TXFR_COMPLETE_OK,)
def ebSent(err):
log.msg("Unexpected error receiving file from client:")
log.err(err)
return (CNX_CLOSED_TXFR_ABORTED,)
def cbConsumer(cons):
if not self.binary:
cons = ASCIIConsumerWrapper(cons)
d = self.dtpInstance.registerConsumer(cons)
# Tell them what to doooo
if self.dtpInstance.isConnected:
self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
else:
self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
return d
def cbOpened(file):
d = file.receive()
d.addCallback(cbConsumer)
d.addCallback(lambda ignored: file.close())
d.addCallbacks(cbSent, ebSent)
return d
def ebOpened(err):
if not err.check(PermissionDeniedError, FileNotFoundError, IsNotADirectoryError):
log.msg("Unexpected error attempting to open file for upload:")
log.err(err)
if isinstance(err.value, FTPCmdError):
return (err.value.errorCode, '/'.join(newsegs))
return (FILE_NOT_FOUND, '/'.join(newsegs))
d = self.shell.openForWriting(newsegs)
d.addCallbacks(cbOpened, ebOpened)
d.addBoth(enableTimeout)
# Pass back Deferred that fires when the transfer is done
return d
def ftp_SIZE(self, path):
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
def cbStat((size,)):
return (FILE_STATUS, str(size))
return self.shell.stat(newsegs, ('size',)).addCallback(cbStat)
def ftp_MDTM(self, path):
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
def cbStat((modified,)):
return (FILE_STATUS, time.strftime('%Y%m%d%H%M%S', time.gmtime(modified)))
return self.shell.stat(newsegs, ('modified',)).addCallback(cbStat)
def ftp_TYPE(self, type):
p = type.upper()
if p:
f = getattr(self, 'type_' + p[0], None)
if f is not None:
return f(p[1:])
return self.type_UNKNOWN(p)
return (SYNTAX_ERR,)
def type_A(self, code):
if code == '' or code == 'N':
self.binary = False
return (TYPE_SET_OK, 'A' + code)
else:
return defer.fail(CmdArgSyntaxError(code))
def type_I(self, code):
if code == '':
self.binary = True
return (TYPE_SET_OK, 'I')
else:
return defer.fail(CmdArgSyntaxError(code))
def type_UNKNOWN(self, code):
return defer.fail(CmdNotImplementedForArgError(code))
def ftp_SYST(self):
return NAME_SYS_TYPE
def ftp_STRU(self, structure):
p = structure.upper()
if p == 'F':
return (CMD_OK,)
return defer.fail(CmdNotImplementedForArgError(structure))
def ftp_MODE(self, mode):
p = mode.upper()
if p == 'S':
return (CMD_OK,)
return defer.fail(CmdNotImplementedForArgError(mode))
def ftp_MKD(self, path):
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
return self.shell.makeDirectory(newsegs).addCallback(lambda ign: (MKD_REPLY, path))
def ftp_RMD(self, path):
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
return self.shell.removeDirectory(newsegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
def ftp_DELE(self, path):
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
return self.shell.removeFile(newsegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
def ftp_NOOP(self):
return (CMD_OK,)
def ftp_RNFR(self, fromName):
self._fromName = fromName
self.state = self.RENAMING
return (REQ_FILE_ACTN_PENDING_FURTHER_INFO,)
def ftp_RNTO(self, toName):
fromName = self._fromName
del self._fromName
self.state = self.AUTHED
try:
fromsegs = toSegments(self.workingDirectory, fromName)
tosegs = toSegments(self.workingDirectory, toName)
except InvalidPath:
return defer.fail(FileNotFoundError(fromName))
return self.shell.rename(fromsegs, tosegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
def ftp_QUIT(self):
self.reply(GOODBYE_MSG)
self.transport.loseConnection()
self.disconnected = True
def cleanupDTP(self):
"""call when DTP connection exits
"""
log.msg('cleanupDTP', debug=True)
log.msg(self.dtpPort)
dtpPort, self.dtpPort = self.dtpPort, None
if interfaces.IListeningPort.providedBy(dtpPort):
dtpPort.stopListening()
elif interfaces.IConnector.providedBy(dtpPort):
dtpPort.disconnect()
else:
assert False, "dtpPort should be an IListeningPort or IConnector, instead is %r" % (dtpPort,)
self.dtpFactory.stopFactory()
self.dtpFactory = None
if self.dtpInstance is not None:
self.dtpInstance = None
class FTPFactory(policies.LimitTotalConnectionsFactory):
"""
A factory for producing ftp protocol instances
@ivar timeOut: the protocol interpreter's idle timeout time in seconds,
default is 600 seconds.
@ivar passivePortRange: value forwarded to C{protocol.passivePortRange}.
@type passivePortRange: C{iterator}
"""
protocol = FTP
overflowProtocol = FTPOverflowProtocol
allowAnonymous = True
userAnonymous = 'anonymous'
timeOut = 600
welcomeMessage = "Twisted %s FTP Server" % (copyright.version,)
passivePortRange = xrange(0, 1)
def __init__(self, portal=None, userAnonymous='anonymous'):
self.portal = portal
self.userAnonymous = userAnonymous
self.instances = []
def buildProtocol(self, addr):
p = policies.LimitTotalConnectionsFactory.buildProtocol(self, addr)
if p is not None:
p.wrappedProtocol.portal = self.portal
p.wrappedProtocol.timeOut = self.timeOut
p.wrappedProtocol.passivePortRange = self.passivePortRange
return p
def stopFactory(self):
# make sure ftp instance's timeouts are set to None
# to avoid reactor complaints
[p.setTimeout(None) for p in self.instances if p.timeOut is not None]
policies.LimitTotalConnectionsFactory.stopFactory(self)
# -- Cred Objects --
class IFTPShell(Interface):
"""
An abstraction of the shell commands used by the FTP protocol for
a given user account.
All path names must be absolute.
"""
def makeDirectory(path):
"""
Create a directory.
@param path: The path, as a list of segments, to create
@type path: C{list} of C{unicode}
@return: A Deferred which fires when the directory has been
created, or which fails if the directory cannot be created.
"""
def removeDirectory(path):
"""
Remove a directory.
@param path: The path, as a list of segments, to remove
@type path: C{list} of C{unicode}
@return: A Deferred which fires when the directory has been
removed, or which fails if the directory cannot be removed.
"""
def removeFile(path):
"""
Remove a file.
@param path: The path, as a list of segments, to remove
@type path: C{list} of C{unicode}
@return: A Deferred which fires when the file has been
removed, or which fails if the file cannot be removed.
"""
def rename(fromPath, toPath):
"""
Rename a file or directory.
@param fromPath: The current name of the path.
@type fromPath: C{list} of C{unicode}
@param toPath: The desired new name of the path.
@type toPath: C{list} of C{unicode}
@return: A Deferred which fires when the path has been
renamed, or which fails if the path cannot be renamed.
"""
def access(path):
"""
Determine whether access to the given path is allowed.
@param path: The path, as a list of segments
@return: A Deferred which fires with None if access is allowed
or which fails with a specific exception type if access is
denied.
"""
def stat(path, keys=()):
"""
Retrieve information about the given path.
This is like list, except it will never return results about
child paths.
"""
def list(path, keys=()):
"""
Retrieve information about the given path.
If the path represents a non-directory, the result list should
have only one entry with information about that non-directory.
Otherwise, the result list should have an element for each
child of the directory.
@param path: The path, as a list of segments, to list
@type path: C{list} of C{unicode}
@param keys: A tuple of keys desired in the resulting
dictionaries.
@return: A Deferred which fires with a list of (name, list),
where the name is the name of the entry as a unicode string
and each list contains values corresponding to the requested
keys. The following are possible elements of keys, and the
values which should be returned for them:
- C{'size'}: size in bytes, as an integer (this key is effectively required)
- C{'directory'}: boolean indicating the type of this entry
- C{'permissions'}: a bitvector (see os.stat(foo).st_mode)
- C{'hardlinks'}: Number of hard links to this entry
- C{'modified'}: time of last modification to the entry, in seconds
since the epoch
- C{'owner'}: string indicating the user owner of this entry
- C{'group'}: string indicating the group owner of this entry
"""
def openForReading(path):
"""
@param path: The path, as a list of segments, to open
@type path: C{list} of C{unicode}
@rtype: C{Deferred} which will fire with L{IReadFile}
"""
def openForWriting(path):
"""
@param path: The path, as a list of segments, to open
@type path: C{list} of C{unicode}
@rtype: C{Deferred} which will fire with L{IWriteFile}
"""
class IReadFile(Interface):
"""
A file out of which bytes may be read.
"""
def send(consumer):
"""
Produce the contents of the given path to the given consumer. This
method may only be invoked once on each provider.
@type consumer: C{IConsumer}
@return: A Deferred which fires when the file has been
consumed completely.
"""
class IWriteFile(Interface):
"""
A file into which bytes may be written.
"""
def receive():
"""
Create a consumer which will write to this file. This method may
only be invoked once on each provider.
@rtype: C{Deferred} of C{IConsumer}
"""
def close():
"""
Perform any post-write work that needs to be done. This method may
only be invoked once on each provider, and will always be invoked
after receive().
@rtype: C{Deferred} of anything: the value is ignored. The FTP client
will not see their upload request complete until this Deferred has
been fired.
"""
def _getgroups(uid):
"""Return the primary and supplementary groups for the given UID.
@type uid: C{int}
"""
result = []
pwent = pwd.getpwuid(uid)
result.append(pwent.pw_gid)
for grent in grp.getgrall():
if pwent.pw_name in grent.gr_mem:
result.append(grent.gr_gid)
return result
def _testPermissions(uid, gid, spath, mode='r'):
"""
Check whether the given uid has permission to access C{spath} with the given mode.
@type uid: C{int}
@param uid: numeric user id
@type gid: C{int}
@param gid: numeric group id
@type spath: C{str}
@param spath: the path on the server to test
@type mode: C{str}
@param mode: 'r' or 'w' (read or write)
@rtype: C{bool}
@return: True if the given credentials have the specified form of
access to the given path
"""
if mode == 'r':
usr = stat.S_IRUSR
grp = stat.S_IRGRP
oth = stat.S_IROTH
amode = os.R_OK
elif mode == 'w':
usr = stat.S_IWUSR
grp = stat.S_IWGRP
oth = stat.S_IWOTH
amode = os.W_OK
else:
raise ValueError("Invalid mode %r: must specify 'r' or 'w'" % (mode,))
access = False
if os.path.exists(spath):
if uid == 0:
access = True
else:
s = os.stat(spath)
if usr & s.st_mode and uid == s.st_uid:
access = True
elif grp & s.st_mode and gid in _getgroups(uid):
access = True
elif oth & s.st_mode:
access = True
if access:
if not os.access(spath, amode):
access = False
log.msg("Filesystem grants permission to UID %d but it is inaccessible to me running as UID %d" % (
uid, os.getuid()))
return access
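# Illustrative sketch, not part of the original module: _testPermissions is
# POSIX-only; this helper simply asks whether the current process may read
# '/tmp' (an arbitrary example path).
def _examplePermissionCheck():
    """Return True if the current uid/gid can read '/tmp'."""
    return _testPermissions(os.getuid(), os.getgid(), '/tmp', mode='r')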
class FTPAnonymousShell(object):
"""
An anonymous implementation of IFTPShell
@type filesystemRoot: L{twisted.python.filepath.FilePath}
@ivar filesystemRoot: The path which is considered the root of
this shell.
"""
implements(IFTPShell)
def __init__(self, filesystemRoot):
self.filesystemRoot = filesystemRoot
def _path(self, path):
return reduce(filepath.FilePath.child, path, self.filesystemRoot)
def makeDirectory(self, path):
return defer.fail(AnonUserDeniedError())
def removeDirectory(self, path):
return defer.fail(AnonUserDeniedError())
def removeFile(self, path):
return defer.fail(AnonUserDeniedError())
def rename(self, fromPath, toPath):
return defer.fail(AnonUserDeniedError())
def receive(self, path):
path = self._path(path)
return defer.fail(AnonUserDeniedError())
def openForReading(self, path):
"""
Open C{path} for reading.
@param path: The path, as a list of segments, to open.
@type path: C{list} of C{unicode}
@return: A L{Deferred} is returned that will fire with an object
implementing L{IReadFile} if the file is successfully opened. If
C{path} is a directory, or if an exception is raised while trying
to open the file, the L{Deferred} will fire with an error.
"""
p = self._path(path)
if p.isdir():
# Normally, we would only check for EISDIR in open, but win32
# returns EACCES in this case, so we check before
return defer.fail(IsADirectoryError(path))
try:
f = p.open('r')
except (IOError, OSError), e:
return errnoToFailure(e.errno, path)
except:
return defer.fail()
else:
return defer.succeed(_FileReader(f))
def openForWriting(self, path):
"""
Reject write attempts by anonymous users with
L{PermissionDeniedError}.
"""
return defer.fail(PermissionDeniedError("STOR not allowed"))
def access(self, path):
p = self._path(path)
if not p.exists():
# Again, win32 doesn't report a sane error after, so let's fail
# early if we can
return defer.fail(FileNotFoundError(path))
# For now, just see if we can os.listdir() it
try:
p.listdir()
except (IOError, OSError), e:
return errnoToFailure(e.errno, path)
except:
return defer.fail()
else:
return defer.succeed(None)
def stat(self, path, keys=()):
p = self._path(path)
if p.isdir():
try:
statResult = self._statNode(p, keys)
except (IOError, OSError), e:
return errnoToFailure(e.errno, path)
except:
return defer.fail()
else:
return defer.succeed(statResult)
else:
return self.list(path, keys).addCallback(lambda res: res[0][1])
def list(self, path, keys=()):
"""
Return the list of files at the given C{path}, adding the stat
information requested by C{keys} if specified.
@param path: the directory or file to check.
@type path: C{str}
@param keys: the list of desired metadata
@type keys: C{list} of C{str}
"""
filePath = self._path(path)
if filePath.isdir():
entries = filePath.listdir()
fileEntries = [filePath.child(p) for p in entries]
elif filePath.isfile():
entries = [os.path.join(*filePath.segmentsFrom(self.filesystemRoot))]
fileEntries = [filePath]
else:
return defer.fail(FileNotFoundError(path))
results = []
for fileName, filePath in zip(entries, fileEntries):
ent = []
results.append((fileName, ent))
if keys:
try:
ent.extend(self._statNode(filePath, keys))
except (IOError, OSError), e:
return errnoToFailure(e.errno, fileName)
except:
return defer.fail()
return defer.succeed(results)
def _statNode(self, filePath, keys):
"""
Shortcut method to get stat info on a node.
@param filePath: the node to stat.
@type filePath: C{filepath.FilePath}
@param keys: the stat keys to get.
@type keys: C{iterable}
"""
filePath.restat()
return [getattr(self, '_stat_' + k)(filePath.statinfo) for k in keys]
_stat_size = operator.attrgetter('st_size')
_stat_permissions = operator.attrgetter('st_mode')
_stat_hardlinks = operator.attrgetter('st_nlink')
_stat_modified = operator.attrgetter('st_mtime')
def _stat_owner(self, st):
if pwd is not None:
try:
return pwd.getpwuid(st.st_uid)[0]
except KeyError:
pass
return str(st.st_uid)
def _stat_group(self, st):
if grp is not None:
try:
return grp.getgrgid(st.st_gid)[0]
except KeyError:
pass
return str(st.st_gid)
def _stat_directory(self, st):
return bool(st.st_mode & stat.S_IFDIR)
class _FileReader(object):
implements(IReadFile)
def __init__(self, fObj):
self.fObj = fObj
self._send = False
def _close(self, passthrough):
self._send = True
self.fObj.close()
return passthrough
def send(self, consumer):
assert not self._send, "Can only call IReadFile.send *once* per instance"
self._send = True
d = basic.FileSender().beginFileTransfer(self.fObj, consumer)
d.addBoth(self._close)
return d
class FTPShell(FTPAnonymousShell):
"""
An authenticated implementation of L{IFTPShell}.
"""
def makeDirectory(self, path):
p = self._path(path)
try:
p.makedirs()
except (IOError, OSError), e:
return errnoToFailure(e.errno, path)
except:
return defer.fail()
else:
return defer.succeed(None)
def removeDirectory(self, path):
p = self._path(path)
if p.isfile():
# Win32 returns the wrong errno when rmdir is called on a file
# instead of a directory, so as we have the info here, let's fail
# early with a pertinent error
return defer.fail(IsNotADirectoryError(path))
try:
os.rmdir(p.path)
except (IOError, OSError), e:
return errnoToFailure(e.errno, path)
except:
return defer.fail()
else:
return defer.succeed(None)
def removeFile(self, path):
p = self._path(path)
if p.isdir():
# Win32 returns the wrong errno when remove is called on a
# directory instead of a file, so as we have the info here,
# let's fail early with a pertinent error
return defer.fail(IsADirectoryError(path))
try:
p.remove()
except (IOError, OSError), e:
return errnoToFailure(e.errno, path)
except:
return defer.fail()
else:
return defer.succeed(None)
def rename(self, fromPath, toPath):
fp = self._path(fromPath)
tp = self._path(toPath)
try:
os.rename(fp.path, tp.path)
except (IOError, OSError), e:
return errnoToFailure(e.errno, fromPath)
except:
return defer.fail()
else:
return defer.succeed(None)
def openForWriting(self, path):
"""
Open C{path} for writing.
@param path: The path, as a list of segments, to open.
@type path: C{list} of C{unicode}
@return: A L{Deferred} is returned that will fire with an object
implementing L{IWriteFile} if the file is successfully opened. If
C{path} is a directory, or if an exception is raised while trying
to open the file, the L{Deferred} will fire with an error.
"""
p = self._path(path)
if p.isdir():
# Normally, we would only check for EISDIR in open, but win32
# returns EACCES in this case, so we check before
return defer.fail(IsADirectoryError(path))
try:
fObj = p.open('w')
except (IOError, OSError), e:
return errnoToFailure(e.errno, path)
except:
return defer.fail()
return defer.succeed(_FileWriter(fObj))
class _FileWriter(object):
implements(IWriteFile)
def __init__(self, fObj):
self.fObj = fObj
self._receive = False
def receive(self):
assert not self._receive, "Can only call IWriteFile.receive *once* per instance"
self._receive = True
# FileConsumer will close the file object
return defer.succeed(FileConsumer(self.fObj))
def close(self):
return defer.succeed(None)
class BaseFTPRealm:
"""
Base class for simple FTP realms which provides an easy hook for specifying
the home directory for each user.
"""
implements(portal.IRealm)
def __init__(self, anonymousRoot):
self.anonymousRoot = filepath.FilePath(anonymousRoot)
def getHomeDirectory(self, avatarId):
"""
Return a L{FilePath} representing the home directory of the given
avatar. Override this in a subclass.
@param avatarId: A user identifier returned from a credentials checker.
@type avatarId: C{str}
@rtype: L{FilePath}
"""
raise NotImplementedError(
"%r did not override getHomeDirectory" % (self.__class__,))
def requestAvatar(self, avatarId, mind, *interfaces):
for iface in interfaces:
if iface is IFTPShell:
if avatarId is checkers.ANONYMOUS:
avatar = FTPAnonymousShell(self.anonymousRoot)
else:
avatar = FTPShell(self.getHomeDirectory(avatarId))
return (IFTPShell, avatar,
getattr(avatar, 'logout', lambda: None))
raise NotImplementedError(
"Only IFTPShell interface is supported by this realm")
class FTPRealm(BaseFTPRealm):
"""
@type anonymousRoot: L{twisted.python.filepath.FilePath}
@ivar anonymousRoot: Root of the filesystem to which anonymous
users will be granted access.
@type userHome: L{filepath.FilePath}
@ivar userHome: Root of the filesystem containing user home directories.
"""
def __init__(self, anonymousRoot, userHome='/home'):
BaseFTPRealm.__init__(self, anonymousRoot)
self.userHome = filepath.FilePath(userHome)
def getHomeDirectory(self, avatarId):
"""
Use C{avatarId} as a single path segment to construct a child of
C{self.userHome} and return that child.
"""
return self.userHome.child(avatarId)
class SystemFTPRealm(BaseFTPRealm):
"""
L{SystemFTPRealm} uses system user account information to decide what the
home directory for a particular avatarId is.
This works on POSIX but probably is not reliable on Windows.
"""
def getHomeDirectory(self, avatarId):
"""
Return the system-defined home directory of the system user account with
the name C{avatarId}.
"""
path = os.path.expanduser('~' + avatarId)
if path.startswith('~'):
raise cred_error.UnauthorizedLogin()
return filepath.FilePath(path)
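# Illustrative sketch, not part of the original module: wiring FTPRealm into a
# runnable anonymous server.  The root directory and port number are example
# values only; portal and checkers are the twisted.cred modules already used
# by this file.
def _exampleRunAnonymousServer():
    """Serve '/srv/ftp' read-only to anonymous users on port 2121."""
    realm = FTPRealm(anonymousRoot='/srv/ftp')
    p = portal.Portal(realm, [checkers.AllowAnonymousAccess()])
    reactor.listenTCP(2121, FTPFactory(p))
    reactor.run()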
# --- FTP CLIENT -------------------------------------------------------------
####
# And now for the client...
# Notes:
# * Reference: http://cr.yp.to/ftp.html
# * FIXME: Does not support pipelining (which is not supported by all
# servers anyway). This isn't a functionality limitation, just a
# small performance issue.
# * Only has a rudimentary understanding of FTP response codes (although
# the full response is passed to the caller if they so choose).
# * Assumes that USER and PASS should always be sent
# * Always sets TYPE I (binary mode)
# * Doesn't understand any of the weird, obscure TELNET stuff (\377...)
# * FIXME: Doesn't share any code with the FTPServer
class ConnectionLost(FTPError):
pass
class CommandFailed(FTPError):
pass
class BadResponse(FTPError):
pass
class UnexpectedResponse(FTPError):
pass
class UnexpectedData(FTPError):
pass
class FTPCommand:
def __init__(self, text=None, public=0):
self.text = text
self.deferred = defer.Deferred()
self.ready = 1
self.public = public
self.transferDeferred = None
def fail(self, failure):
if self.public:
self.deferred.errback(failure)
class ProtocolWrapper(protocol.Protocol):
def __init__(self, original, deferred):
self.original = original
self.deferred = deferred
def makeConnection(self, transport):
self.original.makeConnection(transport)
def dataReceived(self, data):
self.original.dataReceived(data)
def connectionLost(self, reason):
self.original.connectionLost(reason)
# Signal that transfer has completed
self.deferred.callback(None)
class SenderProtocol(protocol.Protocol):
implements(interfaces.IFinishableConsumer)
def __init__(self):
# Fired upon connection
self.connectedDeferred = defer.Deferred()
# Fired upon disconnection
self.deferred = defer.Deferred()
#Protocol stuff
def dataReceived(self, data):
raise UnexpectedData(
"Received data from the server on a "
"send-only data-connection"
)
def makeConnection(self, transport):
protocol.Protocol.makeConnection(self, transport)
self.connectedDeferred.callback(self)
def connectionLost(self, reason):
if reason.check(error.ConnectionDone):
self.deferred.callback('connection done')
else:
self.deferred.errback(reason)
#IFinishableConsumer stuff
def write(self, data):
self.transport.write(data)
def registerProducer(self, producer, streaming):
"""
Register the given producer with our transport.
"""
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
"""
Unregister the previously registered producer.
"""
self.transport.unregisterProducer()
def finish(self):
self.transport.loseConnection()
def decodeHostPort(line):
"""Decode an FTP response specifying a host and port.
@return: a 2-tuple of (host, port).
"""
abcdef = re.sub('[^0-9, ]', '', line)
parsed = [int(p.strip()) for p in abcdef.split(',')]
for x in parsed:
if x < 0 or x > 255:
raise ValueError("Out of range", line, x)
a, b, c, d, e, f = parsed
host = "%s.%s.%s.%s" % (a, b, c, d)
port = (int(e) << 8) + int(f)
return host, port
def encodeHostPort(host, port):
numbers = host.split('.') + [str(port >> 8), str(port % 256)]
return ','.join(numbers)
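# Illustrative sketch, not part of the original module: the PASV/PORT address
# encoding is six comma-separated decimal bytes, the last two being the high
# and low octets of the port (4 * 256 + 1 == 1025).
def _exampleHostPortRoundTrip():
    host, port = decodeHostPort('Entering Passive Mode (192,168,1,2,4,1).')
    assert (host, port) == ('192.168.1.2', 1025)
    assert encodeHostPort('192.168.1.2', 1025) == '192,168,1,2,4,1'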
def _unwrapFirstError(failure):
failure.trap(defer.FirstError)
return failure.value.subFailure
class FTPDataPortFactory(protocol.ServerFactory):
"""Factory for data connections that use the PORT command
(i.e. "active" transfers)
"""
noisy = 0
def buildProtocol(self, addr):
# This is a bit hackish -- we already have a Protocol instance,
# so just return it instead of making a new one
# FIXME: Reject connections from the wrong address/port
# (potential security problem)
self.protocol.factory = self
self.port.loseConnection()
return self.protocol
class FTPClientBasic(basic.LineReceiver):
"""
Foundations of an FTP client.
"""
debug = False
def __init__(self):
self.actionQueue = []
self.greeting = None
self.nextDeferred = defer.Deferred().addCallback(self._cb_greeting)
self.nextDeferred.addErrback(self.fail)
self.response = []
self._failed = 0
def fail(self, error):
"""
Give an error to any queued deferreds.
"""
self._fail(error)
def _fail(self, error):
"""
Errback all queued deferreds.
"""
if self._failed:
# We're recursing; bail out here for simplicity
return error
self._failed = 1
if self.nextDeferred:
try:
self.nextDeferred.errback(failure.Failure(ConnectionLost('FTP connection lost', error)))
except defer.AlreadyCalledError:
pass
for ftpCommand in self.actionQueue:
ftpCommand.fail(failure.Failure(ConnectionLost('FTP connection lost', error)))
return error
def _cb_greeting(self, greeting):
self.greeting = greeting
def sendLine(self, line):
"""
(Private) Sends a line, unless line is None.
"""
if line is None:
return
basic.LineReceiver.sendLine(self, line)
def sendNextCommand(self):
"""
(Private) Processes the next command in the queue.
"""
ftpCommand = self.popCommandQueue()
if ftpCommand is None:
self.nextDeferred = None
return
if not ftpCommand.ready:
self.actionQueue.insert(0, ftpCommand)
reactor.callLater(1.0, self.sendNextCommand)
self.nextDeferred = None
return
# FIXME: this if block doesn't belong in FTPClientBasic, it belongs in
# FTPClient.
if ftpCommand.text == 'PORT':
self.generatePortCommand(ftpCommand)
if self.debug:
log.msg('<-- %s' % ftpCommand.text)
self.nextDeferred = ftpCommand.deferred
self.sendLine(ftpCommand.text)
def queueCommand(self, ftpCommand):
"""
Add an FTPCommand object to the queue.
If it's the only thing in the queue, and we are connected and we aren't
waiting for a response of an earlier command, the command will be sent
immediately.
@param ftpCommand: an L{FTPCommand}
"""
self.actionQueue.append(ftpCommand)
if (len(self.actionQueue) == 1 and self.transport is not None and
self.nextDeferred is None):
self.sendNextCommand()
def queueStringCommand(self, command, public=1):
"""
Queues a string to be issued as an FTP command
@param command: string of an FTP command to queue
@param public: a flag intended for internal use by FTPClient. Don't
change it unless you know what you're doing.
@return: a L{Deferred} that will be called when the response to the
command has been received.
"""
ftpCommand = FTPCommand(command, public)
self.queueCommand(ftpCommand)
return ftpCommand.deferred
def popCommandQueue(self):
"""
Return the front element of the command queue, or None if empty.
"""
if self.actionQueue:
return self.actionQueue.pop(0)
else:
return None
def queueLogin(self, username, password):
"""
Login: send the username, send the password.
If the password is C{None}, the PASS command won't be sent. Also, if
the response to the USER command has a response code of 230 (User logged
in), then PASS won't be sent either.
"""
# Prepare the USER command
deferreds = []
userDeferred = self.queueStringCommand('USER ' + username, public=0)
deferreds.append(userDeferred)
# Prepare the PASS command (if a password is given)
if password is not None:
passwordCmd = FTPCommand('PASS ' + password, public=0)
self.queueCommand(passwordCmd)
deferreds.append(passwordCmd.deferred)
# Avoid sending PASS if the response to USER is 230.
# (ref: http://cr.yp.to/ftp/user.html#user)
def cancelPasswordIfNotNeeded(response):
if response[0].startswith('230'):
# No password needed!
self.actionQueue.remove(passwordCmd)
return response
userDeferred.addCallback(cancelPasswordIfNotNeeded)
# Error handling.
for deferred in deferreds:
# If something goes wrong, call fail
deferred.addErrback(self.fail)
# But also swallow the error, so we don't cause spurious errors
deferred.addErrback(lambda x: None)
def lineReceived(self, line):
"""
(Private) Parses the response messages from the FTP server.
"""
# Add this line to the current response
if self.debug:
log.msg('--> %s' % line)
self.response.append(line)
# Bail out if this isn't the last line of a response
# The last line of response starts with 3 digits followed by a space
codeIsValid = re.match(r'\d{3} ', line)
if not codeIsValid:
return
code = line[0:3]
# Ignore marks
if code[0] == '1':
return
# Check that we were expecting a response
if self.nextDeferred is None:
self.fail(UnexpectedResponse(self.response))
return
# Reset the response
response = self.response
self.response = []
# Look for a success or error code, and call the appropriate callback
if code[0] in ('2', '3'):
# Success
self.nextDeferred.callback(response)
elif code[0] in ('4', '5'):
# Failure
self.nextDeferred.errback(failure.Failure(CommandFailed(response)))
else:
# This shouldn't happen unless something screwed up.
log.msg('Server sent invalid response code %s' % (code,))
self.nextDeferred.errback(failure.Failure(BadResponse(response)))
# Run the next command
self.sendNextCommand()
def connectionLost(self, reason):
self._fail(reason)
class _PassiveConnectionFactory(protocol.ClientFactory):
noisy = False
def __init__(self, protoInstance):
self.protoInstance = protoInstance
def buildProtocol(self, ignored):
self.protoInstance.factory = self
return self.protoInstance
def clientConnectionFailed(self, connector, reason):
e = FTPError('Connection Failed', reason)
self.protoInstance.deferred.errback(e)
class FTPClient(FTPClientBasic):
"""
L{FTPClient} is a client implementation of the FTP protocol which
exposes FTP commands as methods which return L{Deferred}s.
Each command method returns a L{Deferred} which is called back when a
successful response code (2xx or 3xx) is received from the server or
which is errbacked if an error response code (4xx or 5xx) is received
from the server or if a protocol violation occurs. If an error response
code is received, the L{Deferred} fires with a L{Failure} wrapping a
L{CommandFailed} instance. The L{CommandFailed} instance is created
with a list of the response lines received from the server.
See U{RFC 959<http://www.ietf.org/rfc/rfc959.txt>} for error code
definitions.
Both active and passive transfers are supported.
@ivar passive: See description in __init__.
"""
connectFactory = reactor.connectTCP
def __init__(self, username='anonymous',
password='twisted@twistedmatrix.com',
passive=1):
"""
Constructor.
I will login as soon as I receive the welcome message from the server.
@param username: FTP username
@param password: FTP password
@param passive: flag that controls if I use active or passive data
connections. You can also change this after construction by
assigning to C{self.passive}.
"""
FTPClientBasic.__init__(self)
self.queueLogin(username, password)
self.passive = passive
def fail(self, error):
"""
Disconnect, and also give an error to any queued deferreds.
"""
self.transport.loseConnection()
self._fail(error)
def receiveFromConnection(self, commands, protocol):
"""
Retrieves a file or listing generated by the given command,
feeding it to the given protocol.
@param commands: list of strings of FTP commands to execute then receive
the results of (e.g. C{LIST}, C{RETR})
@param protocol: A L{Protocol} B{instance} e.g. an
L{FTPFileListProtocol}, or something that can be adapted to one.
Typically this will be an L{IConsumer} implementation.
@return: L{Deferred}.
"""
protocol = interfaces.IProtocol(protocol)
wrapper = ProtocolWrapper(protocol, defer.Deferred())
return self._openDataConnection(commands, wrapper)
def queueLogin(self, username, password):
"""
Login: send the username, send the password, and
set retrieval mode to binary
"""
FTPClientBasic.queueLogin(self, username, password)
d = self.queueStringCommand('TYPE I', public=0)
# If something goes wrong, call fail
d.addErrback(self.fail)
# But also swallow the error, so we don't cause spurious errors
d.addErrback(lambda x: None)
def sendToConnection(self, commands):
"""
Send data to the server over a data connection opened for the given
commands (for example C{STOR}).
@return: A tuple of two L{Deferred}s:
- L{Deferred} L{IFinishableConsumer}. You must call
the C{finish} method on the IFinishableConsumer when the file
is completely transferred.
- L{Deferred} list of control-connection responses.
"""
s = SenderProtocol()
r = self._openDataConnection(commands, s)
return (s.connectedDeferred, r)
def _openDataConnection(self, commands, protocol):
"""
This method returns a DeferredList.
"""
cmds = [FTPCommand(command, public=1) for command in commands]
cmdsDeferred = defer.DeferredList([cmd.deferred for cmd in cmds],
fireOnOneErrback=True, consumeErrors=True)
cmdsDeferred.addErrback(_unwrapFirstError)
if self.passive:
# Hack: use a mutable object to sneak a variable out of the
# scope of doPassive
_mutable = [None]
def doPassive(response):
"""Connect to the port specified in the response to PASV"""
host, port = decodeHostPort(response[-1][4:])
f = _PassiveConnectionFactory(protocol)
_mutable[0] = self.connectFactory(host, port, f)
pasvCmd = FTPCommand('PASV')
self.queueCommand(pasvCmd)
pasvCmd.deferred.addCallback(doPassive).addErrback(self.fail)
results = [cmdsDeferred, pasvCmd.deferred, protocol.deferred]
d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
d.addErrback(_unwrapFirstError)
# Ensure the connection is always closed
def close(x, m=_mutable):
m[0] and m[0].disconnect()
return x
d.addBoth(close)
else:
# We just place a marker command in the queue, and will fill in
# the host and port numbers later (see generatePortCommand)
portCmd = FTPCommand('PORT')
# Ok, now we jump through a few hoops here.
# This is the problem: a transfer is not to be trusted as complete
# until we get both the "226 Transfer complete" message on the
# control connection, and the data socket is closed. Thus, we use
# a DeferredList to make sure we only fire the callback at the
# right time.
portCmd.transferDeferred = protocol.deferred
portCmd.protocol = protocol
portCmd.deferred.addErrback(portCmd.transferDeferred.errback)
self.queueCommand(portCmd)
# Create dummy functions for the next callback to call.
# These will also be replaced with real functions in
# generatePortCommand.
portCmd.loseConnection = lambda result: result
portCmd.fail = lambda error: error
# Ensure that the connection always gets closed
cmdsDeferred.addErrback(lambda e, pc=portCmd: pc.fail(e) or e)
results = [cmdsDeferred, portCmd.deferred, portCmd.transferDeferred]
d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
d.addErrback(_unwrapFirstError)
for cmd in cmds:
self.queueCommand(cmd)
return d
def generatePortCommand(self, portCmd):
"""
(Private) Generates the text of a given PORT command.
"""
# The problem is that we don't create the listening port until we need
# it for various reasons, and so we have to muck about to figure out
# what interface and port it's listening on, and then finally we can
# create the text of the PORT command to send to the FTP server.
# FIXME: This method is far too ugly.
# FIXME: The best solution is probably to only create the data port
# once per FTPClient, and just recycle it for each new download.
# This should be ok, because we don't pipeline commands.
# Start listening on a port
factory = FTPDataPortFactory()
factory.protocol = portCmd.protocol
listener = reactor.listenTCP(0, factory)
factory.port = listener
# Ensure we close the listening port if something goes wrong
def listenerFail(error, listener=listener):
if listener.connected:
listener.loseConnection()
return error
portCmd.fail = listenerFail
# Construct crufty FTP magic numbers that represent host & port
host = self.transport.getHost().host
port = listener.getHost().port
portCmd.text = 'PORT ' + encodeHostPort(host, port)
def escapePath(self, path):
"""
Returns an FTP-escaped path (newlines replaced with nulls).
"""
# Escape newline characters
return path.replace('\n', '\0')
def retrieveFile(self, path, protocol, offset=0):
"""
Retrieve a file from the given path
This method issues the 'RETR' FTP command.
The file is fed into the given Protocol instance. The data connection
will be passive if self.passive is set.
@param path: path to file that you wish to receive.
@param protocol: a L{Protocol} instance.
@param offset: offset to start downloading from
@return: L{Deferred}
"""
cmds = ['RETR ' + self.escapePath(path)]
if offset:
cmds.insert(0, ('REST ' + str(offset)))
return self.receiveFromConnection(cmds, protocol)
retr = retrieveFile
def storeFile(self, path, offset=0):
"""
Store a file at the given path.
This method issues the 'STOR' FTP command.
@return: A tuple of two L{Deferred}s:
- L{Deferred} L{IFinishableConsumer}. You must call
the C{finish} method on the IFinishableConsumer when the file
is completely transferred.
- L{Deferred} list of control-connection responses.
"""
cmds = ['STOR ' + self.escapePath(path)]
if offset:
cmds.insert(0, ('REST ' + str(offset)))
return self.sendToConnection(cmds)
stor = storeFile
def rename(self, pathFrom, pathTo):
"""
Rename a file.
This method issues the I{RNFR}/I{RNTO} command sequence to rename
C{pathFrom} to C{pathTo}.
@param pathFrom: the absolute path to the file to be renamed
@type pathFrom: C{str}
@param pathTo: the absolute path to rename the file to.
@type pathTo: C{str}
@return: A L{Deferred} which fires when the rename operation has
succeeded or failed. If it succeeds, the L{Deferred} is called
back with a two-tuple of lists. The first list contains the
responses to the I{RNFR} command. The second list contains the
responses to the I{RNTO} command. If either I{RNFR} or I{RNTO}
fails, the L{Deferred} is errbacked with L{CommandFailed} or
L{BadResponse}.
@rtype: L{Deferred}
@since: 8.2
"""
renameFrom = self.queueStringCommand('RNFR ' + self.escapePath(pathFrom))
renameTo = self.queueStringCommand('RNTO ' + self.escapePath(pathTo))
fromResponse = []
# Use a separate Deferred for the ultimate result so that Deferred
# chaining can't interfere with its result.
result = defer.Deferred()
# Bundle up all the responses
result.addCallback(lambda toResponse: (fromResponse, toResponse))
def ebFrom(failure):
# Make sure the RNTO doesn't run if the RNFR failed.
self.popCommandQueue()
result.errback(failure)
# Save the RNFR response to pass to the result Deferred later
renameFrom.addCallbacks(fromResponse.extend, ebFrom)
# Hook up the RNTO to the result Deferred as well
renameTo.chainDeferred(result)
return result
def list(self, path, protocol):
"""
Retrieve a file listing into the given protocol instance.
This method issues the 'LIST' FTP command.
@param path: path to get a file listing for.
@param protocol: a L{Protocol} instance, probably a
L{FTPFileListProtocol} instance. It can cope with most common file
listing formats.
@return: L{Deferred}
"""
if path is None:
path = ''
return self.receiveFromConnection(['LIST ' + self.escapePath(path)], protocol)
def nlst(self, path, protocol):
"""
Retrieve a short file listing into the given protocol instance.
This method issues the 'NLST' FTP command.
NLST (should) return a list of filenames, one per line.
@param path: path to get short file listing for.
@param protocol: a L{Protocol} instance.
"""
if path is None:
path = ''
return self.receiveFromConnection(['NLST ' + self.escapePath(path)], protocol)
def cwd(self, path):
"""
Issues the CWD (Change Working Directory) command. It's also
available as changeDirectory, which parses the result.
@return: a L{Deferred} that will be called when done.
"""
return self.queueStringCommand('CWD ' + self.escapePath(path))
def changeDirectory(self, path):
"""
Change the directory on the server and parse the result to determine
if it was successful or not.
@type path: C{str}
@param path: The path to which to change.
@return: a L{Deferred} which will be called back when the directory
change has succeeded, or errbacked if an error occurs.
"""
warnings.warn(
"FTPClient.changeDirectory is deprecated in Twisted 8.2 and "
"newer. Use FTPClient.cwd instead.",
category=DeprecationWarning,
stacklevel=2)
def cbResult(result):
if result[-1][:3] != '250':
return failure.Failure(CommandFailed(result))
return True
return self.cwd(path).addCallback(cbResult)
def makeDirectory(self, path):
"""
Make a directory
This method issues the MKD command.
@param path: The path to the directory to create.
@type path: C{str}
@return: A L{Deferred} which fires when the server responds. If the
directory is created, the L{Deferred} is called back with the
server response. If the server response indicates the directory
was not created, the L{Deferred} is errbacked with a L{Failure}
wrapping L{CommandFailed} or L{BadResponse}.
@rtype: L{Deferred}
@since: 8.2
"""
return self.queueStringCommand('MKD ' + self.escapePath(path))
def removeFile(self, path):
"""
Delete a file on the server.
L{removeFile} issues a I{DELE} command to the server to remove the
indicated file. Note that this command cannot remove a directory.
@param path: The path to the file to delete. May be relative to the
current dir.
@type path: C{str}
@return: A L{Deferred} which fires when the server responds. On error,
it is errbacked with either L{CommandFailed} or L{BadResponse}. On
success, it is called back with a list of response lines.
@rtype: L{Deferred}
@since: 8.2
"""
return self.queueStringCommand('DELE ' + self.escapePath(path))
def cdup(self):
"""
Issues the CDUP (Change Directory UP) command.
@return: a L{Deferred} that will be called when done.
"""
return self.queueStringCommand('CDUP')
def pwd(self):
"""
Issues the PWD (Print Working Directory) command.
L{getDirectory} does the same job but automatically parses the
result.
@return: a L{Deferred} that will be called when done. It is up to the
caller to interpret the response, but the L{parsePWDResponse} method
in this module should work.
"""
return self.queueStringCommand('PWD')
def getDirectory(self):
"""
Returns the current remote directory.
@return: a L{Deferred} that will be called back with a C{str} giving
the remote directory or which will errback with L{CommandFailed}
if an error response is returned.
"""
def cbParse(result):
try:
# The only valid code is 257
if int(result[0].split(' ', 1)[0]) != 257:
raise ValueError
except (IndexError, ValueError):
return failure.Failure(CommandFailed(result))
path = parsePWDResponse(result[0])
if path is None:
return failure.Failure(CommandFailed(result))
return path
return self.pwd().addCallback(cbParse)
def quit(self):
"""
Issues the I{QUIT} command.
@return: A L{Deferred} that fires when the server acknowledges the
I{QUIT} command. The transport should not be disconnected until
this L{Deferred} fires.
"""
return self.queueStringCommand('QUIT')
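# Illustrative sketch, not part of the original module: a minimal client
# session built with twisted.internet.protocol.ClientCreator.  The host name,
# credentials and log message are example values only.
def _exampleClientSession():
    from twisted.internet.protocol import ClientCreator
    def connected(client):
        d = client.getDirectory()
        d.addCallback(lambda path: log.msg('remote cwd is %s' % (path,)))
        d.addCallback(lambda ignored: client.quit())
        return d
    creator = ClientCreator(reactor, FTPClient, 'anonymous', 'guest@example.com')
    return creator.connectTCP('ftp.example.com', 21).addCallback(connected)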
class FTPFileListProtocol(basic.LineReceiver):
"""Parser for standard FTP file listings
This is the evil required to match::
-rw-r--r-- 1 root other 531 Jan 29 03:26 README
If you need different evil for a wacky FTP server, you can
override either C{fileLinePattern} or C{parseDirectoryLine()}.
It populates the instance attribute self.files, which is a list containing
dicts with the following keys (examples from the above line):
- filetype: e.g. 'd' for directories, or '-' for an ordinary file
- perms: e.g. 'rw-r--r--'
- nlinks: e.g. 1
- owner: e.g. 'root'
- group: e.g. 'other'
- size: e.g. 531
- date: e.g. 'Jan 29 03:26'
- filename: e.g. 'README'
- linktarget: e.g. 'some/file'
Note that the 'date' value will be formatted differently depending on the
date. Check U{http://cr.yp.to/ftp.html} if you really want to try to parse
it.
@ivar files: list of dicts describing the files in this listing
"""
fileLinePattern = re.compile(
r'^(?P<filetype>.)(?P<perms>.{9})\s+(?P<nlinks>\d*)\s*'
r'(?P<owner>\S+)\s+(?P<group>\S+)\s+(?P<size>\d+)\s+'
r'(?P<date>...\s+\d+\s+[\d:]+)\s+(?P<filename>([^ ]|\\ )*?)'
r'( -> (?P<linktarget>[^\r]*))?\r?$'
)
delimiter = '\n'
def __init__(self):
self.files = []
def lineReceived(self, line):
d = self.parseDirectoryLine(line)
if d is None:
self.unknownLine(line)
else:
self.addFile(d)
def parseDirectoryLine(self, line):
"""Return a dictionary of fields, or None if line cannot be parsed.
@param line: line of text expected to contain a directory entry
@type line: str
@return: dict
"""
match = self.fileLinePattern.match(line)
if match is None:
return None
else:
d = match.groupdict()
d['filename'] = d['filename'].replace(r'\ ', ' ')
d['nlinks'] = int(d['nlinks'])
d['size'] = int(d['size'])
if d['linktarget']:
d['linktarget'] = d['linktarget'].replace(r'\ ', ' ')
return d
def addFile(self, info):
"""Append file information dictionary to the list of known files.
Subclasses can override or extend this method to handle file
information differently without affecting the parsing of data
from the server.
@param info: dictionary containing the parsed representation
of the file information
@type info: dict
"""
self.files.append(info)
def unknownLine(self, line):
"""Deal with received lines which could not be parsed as file
information.
Subclasses can override this to perform any special processing
needed.
@param line: unparsable line as received
@type line: str
"""
pass
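# Illustrative sketch, not part of the original module: feeding the sample
# line from the class docstring through the parser.
def _exampleParseListingLine():
    parser = FTPFileListProtocol()
    line = '-rw-r--r--   1 root     other        531 Jan 29 03:26 README'
    info = parser.parseDirectoryLine(line)
    assert info['filename'] == 'README'
    assert info['size'] == 531
    assert info['filetype'] == '-'
    return info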
def parsePWDResponse(response):
"""Returns the path from a response to a PWD command.
Responses typically look like::
257 "/home/andrew" is current directory.
For this example, I will return C{'/home/andrew'}.
If I can't find the path, I return C{None}.
"""
match = re.search('"(.*)"', response)
if match:
return match.groups()[0]
else:
return None
|
eunchong/build
|
third_party/twisted_10_2/twisted/protocols/ftp.py
|
Python
|
bsd-3-clause
| 93,283 | 0.002969 |
"""Setup for HDD-indexer
This module provides the setup for ``hdd-indexer`` by downloading its
`dependencies`, creating the `database`, and creating a sample `user`.
Usage:
$ python setup.py
Dependencies:
The dependencies are installed with pip.
$ pip install -r requirements.txt
Database:
The database is created via `migrations` from `django`
$ python manage.py migrate
Superuser:
The superuser is created for accessing the admin interface.
It has the credentials `u:user` and `p:pass`.
$ python manage.py createsuperuser
username: user
email: user@example.com
password: pass
Webserver:
The django webserver is started at localhost port 8000
$ python manage.py runserver
Browser:
A browser page is opened at localhost:8000 to continue setup.
"""
import os
import platform
import pickle
import subprocess
import urllib2
PICKLED_SETUPFILE = './.setup.pickle'
SETUP_STATUS = {}
def depickle_setup():
"""Load setup status from pickle
Args:
None
Returns:
dict:
console_completed(bool): console setup completed
last_completed(bool): whether the last setup completed
successfully
installed_dependencies(bool): installed dependencies
database_migrate(bool): database migrated
user_admin(bool): create an admin user
Raises:
None
"""
try:
if os.path.isfile(PICKLED_SETUPFILE):
# setup pickle exists, has been written previously
with open(PICKLED_SETUPFILE, 'r') as file:
setup_status = pickle.load(file)
# TODO: assert setup status is a dict
# TODO: assert setup status fields are present
# TODO: assert setup status values are valid
return setup_status
else:
# setup pickle does not exist, setup run first time
setup_status = {
'console_completed': False,
'last_completed': False,
'installed_dependencies': False,
'database_migrate': False,
'user_admin': False,
}
pickle_setup(setup_status)
return setup_status
except Exception:
pass
# TODO: logging
def pickle_setup(setup_dict):
"""Save setup status to pickle
Args:
setup_dict(dict):
console_completed(bool): console setup completed
last_completed(bool): whether the last setup completed
successfully
installed_dependencies(bool): installed dependencies
database_migrate(bool): database migrated
user_admin(bool): create an admin user
Returns:
None
Raises:
None
"""
assert type(setup_dict) == dict
# TODO: check setup dict has valid keys
# TODO: check setup dict has valid values
try:
with open(PICKLED_SETUPFILE, 'w') as file:
pickle.dump(setup_dict, file)
except Exception:
pass
# TODO: logging
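# Illustrative sketch, not part of the original script: a typical read /
# update / write cycle for the pickled setup status.  The key updated below
# is one of the flags documented above.
def _example_status_roundtrip():
    """Mark the dependency step as done and persist it."""
    status = depickle_setup()
    status['installed_dependencies'] = True
    pickle_setup(status)
    return status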
def welcome_message():
"""
"""
# Welcome message
cls()
print 'Welcome to HDD-indexer'
print '----------------------'
print '----------------------'
if SETUP_STATUS['last_completed']:
print "Let's start with the setup."
else:
print "Let's continue with the setup."
def install_dependencies():
# Internet checkup
print "We'll make sure you are connected to the internet first."
raw_input("Press Enter to continue...")
if not internet_on():
print 'What! No Internet...? :('
return # cannot install dependencies without the internet
print 'Oooh... Connectivity! :)'
raw_input("Press Enter to continue...")
# Dependencies
cls()
print "The first thing we'll do is install the dependencies"
raw_input("Press Enter to continue...")
# pip install -r requirements.txt
# TODO: assert requirements.txt exists
cmd = ['pip', 'install', '-r', 'requirements.txt']
# open a subprocess and pipe its output
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
for line in p.stdout:
print line,
p.wait()
if p.returncode:
print 'ERROR! ERROR! ERROR!'
return
SETUP_STATUS['installed_dependencies'] = True
pickle_setup(SETUP_STATUS)
print "Excellent! We're set!"
raw_input("Press Enter to continue...")
def database_migrate():
"""
"""
# Database
cls()
print "Now let's setup the database for you..."
raw_input("Press Enter to continue...")
print '----------------------'
print 'MIGRATING DATABASE'
# python manage.py migrate
# This will run django's migrations, which creates the database
# and its associated tables / schema
cmd = ['python', 'manage.py', 'migrate']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
for line in p.stdout:
print line,
p.wait()
if p.returncode:
print 'ERROR! ERROR! ERROR!'
return
SETUP_STATUS['database_migrate'] = True
pickle_setup(SETUP_STATUS)
raw_input("Press Enter to continue...")
def create_user_admin():
"""
"""
# User
cls()
print "Now that it's done, let's create a user for you!"
print '----------------------'
print "username: user"
print "password: pass"
print '----------------------'
print "You ready?"
raw_input("Press Enter to continue...")
# load django's settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hdd_indexer.settings")
import django # import django inline only when required
django.setup() # call django to load its settings
from django.contrib.auth.models import User
try:
# get the user with u=user
p = User.objects.get(username='user')
# if exists, delete it
p.delete()
except User.DoesNotExist:
# does not exist, so let's create it
# TODO: check if the password can be changed programmatically instead of
# deleting the user and creating it again
pass
User.objects.create_superuser('user', 'user@example.com', 'pass')
SETUP_STATUS['user_admin'] = True
pickle_setup(SETUP_STATUS)
print 'Alright, done!'
raw_input("Press Enter to continue...")
def start(setup_status):
"""Start - starts the setup
start carries the main function calls for setup.py.
It notifies the user about each step, and waits for confirmation.
There is no explicit way to skip or cancel a step.
If the user wishes to quit, they can do so by breaking the setup.
Args:
None
Returns:
None
Raises:
None
"""
global SETUP_STATUS
SETUP_STATUS = setup_status
welcome_message()
err = None
if not SETUP_STATUS['installed_dependencies'] and not err:
err = install_dependencies()
if not SETUP_STATUS['database_migrate'] and not err:
err = database_migrate()
if not SETUP_STATUS['user_admin'] and not err:
err = create_user_admin()
if not err:
SETUP_STATUS['console_completed'] = True
else:
SETUP_STATUS['console_completed'] = False
pickle_setup(SETUP_STATUS)
return SETUP_STATUS
def internet_on():
"""Check if internet connectivity is present
The function checks if the internet is reachable by connecting to a
known Google IP address (216.58.196.99) and checking for a response.
Args:
None
Returns:
bool: True if ON, False otherwise.
Raises:
None
"""
try:
urllib2.urlopen('http://216.58.196.99', timeout=10)
return True
except urllib2.URLError:
pass
return False
def cls():
"""Clear Screen
The function clears the screen in any platform (POSIX / Windows).
It checks which system is running and uses the appropriate command
for the default terminal.
For Windows:
platform.system returns 'Windows'
screen is cleared in the terminal using 'cls'
For Others:
screen is cleared using 'clear' across all POSIX systems
Args:
None
Returns:
None
Raises:
None
"""
if platform.system() != 'Windows':
subprocess.call("clear") # linux/mac
else:
subprocess.call("cls", shell=True) # windows
if __name__ == '__main__':
print 'Please use (python) hdd-indexer.py'
|
coolharsh55/hdd-indexer
|
setup.py
|
Python
|
mit
| 8,480 | 0 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
from django.conf import settings
try:
from settings import celery_local as celery_settings
except ImportError:
from settings import celery as celery_settings
app = Celery('taiga')
app.config_from_object(celery_settings)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
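# Illustrative sketch, not part of the original module: with ``app`` configured
# above, tasks are declared with the ``@app.task`` decorator and enqueued with
# ``.delay``.  The task below is an example only and is not used by taiga.
@app.task
def _example_add(x, y):
    """Toy task: add two numbers on a worker."""
    return x + y
# _example_add.delay(2, 3) would queue the task for a running celery worker.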
|
dayatz/taiga-back
|
taiga/celery.py
|
Python
|
agpl-3.0
| 1,321 | 0.000758 |
import numpy as np
from menpo.base import Targetable, Vectorizable
from menpo.model import MeanInstanceLinearModel
from menpofit.differentiable import DP
def similarity_2d_instance_model(shape):
r"""
A MeanInstanceLinearModel that encodes all possible 2D similarity
transforms of a 2D shape (of n_points).
Parameters
----------
shape : 2D :class:`menpo.shape.Shape`
Returns
-------
model : `menpo.model.linear.MeanInstanceLinearModel`
Model with four components, linear combinations of which
represent the original shape under a similarity transform. The
model is exhaustive (that is, all possible similarity transforms
can be expressed in the model).
"""
shape_vector = shape.as_vector()
components = np.zeros((4, shape_vector.shape[0]))
components[0, :] = shape_vector # Comp. 1 - just the shape
rotated_ccw = shape.points[:, ::-1].copy() # flip x,y -> y,x
rotated_ccw[:, 0] = -rotated_ccw[:, 0] # negate (old) y
components[1, :] = rotated_ccw.flatten() # C2 - the shape rotated 90 degs
components[2, ::2] = 1 # Tx
components[3, 1::2] = 1 # Ty
return MeanInstanceLinearModel(components, shape_vector, shape)
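# Illustrative sketch, not part of the original module: building the
# similarity basis for a tiny 2D triangle and producing one instance.  The
# point coordinates and weights are arbitrary example values.
def _example_similarity_instance():
    from menpo.shape import PointCloud
    triangle = PointCloud(np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]]))
    model = similarity_2d_instance_model(triangle)
    # weights over the [shape, rotated shape, x-translation, y-translation]
    # components built above
    return model.instance(np.array([1.0, 0.0, 2.0, -1.0]))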
class ModelInstance(Targetable, Vectorizable, DP):
r"""A instance of a :map:`InstanceBackedModel`.
This class describes an instance produced from one of Menpo's
:map:`InstanceBackedModel`. The actual instance provided by the model can
be found at self.target. This class is targetable, and so
:meth:`set_target` can be used to update the target - this will produce the
closest possible instance the Model can produce to the target and set the
weights accordingly.
Parameters
----------
model : :map:`InstanceBackedModel`
The generative model that instances will be taken from
"""
def __init__(self, model):
self.model = model
self._target = None
# set all weights to 0 (yielding the mean, first call to
# from_vector_inplace() or set_target() will update this)
self._weights = np.zeros(self.model.n_active_components)
self._sync_target_from_state()
@property
def n_weights(self):
r"""
The number of parameters in the linear model.
:type: int
"""
return self.model.n_active_components
@property
def weights(self):
r"""
In this simple :map:`ModelInstance` the weights are just the weights
of the model.
"""
return self._weights
@property
def target(self):
return self._target
def _target_setter(self, new_target):
r"""
Called by the Targetable framework when set_target() is called.
This method **ONLY SETS THE NEW TARGET** it does no synchronisation
logic (for that, see _sync_state_from_target())
"""
self._target = new_target
def _new_target_from_state(self):
r"""
Return the appropriate target for the parameters provided.
Subclasses can override this.
Returns
-------
new_target: model instance
"""
return self.model.instance(self.weights)
def _sync_state_from_target(self):
# 1. Find the optimum parameters and set them
self._weights = self._weights_for_target(self.target)
# 2. Find the closest target the model can reproduce and trigger an
# update of our transform
self._target_setter(self._new_target_from_state())
def _weights_for_target(self, target):
r"""
Return the appropriate model weights for target provided.
Subclasses can override this.
Parameters
----------
target: model instance
The target that the statistical model will try to reproduce
Returns
-------
weights: (P,) ndarray
Weights of the statistical model that generate the closest
instance to the requested target
"""
return self.model.project(target)
def _as_vector(self):
r"""
Return the current parameters of this transform - this is the
just the linear model's weights
Returns
-------
params : (`n_parameters`,) ndarray
The vector of parameters
"""
return self.weights
def from_vector_inplace(self, vector):
r"""
Updates this :map:`ModelInstance` from it's
vectorized form (in this case, simply the weights on the linear model)
"""
self._weights = vector
self._sync_target_from_state()
class PDM(ModelInstance, DP):
r"""Specialization of :map:`ModelInstance` for use with spatial data.
"""
@property
def n_dims(self):
r"""
The number of dimensions of the spatial instance of the model
:type: int
"""
return self.model.template_instance.n_dims
def d_dp(self, points):
"""
Returns the Jacobian of the PCA model reshaped to have the standard
Jacobian shape:
n_points x n_params x n_dims
which maps to
n_features x n_components x n_dims
on the linear model
Returns
-------
jacobian : (n_features, n_components, n_dims) ndarray
The Jacobian of the model in the standard Jacobian shape.
"""
d_dp = self.model.components.reshape(self.model.n_active_components,
-1, self.n_dims)
return d_dp.swapaxes(0, 1)
# TODO: document me
class GlobalPDM(PDM):
r"""
"""
def __init__(self, model, global_transform_cls):
# Start the global_transform as an identity (first call to
# from_vector_inplace() or set_target() will update this)
mean = model.mean()
self.global_transform = global_transform_cls(mean, mean)
super(GlobalPDM, self).__init__(model)
@property
def n_global_parameters(self):
r"""
The number of parameters in the `global_transform`
:type: int
"""
return self.global_transform.n_parameters
@property
def global_parameters(self):
r"""
The parameters for the global transform.
:type: (`n_global_parameters`,) ndarray
"""
return self.global_transform.as_vector()
def _new_target_from_state(self):
r"""
Return the appropriate target for the model weights provided,
accounting for the effect of the global transform
Returns
-------
new_target: :class:`menpo.shape.PointCloud`
A new target for the weights provided
"""
return self.global_transform.apply(self.model.instance(self.weights))
def _weights_for_target(self, target):
r"""
Return the appropriate model weights for target provided, accounting
for the effect of the global transform. Note that this method
updates the global transform to be in the correct state.
Parameters
----------
target: :class:`menpo.shape.PointCloud`
The target that the statistical model will try to reproduce
Returns
-------
weights: (P,) ndarray
Weights of the statistical model that generate the closest
PointCloud to the requested target
"""
self._update_global_transform(target)
projected_target = self.global_transform.pseudoinverse().apply(target)
# now we have the target in model space, project it to recover the
# weights
new_weights = self.model.project(projected_target)
# TODO investigate the impact of this, could be problematic
# the model can't perfectly reproduce the target we asked for -
# reset the global_transform.target to what it CAN produce
#refined_target = self._target_for_weights(new_weights)
#self.global_transform.target = refined_target
return new_weights
def _update_global_transform(self, target):
self.global_transform.set_target(target)
def _as_vector(self):
r"""
Return the current parameters of this transform - this is the
just the linear model's weights
Returns
-------
params : (`n_parameters`,) ndarray
The vector of parameters
"""
return np.hstack([self.global_parameters, self.weights])
def from_vector_inplace(self, vector):
# First, update the global transform
global_parameters = vector[:self.n_global_parameters]
self._update_global_weights(global_parameters)
# Now extract the weights, and let super handle the update
weights = vector[self.n_global_parameters:]
PDM.from_vector_inplace(self, weights)
def _update_global_weights(self, global_weights):
r"""
Hook that allows for overriding behavior when the global weights are
set. Default implementation simply asks global_transform to
update itself from vector.
"""
self.global_transform.from_vector_inplace(global_weights)
def d_dp(self, points):
# d_dp is always evaluated at the mean shape
points = self.model.mean().points
# compute dX/dp
# dX/dq is the Jacobian of the global transform evaluated at the
# current target
# (n_points, n_global_params, n_dims)
dX_dq = self._global_transform_d_dp(points)
# by application of the chain rule dX/db is the Jacobian of the
# model transformed by the linear component of the global transform
# (n_points, n_weights, n_dims)
dS_db = PDM.d_dp(self, [])
# (n_points, n_dims, n_dims)
dX_dS = self.global_transform.d_dx(points)
# (n_points, n_weights, n_dims)
dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db)
# dX/dp is simply the concatenation of the previous two terms
# (n_points, n_params, n_dims)
return np.hstack((dX_dq, dX_db))
def _global_transform_d_dp(self, points):
return self.global_transform.d_dp(points)
# TODO: document me
class OrthoPDM(GlobalPDM):
r"""
"""
def __init__(self, model, global_transform_cls):
# 1. Construct similarity model from the mean of the model
self.similarity_model = similarity_2d_instance_model(model.mean())
# 2. Orthonormalize model and similarity model
model_cpy = model.copy()
model_cpy.orthonormalize_against_inplace(self.similarity_model)
self.similarity_weights = self.similarity_model.project(
model_cpy.mean())
super(OrthoPDM, self).__init__(model_cpy, global_transform_cls)
@property
def global_parameters(self):
r"""
The parameters for the global transform.
:type: (`n_global_parameters`,) ndarray
"""
return self.similarity_weights
def _update_global_transform(self, target):
self.similarity_weights = self.similarity_model.project(target)
self._update_global_weights(self.similarity_weights)
def _update_global_weights(self, global_weights):
self.similarity_weights = global_weights
new_target = self.similarity_model.instance(global_weights)
self.global_transform.set_target(new_target)
def _global_transform_d_dp(self, points):
return self.similarity_model.components.reshape(
self.n_global_parameters, -1, self.n_dims).swapaxes(0, 1)
|
mrgloom/menpofit
|
menpofit/modelinstance.py
|
Python
|
bsd-3-clause
| 11,665 | 0.000171 |
import time
import datetime
import sqlite3
import urllib
import gzip
import urllib2
import StringIO
import sickbeard
from sickbeard import db
from sickbeard import logger
from sickbeard.common import *
class TVCache():
def __init__(self, providerName):
self.providerName = providerName
def _getDB(self):
return db.DBConnection("cache.db")
def _clearCache(self):
myDB = self._getDB()
myDB.action("DELETE FROM "+self.providerName+" WHERE 1")
def updateCache(self):
print "This should be overridden by implementing classes"
pass
def searchCache(self, show, season, episode, quality=ANY):
myDB = self._getDB()
sql = "SELECT * FROM "+self.providerName+" WHERE tvdbid = "+str(show.tvdbid)+ \
" AND season = "+str(season)+" AND episode = "+str(episode)
if quality != ANY:
sql += " AND quality = "+str(quality)
return myDB.select(sql)
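# Illustrative sketch, not part of the original module: the smallest possible
# provider cache.  A real subclass would fetch its provider's feed in
# updateCache() and insert rows; the provider name here is an example only.
class _ExampleProviderCache(TVCache):
    def __init__(self):
        TVCache.__init__(self, "example_provider")
    def updateCache(self):
        # a real implementation would download the feed and store new entries
        self._clearCache()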
|
mattsch/Sickbeard
|
sickbeard/tvcache.py
|
Python
|
gpl-3.0
| 1,097 | 0.012762 |
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""OpCodes module
This module implements the data structures which define the cluster
operations - the so-called opcodes.
Every operation which modifies the cluster state is expressed via
opcodes.
"""
# these are practically structures, so disable the message about too
# few public methods:
# pylint: disable-msg=R0903
import logging
import re
import operator
from ganeti import constants
from ganeti import errors
from ganeti import ht
# Common opcode attributes
#: output fields for a query operation
_POutputFields = ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Selected output fields")
#: the shutdown timeout
_PShutdownTimeout = \
("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt,
"How long to wait for instance to shut down")
#: the force parameter
_PForce = ("force", False, ht.TBool, "Whether to force the operation")
#: a required instance name (for single-instance LUs)
_PInstanceName = ("instance_name", ht.NoDefault, ht.TNonEmptyString,
"Instance name")
#: Whether to ignore offline nodes
_PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool,
"Whether to ignore offline nodes")
#: a required node name (for single-node LUs)
_PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString, "Node name")
#: a required node group name (for single-group LUs)
_PGroupName = ("group_name", ht.NoDefault, ht.TNonEmptyString, "Group name")
#: Migration type (live/non-live)
_PMigrationMode = ("mode", None,
ht.TOr(ht.TNone, ht.TElemOf(constants.HT_MIGRATION_MODES)),
"Migration mode")
#: Obsolete 'live' migration mode (boolean)
_PMigrationLive = ("live", None, ht.TMaybeBool,
"Legacy setting for live migration, do not use")
#: Tag type
_PTagKind = ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES), None)
#: List of tag strings
_PTags = ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), None)
_PForceVariant = ("force_variant", False, ht.TBool,
"Whether to force an unknown OS variant")
_PWaitForSync = ("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize")
_PIgnoreConsistency = ("ignore_consistency", False, ht.TBool,
"Whether to ignore disk consistency")
_PStorageName = ("name", ht.NoDefault, ht.TMaybeString, "Storage name")
_PUseLocking = ("use_locking", False, ht.TBool,
"Whether to use synchronization")
_PNameCheck = ("name_check", True, ht.TBool, "Whether to check name")
_PNodeGroupAllocPolicy = \
("alloc_policy", None,
ht.TOr(ht.TNone, ht.TElemOf(constants.VALID_ALLOC_POLICIES)),
"Instance allocation policy")
_PGroupNodeParams = ("ndparams", None, ht.TMaybeDict,
"Default node parameters for group")
_PQueryWhat = ("what", ht.NoDefault, ht.TElemOf(constants.QR_VIA_OP),
"Resource(s) to query for")
_PIpCheckDoc = "Whether to ensure instance's IP address is inactive"
#: Do not remember instance state changes
_PNoRemember = ("no_remember", False, ht.TBool,
"Do not remember the state change")
#: Target node for instance migration/failover
_PMigrationTargetNode = ("target_node", None, ht.TMaybeString,
"Target node for shared-storage instances")
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
#: Utility function for L{OpClusterSetParams}
_TestClusterOsList = ht.TOr(ht.TNone,
ht.TListOf(ht.TAnd(ht.TList, ht.TIsLength(2),
ht.TMap(ht.WithDesc("GetFirstItem")(operator.itemgetter(0)),
ht.TElemOf(constants.DDMS_VALUES)))))
# TODO: Generate check from constants.INIC_PARAMS_TYPES
#: Utility function for testing NIC definitions
_TestNicDef = ht.TDictOf(ht.TElemOf(constants.INIC_PARAMS),
ht.TOr(ht.TNone, ht.TNonEmptyString))
_SUMMARY_PREFIX = {
"CLUSTER_": "C_",
"GROUP_": "G_",
"NODE_": "N_",
"INSTANCE_": "I_",
}
def _NameToId(name):
"""Convert an opcode class name to an OP_ID.
@type name: string
@param name: the class name, as OpXxxYyy
@rtype: string
@return: the name in the OP_XXXX_YYYY format
"""
if not name.startswith("Op"):
return None
# Note: (?<=[a-z])(?=[A-Z]) would be ideal, since it wouldn't
# consume any input, and hence we would just have all the elements
# in the list, one by one; but it seems that split doesn't work on
# non-consuming input, hence we have to process the input string a
# bit
name = _OPID_RE.sub(r"\1,\2", name)
elems = name.split(",")
return "_".join(n.upper() for n in elems)
def RequireFileStorage():
"""Checks that file storage is enabled.
While it doesn't really fit into this module, L{utils} was deemed too large
of a dependency to be imported for just one or two functions.
@raise errors.OpPrereqError: when file storage is disabled
"""
if not constants.ENABLE_FILE_STORAGE:
raise errors.OpPrereqError("File storage disabled at configure time",
errors.ECODE_INVAL)
def RequireSharedFileStorage():
"""Checks that shared file storage is enabled.
While it doesn't really fit into this module, L{utils} was deemed too large
of a dependency to be imported for just one or two functions.
@raise errors.OpPrereqError: when shared file storage is disabled
"""
if not constants.ENABLE_SHARED_FILE_STORAGE:
raise errors.OpPrereqError("Shared file storage disabled at"
" configure time", errors.ECODE_INVAL)
@ht.WithDesc("CheckFileStorage")
def _CheckFileStorage(value):
"""Ensures file storage is enabled if used.
"""
if value == constants.DT_FILE:
RequireFileStorage()
elif value == constants.DT_SHARED_FILE:
RequireSharedFileStorage()
return True
_CheckDiskTemplate = ht.TAnd(ht.TElemOf(constants.DISK_TEMPLATES),
_CheckFileStorage)
def _CheckStorageType(storage_type):
"""Ensure a given storage type is valid.
"""
if storage_type not in constants.VALID_STORAGE_TYPES:
raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
errors.ECODE_INVAL)
if storage_type == constants.ST_FILE:
RequireFileStorage()
return True
#: Storage type parameter
_PStorageType = ("storage_type", ht.NoDefault, _CheckStorageType,
"Storage type")
class _AutoOpParamSlots(type):
"""Meta class for opcode definitions.
"""
def __new__(mcs, name, bases, attrs):
"""Called when a class should be created.
@param mcs: The meta class
@param name: Name of created class
@param bases: Base classes
@type attrs: dict
@param attrs: Class attributes
"""
assert "__slots__" not in attrs, \
"Class '%s' defines __slots__ when it should use OP_PARAMS" % name
assert "OP_ID" not in attrs, "Class '%s' defining OP_ID" % name
attrs["OP_ID"] = _NameToId(name)
# Always set OP_PARAMS to avoid duplicates in BaseOpCode.GetAllParams
params = attrs.setdefault("OP_PARAMS", [])
# Use parameter names as slots
slots = [pname for (pname, _, _, _) in params]
assert "OP_DSC_FIELD" not in attrs or attrs["OP_DSC_FIELD"] in slots, \
"Class '%s' uses unknown field in OP_DSC_FIELD" % name
attrs["__slots__"] = slots
return type.__new__(mcs, name, bases, attrs)
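# Illustrative effect of the metaclass above (hypothetical opcode): a class
#   class OpExample(BaseOpCode):
#     OP_PARAMS = [("level", 0, ht.TInt, None)]
# ends up with OP_ID == "OP_EXAMPLE" and __slots__ == ["level"].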
class BaseOpCode(object):
"""A simple serializable object.
This object serves as a parent class for OpCode without any custom
field handling.
"""
# pylint: disable-msg=E1101
# as OP_ID is dynamically defined
__metaclass__ = _AutoOpParamSlots
def __init__(self, **kwargs):
"""Constructor for BaseOpCode.
The constructor takes only keyword arguments and will set
attributes on this object based on the passed arguments. As such,
it means that you should not pass arguments which are not in the
__slots__ attribute for this class.
"""
slots = self._all_slots()
for key in kwargs:
if key not in slots:
raise TypeError("Object %s doesn't support the parameter '%s'" %
(self.__class__.__name__, key))
setattr(self, key, kwargs[key])
def __getstate__(self):
"""Generic serializer.
This method just returns the contents of the instance as a
dictionary.
@rtype: C{dict}
@return: the instance attributes and their values
"""
state = {}
for name in self._all_slots():
if hasattr(self, name):
state[name] = getattr(self, name)
return state
def __setstate__(self, state):
"""Generic unserializer.
This method just restores from the serialized state the attributes
of the current instance.
@param state: the serialized opcode data
@type state: C{dict}
"""
if not isinstance(state, dict):
raise ValueError("Invalid data to __setstate__: expected dict, got %s" %
type(state))
for name in self._all_slots():
if name not in state and hasattr(self, name):
delattr(self, name)
for name in state:
setattr(self, name, state[name])
@classmethod
def _all_slots(cls):
"""Compute the list of all declared slots for a class.
"""
slots = []
for parent in cls.__mro__:
slots.extend(getattr(parent, "__slots__", []))
return slots
@classmethod
def GetAllParams(cls):
"""Compute list of all parameters for an opcode.
"""
slots = []
for parent in cls.__mro__:
slots.extend(getattr(parent, "OP_PARAMS", []))
return slots
def Validate(self, set_defaults):
"""Validate opcode parameters, optionally setting default values.
@type set_defaults: bool
@param set_defaults: Whether to set default values
@raise errors.OpPrereqError: When a parameter value doesn't match
requirements
"""
for (attr_name, default, test, _) in self.GetAllParams():
assert test == ht.NoType or callable(test)
if not hasattr(self, attr_name):
if default == ht.NoDefault:
raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
(self.OP_ID, attr_name),
errors.ECODE_INVAL)
elif set_defaults:
if callable(default):
dval = default()
else:
dval = default
setattr(self, attr_name, dval)
if test == ht.NoType:
# no tests here
continue
if set_defaults or hasattr(self, attr_name):
attr_val = getattr(self, attr_name)
if not test(attr_val):
logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
self.OP_ID, attr_name, type(attr_val), attr_val)
raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
(self.OP_ID, attr_name),
errors.ECODE_INVAL)
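# Illustrative parameter validation (opcode names are taken from the classes
# defined below; the instance name is hypothetical):
#   op = OpInstanceStartup(instance_name="inst1.example.com")
#   op.Validate(set_defaults=True)   # fills in force=False, dry_run=None, ...
#   OpInstanceStartup().Validate(False)  # raises OpPrereqError: missing name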
class OpCode(BaseOpCode):
"""Abstract OpCode.
  This is the root of the actual OpCode hierarchy. All classes derived
from this class should override OP_ID.
@cvar OP_ID: The ID of this opcode. This should be unique amongst all
children of this class.
@cvar OP_DSC_FIELD: The name of a field whose value will be included in the
string returned by Summary(); see the docstring of that
method for details).
@cvar OP_PARAMS: List of opcode attributes, the default values they should
get if not already defined, and types they must match.
@cvar WITH_LU: Boolean that specifies whether this should be included in
mcpu's dispatch table
@ivar dry_run: Whether the LU should be run in dry-run mode, i.e. just
the check steps
@ivar priority: Opcode priority for queue
"""
# pylint: disable-msg=E1101
# as OP_ID is dynamically defined
WITH_LU = True
OP_PARAMS = [
("dry_run", None, ht.TMaybeBool, "Run checks only, don't execute"),
("debug_level", None, ht.TOr(ht.TNone, ht.TPositiveInt), "Debug level"),
("priority", constants.OP_PRIO_DEFAULT,
ht.TElemOf(constants.OP_PRIO_SUBMIT_VALID), "Opcode priority"),
]
def __getstate__(self):
"""Specialized getstate for opcodes.
This method adds to the state dictionary the OP_ID of the class,
so that on unload we can identify the correct class for
instantiating the opcode.
@rtype: C{dict}
@return: the state as a dictionary
"""
data = BaseOpCode.__getstate__(self)
data["OP_ID"] = self.OP_ID
return data
@classmethod
def LoadOpCode(cls, data):
"""Generic load opcode method.
  The method identifies the correct opcode class from the dict form
  by looking for an OP_ID key; if this is not found, or its value is
  not available in this module as a child of this class, we fail.
@type data: C{dict}
@param data: the serialized opcode
"""
if not isinstance(data, dict):
raise ValueError("Invalid data to LoadOpCode (%s)" % type(data))
if "OP_ID" not in data:
raise ValueError("Invalid data to LoadOpcode, missing OP_ID")
op_id = data["OP_ID"]
op_class = None
if op_id in OP_MAPPING:
op_class = OP_MAPPING[op_id]
else:
raise ValueError("Invalid data to LoadOpCode: OP_ID %s unsupported" %
op_id)
op = op_class()
new_data = data.copy()
del new_data["OP_ID"]
op.__setstate__(new_data)
return op
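  # Illustrative round trip through the serialization helpers above
  # (hypothetical instance name):
  #   op = OpInstanceShutdown(instance_name="inst1.example.com")
  #   data = op.__getstate__()      # includes "OP_ID": "OP_INSTANCE_SHUTDOWN"
  #   restored = OpCode.LoadOpCode(data)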
def Summary(self):
"""Generates a summary description of this opcode.
The summary is the value of the OP_ID attribute (without the "OP_"
prefix), plus the value of the OP_DSC_FIELD attribute, if one was
defined; this field should allow to easily identify the operation
(for an instance creation job, e.g., it would be the instance
name).
"""
assert self.OP_ID is not None and len(self.OP_ID) > 3
# all OP_ID start with OP_, we remove that
txt = self.OP_ID[3:]
field_name = getattr(self, "OP_DSC_FIELD", None)
if field_name:
field_value = getattr(self, field_name, None)
if isinstance(field_value, (list, tuple)):
field_value = ",".join(str(i) for i in field_value)
txt = "%s(%s)" % (txt, field_value)
return txt
def TinySummary(self):
"""Generates a compact summary description of the opcode.
"""
assert self.OP_ID.startswith("OP_")
text = self.OP_ID[3:]
for (prefix, supplement) in _SUMMARY_PREFIX.items():
if text.startswith(prefix):
return supplement + text[len(prefix):]
return text
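  # For illustration (hypothetical instance name):
  #   OpInstanceStartup(instance_name="inst1").Summary()     -> "INSTANCE_STARTUP(inst1)"
  #   OpInstanceStartup(instance_name="inst1").TinySummary() -> "I_STARTUP"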
# cluster opcodes
class OpClusterPostInit(OpCode):
"""Post cluster initialization.
This opcode does not touch the cluster at all. Its purpose is to run hooks
after the cluster has been initialized.
"""
class OpClusterDestroy(OpCode):
"""Destroy the cluster.
This opcode has no other parameters. All the state is irreversibly
lost after the execution of this opcode.
"""
class OpClusterQuery(OpCode):
"""Query cluster information."""
class OpClusterVerifyConfig(OpCode):
"""Verify the cluster config.
"""
OP_PARAMS = [
("verbose", False, ht.TBool, None),
("error_codes", False, ht.TBool, None),
("debug_simulate_errors", False, ht.TBool, None),
]
class OpClusterVerifyGroup(OpCode):
"""Run verify on a node group from the cluster.
@type skip_checks: C{list}
@ivar skip_checks: steps to be skipped from the verify process; this
needs to be a subset of
L{constants.VERIFY_OPTIONAL_CHECKS}; currently
only L{constants.VERIFY_NPLUSONE_MEM} can be passed
"""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
("group_name", ht.NoDefault, ht.TNonEmptyString, None),
("skip_checks", ht.EmptyList,
ht.TListOf(ht.TElemOf(constants.VERIFY_OPTIONAL_CHECKS)), None),
("verbose", False, ht.TBool, None),
("error_codes", False, ht.TBool, None),
("debug_simulate_errors", False, ht.TBool, None),
]
class OpClusterVerifyDisks(OpCode):
"""Verify the cluster disks.
Parameters: none
Result: a tuple of four elements:
- list of node names with bad data returned (unreachable, etc.)
- dict of node names with broken volume groups (values: error msg)
- list of instances with degraded disks (that should be activated)
- dict of instances with missing logical volumes (values: (node, vol)
pairs with details about the missing volumes)
In normal operation, all lists should be empty. A non-empty instance
  list (3rd element of the result) is still ok (errors were fixed) but a
  non-empty node list means some node is down, and probably there are
unfixable drbd errors.
Note that only instances that are drbd-based are taken into
consideration. This might need to be revisited in the future.
"""
class OpClusterRepairDiskSizes(OpCode):
"""Verify the disk sizes of the instances and fixes configuration
mimatches.
Parameters: optional instances list, in case we want to restrict the
checks to only a subset of the instances.
Result: a list of tuples, (instance, disk, new-size) for changed
configurations.
In normal operation, the list should be empty.
@type instances: list
@ivar instances: the list of instances to check, or empty for all instances
"""
OP_PARAMS = [
("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
]
class OpClusterConfigQuery(OpCode):
"""Query cluster configuration values."""
OP_PARAMS = [
_POutputFields
]
class OpClusterRename(OpCode):
"""Rename the cluster.
@type name: C{str}
@ivar name: The new name of the cluster. The name and/or the master IP
address will be changed to match the new name and its IP
address.
"""
OP_DSC_FIELD = "name"
OP_PARAMS = [
("name", ht.NoDefault, ht.TNonEmptyString, None),
]
class OpClusterSetParams(OpCode):
"""Change the parameters of the cluster.
@type vg_name: C{str} or C{None}
@ivar vg_name: The new volume group name or None to disable LVM usage.
"""
OP_PARAMS = [
("vg_name", None, ht.TMaybeString, "Volume group name"),
("enabled_hypervisors", None,
ht.TOr(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)), ht.TTrue),
ht.TNone),
"List of enabled hypervisors"),
("hvparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide hypervisor parameter defaults, hypervisor-dependent"),
("beparams", None, ht.TOr(ht.TDict, ht.TNone),
"Cluster-wide backend parameter defaults"),
("os_hvp", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide per-OS hypervisor parameter defaults"),
("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide OS parameter defaults"),
("candidate_pool_size", None, ht.TOr(ht.TStrictPositiveInt, ht.TNone),
"Master candidate pool size"),
("uid_pool", None, ht.NoType,
"Set UID pool, must be list of lists describing UID ranges (two items,"
" start and end inclusive)"),
("add_uids", None, ht.NoType,
"Extend UID pool, must be list of lists describing UID ranges (two"
" items, start and end inclusive) to be added"),
("remove_uids", None, ht.NoType,
"Shrink UID pool, must be list of lists describing UID ranges (two"
" items, start and end inclusive) to be removed"),
("maintain_node_health", None, ht.TMaybeBool,
"Whether to automatically maintain node health"),
("prealloc_wipe_disks", None, ht.TMaybeBool,
"Whether to wipe disks before allocating them to instances"),
("nicparams", None, ht.TMaybeDict, "Cluster-wide NIC parameter defaults"),
("ndparams", None, ht.TMaybeDict, "Cluster-wide node parameter defaults"),
("drbd_helper", None, ht.TOr(ht.TString, ht.TNone), "DRBD helper program"),
("default_iallocator", None, ht.TOr(ht.TString, ht.TNone),
"Default iallocator for cluster"),
("master_netdev", None, ht.TOr(ht.TString, ht.TNone),
"Master network device"),
("reserved_lvs", None, ht.TOr(ht.TListOf(ht.TNonEmptyString), ht.TNone),
"List of reserved LVs"),
("hidden_os", None, _TestClusterOsList,
"Modify list of hidden operating systems. Each modification must have"
" two items, the operation and the OS name. The operation can be"
" ``%s`` or ``%s``." % (constants.DDM_ADD, constants.DDM_REMOVE)),
("blacklisted_os", None, _TestClusterOsList,
"Modify list of blacklisted operating systems. Each modification must have"
" two items, the operation and the OS name. The operation can be"
" ``%s`` or ``%s``." % (constants.DDM_ADD, constants.DDM_REMOVE)),
]
class OpClusterRedistConf(OpCode):
"""Force a full push of the cluster configuration.
"""
class OpQuery(OpCode):
"""Query for resources/items.
@ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
@ivar fields: List of fields to retrieve
@ivar filter: Query filter
"""
OP_PARAMS = [
_PQueryWhat,
("fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Requested fields"),
("filter", None, ht.TOr(ht.TNone, ht.TListOf),
"Query filter"),
]
class OpQueryFields(OpCode):
"""Query for available resource/item fields.
@ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
@ivar fields: List of fields to retrieve
"""
OP_PARAMS = [
_PQueryWhat,
("fields", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
"Requested fields; if not given, all are returned"),
]
class OpOobCommand(OpCode):
"""Interact with OOB."""
OP_PARAMS = [
("node_names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"List of nodes to run the OOB command against"),
("command", None, ht.TElemOf(constants.OOB_COMMANDS),
"OOB command to be run"),
("timeout", constants.OOB_TIMEOUT, ht.TInt,
"Timeout before the OOB helper will be terminated"),
("ignore_status", False, ht.TBool,
"Ignores the node offline status for power off"),
("power_delay", constants.OOB_POWER_DELAY, ht.TPositiveFloat,
"Time in seconds to wait between powering on nodes"),
]
# node opcodes
class OpNodeRemove(OpCode):
"""Remove a node.
@type node_name: C{str}
@ivar node_name: The name of the node to remove. If the node still has
instances on it, the operation will fail.
"""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
]
class OpNodeAdd(OpCode):
"""Add a node to the cluster.
@type node_name: C{str}
@ivar node_name: The name of the node to add. This can be a short name,
but it will be expanded to the FQDN.
@type primary_ip: IP address
@ivar primary_ip: The primary IP of the node. This will be ignored when the
opcode is submitted, but will be filled during the node
add (so it will be visible in the job query).
@type secondary_ip: IP address
@ivar secondary_ip: The secondary IP of the node. This needs to be passed
if the cluster has been initialized in 'dual-network'
mode, otherwise it must not be given.
@type readd: C{bool}
@ivar readd: Whether to re-add an existing node to the cluster. If
this is not passed, then the operation will abort if the node
name is already in the cluster; use this parameter to 'repair'
a node that had its configuration broken, or was reinstalled
without removal from the cluster.
@type group: C{str}
@ivar group: The node group to which this node will belong.
@type vm_capable: C{bool}
@ivar vm_capable: The vm_capable node attribute
@type master_capable: C{bool}
@ivar master_capable: The master_capable node attribute
"""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
("primary_ip", None, ht.NoType, "Primary IP address"),
("secondary_ip", None, ht.TMaybeString, "Secondary IP address"),
("readd", False, ht.TBool, "Whether node is re-added to cluster"),
("group", None, ht.TMaybeString, "Initial node group"),
("master_capable", None, ht.TMaybeBool,
"Whether node can become master or master candidate"),
("vm_capable", None, ht.TMaybeBool,
"Whether node can host instances"),
("ndparams", None, ht.TMaybeDict, "Node parameters"),
]
class OpNodeQuery(OpCode):
"""Compute the list of nodes."""
OP_PARAMS = [
_POutputFields,
_PUseLocking,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
class OpNodeQueryvols(OpCode):
"""Get list of volumes on node."""
OP_PARAMS = [
_POutputFields,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
class OpNodeQueryStorage(OpCode):
"""Get information on storage for node(s)."""
OP_PARAMS = [
_POutputFields,
_PStorageType,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "List of nodes"),
("name", None, ht.TMaybeString, "Storage name"),
]
class OpNodeModifyStorage(OpCode):
"""Modifies the properies of a storage unit"""
OP_PARAMS = [
_PNodeName,
_PStorageType,
_PStorageName,
("changes", ht.NoDefault, ht.TDict, "Requested changes"),
]
class OpRepairNodeStorage(OpCode):
"""Repairs the volume group on a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PStorageType,
_PStorageName,
_PIgnoreConsistency,
]
class OpNodeSetParams(OpCode):
"""Change the parameters of a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PForce,
("master_candidate", None, ht.TMaybeBool,
"Whether the node should become a master candidate"),
("offline", None, ht.TMaybeBool,
"Whether the node should be marked as offline"),
("drained", None, ht.TMaybeBool,
"Whether the node should be marked as drained"),
("auto_promote", False, ht.TBool,
"Whether node(s) should be promoted to master candidate if necessary"),
("master_capable", None, ht.TMaybeBool,
"Denote whether node can become master or master candidate"),
("vm_capable", None, ht.TMaybeBool,
"Denote whether node can host instances"),
("secondary_ip", None, ht.TMaybeString,
"Change node's secondary IP address"),
("ndparams", None, ht.TMaybeDict, "Set node parameters"),
("powered", None, ht.TMaybeBool,
"Whether the node should be marked as powered"),
]
class OpNodePowercycle(OpCode):
"""Tries to powercycle a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PForce,
]
class OpNodeMigrate(OpCode):
"""Migrate all instances from a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PMigrationMode,
_PMigrationLive,
_PMigrationTargetNode,
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
]
class OpNodeEvacStrategy(OpCode):
"""Compute the evacuation strategy for a list of nodes."""
OP_DSC_FIELD = "nodes"
OP_PARAMS = [
("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), None),
("remote_node", None, ht.TMaybeString, None),
("iallocator", None, ht.TMaybeString, None),
]
# instance opcodes
class OpInstanceCreate(OpCode):
"""Create an instance.
@ivar instance_name: Instance name
@ivar mode: Instance creation mode (one of L{constants.INSTANCE_CREATE_MODES})
@ivar source_handshake: Signed handshake from source (remote import only)
@ivar source_x509_ca: Source X509 CA in PEM format (remote import only)
@ivar source_instance_name: Previous name of instance (remote import only)
@ivar source_shutdown_timeout: Shutdown timeout used for source instance
(remote import only)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForceVariant,
_PWaitForSync,
_PNameCheck,
("beparams", ht.EmptyDict, ht.TDict, "Backend parameters for instance"),
("disks", ht.NoDefault,
# TODO: Generate check from constants.IDISK_PARAMS_TYPES
ht.TListOf(ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
ht.TOr(ht.TNonEmptyString, ht.TInt))),
"Disk descriptions, for example ``[{\"%s\": 100}, {\"%s\": 5}]``;"
" each disk definition must contain a ``%s`` value and"
" can contain an optional ``%s`` value denoting the disk access mode"
" (%s)" %
(constants.IDISK_SIZE, constants.IDISK_SIZE, constants.IDISK_SIZE,
constants.IDISK_MODE,
" or ".join("``%s``" % i for i in sorted(constants.DISK_ACCESS_SET)))),
("disk_template", ht.NoDefault, _CheckDiskTemplate, "Disk template"),
("file_driver", None, ht.TOr(ht.TNone, ht.TElemOf(constants.FILE_DRIVER)),
"Driver for file-backed disks"),
("file_storage_dir", None, ht.TMaybeString,
"Directory for storing file-backed disks"),
("hvparams", ht.EmptyDict, ht.TDict,
"Hypervisor parameters for instance, hypervisor-dependent"),
("hypervisor", None, ht.TMaybeString, "Hypervisor"),
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding which node(s) to use"),
("identify_defaults", False, ht.TBool,
"Reset instance parameters to default if equal"),
("ip_check", True, ht.TBool, _PIpCheckDoc),
("mode", ht.NoDefault, ht.TElemOf(constants.INSTANCE_CREATE_MODES),
"Instance creation mode"),
("nics", ht.NoDefault, ht.TListOf(_TestNicDef),
"List of NIC (network interface) definitions, for example"
" ``[{}, {}, {\"%s\": \"198.51.100.4\"}]``; each NIC definition can"
" contain the optional values %s" %
(constants.INIC_IP,
", ".join("``%s``" % i for i in sorted(constants.INIC_PARAMS)))),
("no_install", None, ht.TMaybeBool,
"Do not install the OS (will disable automatic start)"),
("osparams", ht.EmptyDict, ht.TDict, "OS parameters for instance"),
("os_type", None, ht.TMaybeString, "Operating system"),
("pnode", None, ht.TMaybeString, "Primary node"),
("snode", None, ht.TMaybeString, "Secondary node"),
("source_handshake", None, ht.TOr(ht.TList, ht.TNone),
"Signed handshake from source (remote import only)"),
("source_instance_name", None, ht.TMaybeString,
"Source instance name (remote import only)"),
("source_shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
ht.TPositiveInt,
"How long source instance was given to shut down (remote import only)"),
("source_x509_ca", None, ht.TMaybeString,
"Source X509 CA in PEM format (remote import only)"),
("src_node", None, ht.TMaybeString, "Source node for import"),
("src_path", None, ht.TMaybeString, "Source directory for import"),
("start", True, ht.TBool, "Whether to start instance after creation"),
("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Instance tags"),
]
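# Illustrative instantiation (all values hypothetical; ``mode`` must be a
# member of constants.INSTANCE_CREATE_MODES and ``disk_template`` one of the
# valid disk templates):
#   OpInstanceCreate(instance_name="inst1.example.com", mode="create",
#                    disk_template="plain", disks=[{"size": 1024}],
#                    nics=[{}], os_type="debian-image", pnode="node1")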
class OpInstanceReinstall(OpCode):
"""Reinstall an instance's OS."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForceVariant,
("os_type", None, ht.TMaybeString, "Instance operating system"),
("osparams", None, ht.TMaybeDict, "Temporary OS parameters"),
]
class OpInstanceRemove(OpCode):
"""Remove an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
("ignore_failures", False, ht.TBool,
"Whether to ignore failures during removal"),
]
class OpInstanceRename(OpCode):
"""Rename an instance."""
OP_PARAMS = [
_PInstanceName,
_PNameCheck,
("new_name", ht.NoDefault, ht.TNonEmptyString, "New instance name"),
("ip_check", False, ht.TBool, _PIpCheckDoc),
]
class OpInstanceStartup(OpCode):
"""Startup an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
_PIgnoreOfflineNodes,
("hvparams", ht.EmptyDict, ht.TDict,
"Temporary hypervisor parameters, hypervisor-dependent"),
("beparams", ht.EmptyDict, ht.TDict, "Temporary backend parameters"),
_PNoRemember,
]
class OpInstanceShutdown(OpCode):
"""Shutdown an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PIgnoreOfflineNodes,
("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt,
"How long to wait for instance to shut down"),
_PNoRemember,
]
class OpInstanceReboot(OpCode):
"""Reboot an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
("ignore_secondaries", False, ht.TBool,
"Whether to start the instance even if secondary disks are failing"),
("reboot_type", ht.NoDefault, ht.TElemOf(constants.REBOOT_TYPES),
"How to reboot instance"),
]
class OpInstanceReplaceDisks(OpCode):
"""Replace the disks of an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES),
"Replacement mode"),
("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
"Disk indexes"),
("remote_node", None, ht.TMaybeString, "New secondary node"),
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding new secondary node"),
("early_release", False, ht.TBool,
"Whether to release locks as soon as possible"),
]
class OpInstanceFailover(OpCode):
"""Failover an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
_PIgnoreConsistency,
_PMigrationTargetNode,
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
]
class OpInstanceMigrate(OpCode):
"""Migrate an instance.
  This migrates an instance (without shutting it down) to its
  secondary node.
@ivar instance_name: the name of the instance
@ivar mode: the migration mode (live, non-live or None for auto)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PMigrationMode,
_PMigrationLive,
_PMigrationTargetNode,
("cleanup", False, ht.TBool,
"Whether a previously failed migration should be cleaned up"),
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
("allow_failover", False, ht.TBool,
"Whether we can fallback to failover if migration is not possible"),
]
class OpInstanceMove(OpCode):
"""Move an instance.
  This moves an instance (shutting it down and copying its data) to an
  arbitrary node.
@ivar instance_name: the name of the instance
@ivar target_node: the destination node
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
("target_node", ht.NoDefault, ht.TNonEmptyString, "Target node"),
_PIgnoreConsistency,
]
class OpInstanceConsole(OpCode):
"""Connect to an instance's console."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName
]
class OpInstanceActivateDisks(OpCode):
"""Activate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("ignore_size", False, ht.TBool, "Whether to ignore recorded size"),
]
class OpInstanceDeactivateDisks(OpCode):
"""Deactivate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
]
class OpInstanceRecreateDisks(OpCode):
"""Deactivate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
"List of disk indexes"),
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"New instance nodes, if relocation is desired"),
]
class OpInstanceQuery(OpCode):
"""Compute the list of instances."""
OP_PARAMS = [
_POutputFields,
_PUseLocking,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all instances, instance names otherwise"),
]
class OpInstanceQueryData(OpCode):
"""Compute the run-time status of instances."""
OP_PARAMS = [
_PUseLocking,
("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Instance names"),
("static", False, ht.TBool,
"Whether to only return configuration data without querying"
" nodes"),
]
class OpInstanceSetParams(OpCode):
"""Change the parameters of an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
_PForceVariant,
# TODO: Use _TestNicDef
("nics", ht.EmptyList, ht.TList,
"List of NIC changes. Each item is of the form ``(op, settings)``."
" ``op`` can be ``%s`` to add a new NIC with the specified settings,"
" ``%s`` to remove the last NIC or a number to modify the settings"
" of the NIC with that index." %
(constants.DDM_ADD, constants.DDM_REMOVE)),
("disks", ht.EmptyList, ht.TList, "List of disk changes. See ``nics``."),
("beparams", ht.EmptyDict, ht.TDict, "Per-instance backend parameters"),
("hvparams", ht.EmptyDict, ht.TDict,
"Per-instance hypervisor parameters, hypervisor-dependent"),
("disk_template", None, ht.TOr(ht.TNone, _CheckDiskTemplate),
"Disk template for instance"),
("remote_node", None, ht.TMaybeString,
"Secondary node (used when changing disk template)"),
("os_name", None, ht.TMaybeString,
"Change instance's OS name. Does not reinstall the instance."),
("osparams", None, ht.TMaybeDict, "Per-instance OS parameters"),
("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize, when changing template"),
]
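# Illustrative modification lists for the "nics"/"disks" parameters above
# (hypothetical settings dicts):
#   nics=[(constants.DDM_ADD, {"ip": "198.51.100.4"})]   # add a NIC
#   disks=[(constants.DDM_REMOVE, {})]                    # drop the last disk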
class OpInstanceGrowDisk(OpCode):
"""Grow a disk of an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PWaitForSync,
("disk", ht.NoDefault, ht.TInt, "Disk index"),
("amount", ht.NoDefault, ht.TInt,
"Amount of disk space to add (megabytes)"),
]
# Node group opcodes
class OpGroupAdd(OpCode):
"""Add a node group to the cluster."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
]
class OpGroupAssignNodes(OpCode):
"""Assign nodes to a node group."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PForce,
("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"List of nodes to assign"),
]
class OpGroupQuery(OpCode):
"""Compute the list of node groups."""
OP_PARAMS = [
_POutputFields,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all groups, group names otherwise"),
]
class OpGroupSetParams(OpCode):
"""Change the parameters of a node group."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
]
class OpGroupRemove(OpCode):
"""Remove a node group from the cluster."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
]
class OpGroupRename(OpCode):
"""Rename a node group in the cluster."""
OP_PARAMS = [
_PGroupName,
("new_name", ht.NoDefault, ht.TNonEmptyString, "New group name"),
]
# OS opcodes
class OpOsDiagnose(OpCode):
"""Compute the list of guest operating systems."""
OP_PARAMS = [
_POutputFields,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Which operating systems to diagnose"),
]
# Exports opcodes
class OpBackupQuery(OpCode):
"""Compute the list of exported images."""
OP_PARAMS = [
_PUseLocking,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
class OpBackupPrepare(OpCode):
"""Prepares an instance export.
@ivar instance_name: Instance name
@ivar mode: Export mode (one of L{constants.EXPORT_MODES})
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("mode", ht.NoDefault, ht.TElemOf(constants.EXPORT_MODES),
"Export mode"),
]
class OpBackupExport(OpCode):
"""Export an instance.
For local exports, the export destination is the node name. For remote
exports, the export destination is a list of tuples, each consisting of
hostname/IP address, port, HMAC and HMAC salt. The HMAC is calculated using
the cluster domain secret over the value "${index}:${hostname}:${port}". The
destination X509 CA must be a signed certificate.
@ivar mode: Export mode (one of L{constants.EXPORT_MODES})
@ivar target_node: Export destination
@ivar x509_key_name: X509 key to use (remote export only)
@ivar destination_x509_ca: Destination X509 CA in PEM format (remote export
only)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
# TODO: Rename target_node as it changes meaning for different export modes
# (e.g. "destination")
("target_node", ht.NoDefault, ht.TOr(ht.TNonEmptyString, ht.TList),
"Destination information, depends on export mode"),
("shutdown", True, ht.TBool, "Whether to shutdown instance before export"),
("remove_instance", False, ht.TBool,
"Whether to remove instance after export"),
("ignore_remove_failures", False, ht.TBool,
"Whether to ignore failures while removing instances"),
("mode", constants.EXPORT_MODE_LOCAL, ht.TElemOf(constants.EXPORT_MODES),
"Export mode"),
("x509_key_name", None, ht.TOr(ht.TList, ht.TNone),
"Name of X509 key (remote export only)"),
("destination_x509_ca", None, ht.TMaybeString,
"Destination X509 CA (remote export only)"),
]
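# Illustrative remote-export destination for the opcode above (hypothetical
# endpoint; tuple layout taken from the class docstring: host/IP, port, HMAC,
# HMAC salt):
#   target_node=[("198.51.100.10", 11000, "<hmac>", "<hmac-salt>")]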
class OpBackupRemove(OpCode):
"""Remove an instance's export."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
]
# Tags opcodes
class OpTagsGet(OpCode):
"""Returns the tags of the given object."""
OP_DSC_FIELD = "name"
OP_PARAMS = [
_PTagKind,
# Name is only meaningful for nodes and instances
("name", ht.NoDefault, ht.TMaybeString, None),
]
class OpTagsSearch(OpCode):
"""Searches the tags in the cluster for a given pattern."""
OP_DSC_FIELD = "pattern"
OP_PARAMS = [
("pattern", ht.NoDefault, ht.TNonEmptyString, None),
]
class OpTagsSet(OpCode):
"""Add a list of tags on a given object."""
OP_PARAMS = [
_PTagKind,
_PTags,
# Name is only meaningful for nodes and instances
("name", ht.NoDefault, ht.TMaybeString, None),
]
class OpTagsDel(OpCode):
"""Remove a list of tags from a given object."""
OP_PARAMS = [
_PTagKind,
_PTags,
# Name is only meaningful for nodes and instances
("name", ht.NoDefault, ht.TMaybeString, None),
]
# Test opcodes
class OpTestDelay(OpCode):
"""Sleeps for a configured amount of time.
This is used just for debugging and testing.
Parameters:
- duration: the time to sleep
- on_master: if true, sleep on the master
- on_nodes: list of nodes in which to sleep
If the on_master parameter is true, it will execute a sleep on the
master (before any node sleep).
If the on_nodes list is not empty, it will sleep on those nodes
(after the sleep on the master, if that is enabled).
As an additional feature, the case of duration < 0 will be reported
as an execution error, so this opcode can be used as a failure
generator. The case of duration == 0 will not be treated specially.
"""
OP_DSC_FIELD = "duration"
OP_PARAMS = [
("duration", ht.NoDefault, ht.TFloat, None),
("on_master", True, ht.TBool, None),
("on_nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
("repeat", 0, ht.TPositiveInt, None),
]
class OpTestAllocator(OpCode):
"""Allocator framework testing.
This opcode has two modes:
- gather and return allocator input for a given mode (allocate new
or replace secondary) and a given instance definition (direction
'in')
- run a selected allocator for a given operation (as above) and
return the allocator output (direction 'out')
"""
OP_DSC_FIELD = "allocator"
OP_PARAMS = [
("direction", ht.NoDefault,
ht.TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS), None),
("mode", ht.NoDefault, ht.TElemOf(constants.VALID_IALLOCATOR_MODES), None),
("name", ht.NoDefault, ht.TNonEmptyString, None),
("nics", ht.NoDefault, ht.TOr(ht.TNone, ht.TListOf(
ht.TDictOf(ht.TElemOf([constants.INIC_MAC, constants.INIC_IP, "bridge"]),
ht.TOr(ht.TNone, ht.TNonEmptyString)))), None),
("disks", ht.NoDefault, ht.TOr(ht.TNone, ht.TList), None),
("hypervisor", None, ht.TMaybeString, None),
("allocator", None, ht.TMaybeString, None),
("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
("memory", None, ht.TOr(ht.TNone, ht.TPositiveInt), None),
("vcpus", None, ht.TOr(ht.TNone, ht.TPositiveInt), None),
("os", None, ht.TMaybeString, None),
("disk_template", None, ht.TMaybeString, None),
("evac_nodes", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
None),
("instances", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
None),
("evac_mode", None,
ht.TOr(ht.TNone, ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)), None),
("target_groups", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
None),
]
class OpTestJqueue(OpCode):
"""Utility opcode to test some aspects of the job queue.
"""
OP_PARAMS = [
("notify_waitlock", False, ht.TBool, None),
("notify_exec", False, ht.TBool, None),
("log_messages", ht.EmptyList, ht.TListOf(ht.TString), None),
("fail", False, ht.TBool, None),
]
class OpTestDummy(OpCode):
"""Utility opcode used by unittests.
"""
OP_PARAMS = [
("result", ht.NoDefault, ht.NoType, None),
("messages", ht.NoDefault, ht.NoType, None),
("fail", ht.NoDefault, ht.NoType, None),
("submit_jobs", None, ht.NoType, None),
]
WITH_LU = False
def _GetOpList():
"""Returns list of all defined opcodes.
Does not eliminate duplicates by C{OP_ID}.
"""
return [v for v in globals().values()
if (isinstance(v, type) and issubclass(v, OpCode) and
hasattr(v, "OP_ID") and v is not OpCode)]
OP_MAPPING = dict((v.OP_ID, v) for v in _GetOpList())
|
ekohl/ganeti
|
lib/opcodes.py
|
Python
|
gpl-2.0
| 47,432 | 0.005081 |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('run_workflow')
@click.argument("workflow_id", type=str)
@click.option(
"--dataset_map",
help="A mapping of workflow inputs to datasets. The datasets source can be a LibraryDatasetDatasetAssociation (``ldda``), LibraryDataset (``ld``), or HistoryDatasetAssociation (``hda``). The map must be in the following format: ``{'<input>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda]'}}`` (e.g. ``{'23': {'id': '29beef4fadeed09f', 'src': 'ld'}}``)",
type=str
)
@click.option(
"--params",
help="A mapping of non-datasets tool parameters (see below)",
type=str
)
@click.option(
"--history_id",
help="The encoded history ID where to store the workflow output. Alternatively, ``history_name`` may be specified to create a new history.",
type=str
)
@click.option(
"--history_name",
help="Create a new history with the given name to store the workflow output. If both ``history_id`` and ``history_name`` are provided, ``history_name`` is ignored. If neither is specified, a new 'Unnamed history' is created.",
type=str
)
@click.option(
"--import_inputs_to_history",
help="If ``True``, used workflow inputs will be imported into the history. If ``False``, only workflow outputs will be visible in the given history.",
is_flag=True
)
@click.option(
"--replacement_params",
help="pattern-based replacements for post-job actions (see below)",
type=str
)
@pass_context
@custom_exception
@json_output
def cli(ctx, workflow_id, dataset_map="", params="", history_id="", history_name="", import_inputs_to_history=False, replacement_params=""):
"""Run the workflow identified by ``workflow_id``.
Output:
A dict containing the history ID where the outputs are placed
as well as output dataset IDs. For example::
{'history': '64177123325c9cfd',
'outputs': ['aa4d3084af404259']}
The ``params`` dict should be specified as follows::
{STEP_ID: PARAM_DICT, ...}
where PARAM_DICT is::
{PARAM_NAME: VALUE, ...}
For backwards compatibility, the following (deprecated) format is
also supported for ``params``::
{TOOL_ID: PARAM_DICT, ...}
in which case PARAM_DICT affects all steps with the given tool id.
If both by-tool-id and by-step-id specifications are used, the
latter takes precedence.
Finally (again, for backwards compatibility), PARAM_DICT can also
be specified as::
{'param': PARAM_NAME, 'value': VALUE}
Note that this format allows only one parameter to be set per step.
The ``replacement_params`` dict should map parameter names in
post-job actions (PJAs) to their runtime values. For
instance, if the final step has a PJA like the following::
{'RenameDatasetActionout_file1': {'action_arguments': {'newname': '${output}'},
'action_type': 'RenameDatasetAction',
'output_name': 'out_file1'}}
then the following renames the output dataset to 'foo'::
replacement_params = {'output': 'foo'}
see also `this email thread
<http://lists.bx.psu.edu/pipermail/galaxy-dev/2011-September/006875.html>`_.
.. warning::
This method waits for the whole workflow to be scheduled before
returning and does not scale to large workflows as a result. This
method has therefore been deprecated in favor of
:meth:`invoke_workflow`, which also features improved default
behavior for dataset input handling.
"""
return ctx.gi.workflows.run_workflow(workflow_id, dataset_map=dataset_map, params=params, history_id=history_id, history_name=history_name, import_inputs_to_history=import_inputs_to_history, replacement_params=replacement_params)
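# Illustrative CLI invocation (assuming this module is wired up as the
# ``parsec workflows run_workflow`` subcommand; the IDs are the hypothetical
# examples used in the help text above):
#   parsec workflows run_workflow 64177123325c9cfd \
#       --history_name "Mapping run" \
#       --dataset_map '{"23": {"id": "29beef4fadeed09f", "src": "ld"}}'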
|
galaxy-iuc/parsec
|
parsec/commands/workflows/run_workflow.py
|
Python
|
apache-2.0
| 4,029 | 0.001986 |
import copy
import secrets
races = {}
colors = {
'🐶': 0xccd6dd,
'🐱': 0xffcb4e,
'🐭': 0x99aab5,
'🐰': 0x99aab5,
'🐙': 0x9266cc,
'🐠': 0xffcc4d,
'🦊': 0xf4900c,
'🦀': 0xbe1931,
'🐸': 0x77b255,
'🐧': 0xf5f8fa
}
names = {
'🐶': 'dog',
'🐱': 'cat',
'🐭': 'mouse',
'🐰': 'rabbit',
'🐙': 'octopus',
'🐠': 'fish',
'🦊': 'fox',
'🦀': 'crab',
'🐸': 'frog',
'🐧': 'penguin'
}
participant_icons = ['🐶', '🐱', '🐭', '🐰', '🐙', '🐠', '🦊', '🦀', '🐸', '🐧']
def make_race(channel_id, buyin):
icon_copy = copy.deepcopy(participant_icons)
race_data = {
'icons': icon_copy,
'users': [],
'buyin': buyin
}
races.update({channel_id: race_data})
def add_participant(channel_id, user):
race = races[channel_id]
icons = race['icons']
users = race['users']
usr_icon = secrets.choice(icons)
icons.remove(usr_icon)
race.update({'icons': icons})
participant_data = {
'user': user,
'icon': usr_icon
}
users.append(participant_data)
race.update({'users': users})
races.update({channel_id: race})
return usr_icon
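# Illustrative flow (hypothetical channel id and user object):
#   make_race(1234, buyin=50)
#   icon = add_participant(1234, some_user)   # e.g. returns '🦊'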
|
AXAz0r/apex-sigma-core
|
sigma/modules/minigames/racing/nodes/race_storage.py
|
Python
|
gpl-3.0
| 1,238 | 0 |
#!/usr/bin/python3
import tornado.ioloop
import tornado.options
import tornado.httpserver
from routes import routes_setup
import settings
clients = []
if __name__ == '__main__':
tornado.options.parse_command_line()
tornado.options.define('log_file_max_size', default=str(10*1024*1024))
tornado.options.define('log_file_prefix', default='router.log')
app = tornado.web.Application(routes_setup(), **settings.settings)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(settings.PORT)
tornado.ioloop.IOLoop.current().start()
|
andrewisakov/taximaster_x
|
router/main.py
|
Python
|
unlicense
| 572 | 0 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EmailShareStatistic'
db.create_table('statistic_emailsharestatistic', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True)),
))
db.send_create_signal('statistic', ['EmailShareStatistic'])
# Adding model 'TweeterShareStatistic'
db.create_table('statistic_tweetersharestatistic', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True)),
))
db.send_create_signal('statistic', ['TweeterShareStatistic'])
# Adding model 'FBShareStatistic'
db.create_table('statistic_fbsharestatistic', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True)),
))
db.send_create_signal('statistic', ['FBShareStatistic'])
# Adding model 'SubtitleFetchStatistic'
db.create_table('statistic_subtitlefetchstatistic', (
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['videos.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('language', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('statistic', ['SubtitleFetchStatistic'])
def backwards(self, orm):
# Deleting model 'EmailShareStatistic'
db.delete_table('statistic_emailsharestatistic')
# Deleting model 'TweeterShareStatistic'
db.delete_table('statistic_tweetersharestatistic')
# Deleting model 'FBShareStatistic'
db.delete_table('statistic_fbsharestatistic')
# Deleting model 'SubtitleFetchStatistic'
db.delete_table('statistic_subtitlefetchstatistic')
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'statistic.emailsharestatistic': {
'Meta': {'object_name': 'EmailShareStatistic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'statistic.fbsharestatistic': {
'Meta': {'object_name': 'FBShareStatistic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'statistic.subtitlefetchstatistic': {
'Meta': {'object_name': 'SubtitleFetchStatistic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'statistic.tweetersharestatistic': {
'Meta': {'object_name': 'TweeterShareStatistic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['statistic']
|
ujdhesa/unisubs
|
apps/statistic/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 10,707 | 0.008219 |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Functions for evaluating results on Cityscapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import logging
import os
import uuid
import pycocotools.mask as mask_util
from detectron.core.config import cfg
from detectron.datasets.dataset_catalog import get_raw_dir
logger = logging.getLogger(__name__)
def evaluate_masks(
json_dataset,
all_boxes,
all_segms,
output_dir,
use_salt=True,
cleanup=False
):
if cfg.CLUSTER.ON_CLUSTER:
# On the cluster avoid saving these files in the job directory
output_dir = '/tmp'
res_file = os.path.join(
output_dir, 'segmentations_' + json_dataset.name + '_results')
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
results_dir = os.path.join(output_dir, 'results')
if not os.path.exists(results_dir):
os.mkdir(results_dir)
os.environ['CITYSCAPES_DATASET'] = get_raw_dir(json_dataset.name)
os.environ['CITYSCAPES_RESULTS'] = output_dir
# Load the Cityscapes eval script *after* setting the required env vars,
# since the script reads their values into global variables (at load time).
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling \
as cityscapes_eval
roidb = json_dataset.get_roidb()
for i, entry in enumerate(roidb):
im_name = entry['image']
basename = os.path.splitext(os.path.basename(im_name))[0]
txtname = os.path.join(output_dir, basename + 'pred.txt')
with open(txtname, 'w') as fid_txt:
if i % 10 == 0:
logger.info('i: {}: {}'.format(i, basename))
for j in range(1, len(all_segms)):
clss = json_dataset.classes[j]
clss_id = cityscapes_eval.name2label[clss].id
segms = all_segms[j][i]
boxes = all_boxes[j][i]
if segms == []:
continue
masks = mask_util.decode(segms)
for k in range(boxes.shape[0]):
score = boxes[k, -1]
mask = masks[:, :, k]
pngname = os.path.join(
'results',
basename + '_' + clss + '_{}.png'.format(k))
# write txt
fid_txt.write('{} {} {}\n'.format(pngname, clss_id, score))
# save mask
cv2.imwrite(os.path.join(output_dir, pngname), mask * 255)
logger.info('Evaluating...')
cityscapes_eval.main([])
return None
|
facebookresearch/Detectron
|
detectron/datasets/cityscapes_json_dataset_evaluator.py
|
Python
|
apache-2.0
| 3,355 | 0 |
'''
New Integration Test for hybrid.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
test_obj_dict = test_state.TestStateDict()
test_stub = test_lib.lib_get_test_stub()
hybrid = test_stub.HybridObject()
def test():
hybrid.add_datacenter_iz()
hybrid.del_iz()
test_util.test_pass('Add Delete Identity Zone Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/hybrid/test_add_iz.py
|
Python
|
apache-2.0
| 623 | 0.004815 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This file is in the public domain
### END LICENSE
"""Provides a shared preferences dictionary"""
from desktopcouch.records.server import CouchDatabase
from desktopcouch.records.record import Record
import gtk
import gobject
class User_dict(dict):
''' a dictionary with extra methods:
persistence: load, save and db_connect
gobject signals: connect and emit.
Don't use this directly. Please use the preferences instance.'''
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
# Set up couchdb.
self._db_name = "brazo"
self._key = None
self._database = None
self._record_type = (
"http://wiki.ubuntu.com/Quickly/RecordTypes/Brazo/"
"Preferences")
class Publisher(gtk.Invisible): # pylint: disable=R0904
'''set up signals in a separate class
gtk.Invisible has 230 public methods'''
__gsignals__ = {'changed' : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
'loaded' : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,))}
publisher = Publisher()
self.emit = publisher.emit
self.connect = publisher.connect
def db_connect(self):
'''connect to couchdb
create if necessary'''
# logging.basicConfig will be called now
self._database = CouchDatabase(self._db_name, create=True)
def save(self):
'save to couchdb'
self._database.update_fields(self._key, self)
def load(self):
'load from couchdb'
self.update({"record_type": self._record_type})
results = self._database.get_records(
record_type=self._record_type, create_view=True)
if len(results.rows) == 0:
# No preferences have ever been saved
# save them before returning.
self._key = self._database.put_record(Record(self))
else:
self.update(results.rows[0].value)
del self['_rev']
self._key = results.rows[0].value["_id"]
self.emit('loaded', None)
def update(self, *args, **kwds):
''' interface for dictionary
send changed signal when appropriate '''
# parse args
new_data = {}
new_data.update(*args, **kwds)
changed_keys = []
for key in new_data.keys():
if new_data.get(key) != dict.get(self, key):
changed_keys.append(key)
dict.update(self, new_data)
if changed_keys:
self.emit('changed', tuple(changed_keys))
def __setitem__(self, key, value):
''' interface for dictionary
send changed signal when appropriate '''
if value != dict.get(self, key):
dict.__setitem__(self, key, value)
self.emit('changed', (key,))
preferences = User_dict()
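# --- Illustrative usage sketch (not part of the original module) ---
# The User_dict docstring above says to use the shared ``preferences``
# instance rather than the class itself. Assuming this module is importable
# as ``brazo_lib.preferences`` (the path it ships under), typical wiring
# would look roughly like this; the 'changed'/'loaded' signal names come from
# the Publisher created in __init__, and the callback's first argument is
# that gtk.Invisible publisher object:
#
#     from brazo_lib.preferences import preferences
#
#     def on_changed(sender, changed_keys):
#         print('preferences changed:', changed_keys)
#
#     preferences.connect('changed', on_changed)
#     preferences.db_connect()        # open or create the couchdb database
#     preferences.load()              # emits 'loaded'
#     preferences['theme'] = 'dark'   # emits 'changed' with ('theme',)
#     preferences.save()              # writes the dict back to couchdb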
|
nickpascucci/Robot-Arm
|
software/desktop/brazo/brazo_lib/preferences.py
|
Python
|
mit
| 3,116 | 0.007702 |
import sqlite3
import os
def init():
"""
Creates and initializes settings database.
Doesn't do anything if the file already exists. Remove the local copy to recreate the database.
"""
if not os.path.isfile("settings.sqlite"):
app_db_connection = sqlite3.connect('settings.sqlite')
app_db = app_db_connection.cursor()
app_db.execute("CREATE TABLE oauth (site, rate_remaining, rate_reset)")
app_db.execute("INSERT INTO oauth VALUES ('reddit', 30, 60)")
app_db_connection.commit()
app_db_connection.close()
if __name__ == "__main__":
init()
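# --- Illustrative read-back sketch (not part of the original script) ---
# Assuming other code wants the stored reddit rate limits, it could read the
# row that init() created using plain sqlite3:
#
#     conn = sqlite3.connect('settings.sqlite')
#     remaining, reset = conn.execute(
#         "SELECT rate_remaining, rate_reset FROM oauth WHERE site = ?",
#         ('reddit',)).fetchone()
#     conn.close()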
|
Pasotaku/Anime-Feud-Survey-Backend
|
Old Python Code/settings_db_init.py
|
Python
|
mit
| 617 | 0.001621 |
"""autogenerated by genpy from outdoor_bot/mainTargetsCommand_msg.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class mainTargetsCommand_msg(genpy.Message):
_md5sum = "b1faa92c8dffb7694c609d94e4e2d116"
_type = "outdoor_bot/mainTargetsCommand_msg"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 cameraName
int32 regularDigcamZoom
int32 zoomDigcamZoom
int32 homeDigcamZoom
float32 approxRange
bool firstTarget
"""
__slots__ = ['cameraName','regularDigcamZoom','zoomDigcamZoom','homeDigcamZoom','approxRange','firstTarget']
_slot_types = ['int32','int32','int32','int32','float32','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
cameraName,regularDigcamZoom,zoomDigcamZoom,homeDigcamZoom,approxRange,firstTarget
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(mainTargetsCommand_msg, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.cameraName is None:
self.cameraName = 0
if self.regularDigcamZoom is None:
self.regularDigcamZoom = 0
if self.zoomDigcamZoom is None:
self.zoomDigcamZoom = 0
if self.homeDigcamZoom is None:
self.homeDigcamZoom = 0
if self.approxRange is None:
self.approxRange = 0.
if self.firstTarget is None:
self.firstTarget = False
else:
self.cameraName = 0
self.regularDigcamZoom = 0
self.zoomDigcamZoom = 0
self.homeDigcamZoom = 0
self.approxRange = 0.
self.firstTarget = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_4ifB.pack(_x.cameraName, _x.regularDigcamZoom, _x.zoomDigcamZoom, _x.homeDigcamZoom, _x.approxRange, _x.firstTarget))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 21
(_x.cameraName, _x.regularDigcamZoom, _x.zoomDigcamZoom, _x.homeDigcamZoom, _x.approxRange, _x.firstTarget,) = _struct_4ifB.unpack(str[start:end])
self.firstTarget = bool(self.firstTarget)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_4ifB.pack(_x.cameraName, _x.regularDigcamZoom, _x.zoomDigcamZoom, _x.homeDigcamZoom, _x.approxRange, _x.firstTarget))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 21
(_x.cameraName, _x.regularDigcamZoom, _x.zoomDigcamZoom, _x.homeDigcamZoom, _x.approxRange, _x.firstTarget,) = _struct_4ifB.unpack(str[start:end])
self.firstTarget = bool(self.firstTarget)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_4ifB = struct.Struct("<4ifB")
|
dan-git/outdoor_bot
|
src/outdoor_bot/msg/_mainTargetsCommand_msg.py
|
Python
|
bsd-2-clause
| 4,588 | 0.017873 |
import jinja2
from jingo import register
from tower import ugettext_lazy as _lazy
from mkt.site.helpers import page_title
@register.function
@jinja2.contextfunction
def operators_page_title(context, title=None):
section = _lazy('Operator Dashboard')
title = u'%s | %s' % (title, section) if title else section
return page_title(context, title)
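# Illustrative template usage (not part of the original module): because the
# helper is registered with jingo and marked as a context function, a Jinja
# template could call it as, e.g.,
#     <title>{{ operators_page_title('Dashboard') }}</title>
# which renders "Dashboard | Operator Dashboard", falling back to just
# "Operator Dashboard" when no title is passed.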
|
jamesthechamp/zamboni
|
mkt/operators/helpers.py
|
Python
|
bsd-3-clause
| 360 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
group_and_permission = [
("超级管理员", [], ['all']),
("运营部", ['Can change teacher'],
[
         # Teachers pending publication, published teachers, teacher editing
'teachers_unpublished',
'teachers_published',
'teachers_unpublished_edit',
'teachers_published_edit',
'teachers_action',
         # Evaluation & record creation; can view schedules; cannot reschedule or suspend lessons; can request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
'orders_action',
         # Course list, complaints, attendance
'school_timeslot',
         # Center settings, editing
'schools',
'staff_school',
         # Order viewing, refund requests
'orders_review',
'orders_action',
         # Scholarship settings, redemption list
'coupon_config',
'coupons_list',
]),
("财务主管", [],
[
         # Teacher bank card lookup
'teachers_bankcard_list',
         # Order viewing, refund requests, review
'orders_review',
'orders_refund',
'orders_action',
         # Teacher income list, income details, withdrawal review
'teachers_income_list',
'teachers_income_detail',
'teachers_withdrawal_list',
         # Scholarships, redemption list
'coupons_list',
         # School income records (finance)
'school_income_audit',
'school_income_audit_v2',
]),
("会计出纳", [],
[
         # Order viewing, refund requests, review
'orders_review',
'orders_refund',
'orders_action',
         # Teacher income list, income details, withdrawal review
'teachers_income_list',
'teachers_income_detail',
'teachers_withdrawal_list',
         # Scholarships, redemption list
'coupons_list',
]),
("中心主任", ['Can change teacher'],
[
         # Newly registered, pending-publication and published teachers, teacher editing
'teachers',
'teachers_unpublished',
'teachers_published',
'teachers_unpublished_edit',
'teachers_published_edit',
'teachers_action',
         # Evaluation & record creation; view schedules; reschedule/suspend lessons (actions and history); can request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
'student_schedule_action',
'student_schedule_changelog',
'orders_action',
         # Course list, complaints, attendance
'school_timeslot',
         # Center settings, editing
'schools',
'staff_school',
         # Order viewing, refund requests
'orders_review',
'orders_action',
         # Teacher income list, income details
'teachers_income_list',
'teachers_income_detail',
         # Scholarship settings, redemption list
'coupon_config',
'coupons_list',
         # Change password
'staff_auth',
         # School income records
'school_income_records',
'school_income_records_v2',
         # Tiered price settings (one-on-one)
'school_price_cfg',
         # School account info
'school_account_info',
'school_account_info_v2',
]),
("教师委员会主任", ['Can change teacher'],
[
         # Newly registered, pending-publication and published teachers, teacher editing
'teachers',
'teachers_unpublished',
'teachers_published',
'teachers_unpublished_edit',
'teachers_published_edit',
'teachers_action',
         # Evaluation & record creation; can only view schedules; cannot reschedule or suspend lessons, and cannot request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
         # Course list
         # todo: complaints and attendance are also on this page, so they are allowed for now
'school_timeslot',
]),
("教师委员会主任助理", ['Can change teacher'],
[
         # Newly registered, pending-publication and published teachers, teacher editing
'teachers',
'teachers_unpublished',
'teachers_published',
'teachers_unpublished_edit',
'teachers_published_edit',
'teachers_action',
         # Evaluation & record creation; can only view schedules; cannot reschedule or suspend lessons, and cannot request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
]),
("学习顾问", [],
[
         # Evaluation & record creation; view schedules; reschedule/suspend lessons (actions and history); cannot request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
'student_schedule_action',
'student_schedule_changelog',
         # Course list
         # todo: complaints and attendance are also on this page, so they are allowed for now
'school_timeslot',
]),
("社区店长", [],
[
         # Course list
         # todo: currently shows the courses of all centers
         # todo: complaints and attendance are also on this page, so they are allowed for now
'school_timeslot',
         # Change password
'staff_auth',
]),
("老师", [], []),
("家长", ['Can change parent', 'Can change profile'], []),
("学生", [], []),
    # todo: the teaching-staff administrator is currently only used for logging into the admin backend
("师资管理员", ['Can change teacher'], [])
]
def _add_test_user_into_group(apps, test_user_format, count, group_name,
newUserData=None):
Group = apps.get_model('auth', 'Group')
User = apps.get_model('auth', 'User')
user_group = Group.objects.get(name=group_name)
for i in range(count):
username = test_user_format.format(id=i)
try:
if newUserData:
user, created = User.objects.get_or_create(
username=username, defaults=newUserData)
else:
user = User.objects.get(username=username)
except User.DoesNotExist:
#print("{user} not exist".format(user=test_user_format))
continue
user.groups.add(user_group)
def change_user_group(apps, schema_editor):
Permission = apps.get_model('auth', 'Permission')
Group = apps.get_model('auth', 'Group')
StaffPermission = apps.get_model('app', 'StaffPermission')
global group_and_permission
for group_name, permission_list, allowed_url_names in group_and_permission:
new_group, group_create = Group.objects.get_or_create(name=group_name)
new_group.save()
new_group.staffpermission_set.clear()
for url_name in allowed_url_names:
new_url_name, created = StaffPermission.objects.get_or_create(
allowed_url_name=url_name)
new_group.staffpermission_set.add(new_url_name)
new_group.permissions.clear()
for permission_name in permission_list:
permission = Permission.objects.get(name=permission_name)
new_group.permissions.add(permission)
_add_test_user_into_group(apps, 'test', 1, '超级管理员')
class Migration(migrations.Migration):
dependencies = [
('app', '0185_livecoursetimeslot_mistakes_pushed'),
]
operations = [
migrations.RunPython(change_user_group),
]
|
malaonline/Server
|
server/app/migrations/0186_change_user_group.py
|
Python
|
mit
| 7,213 | 0.000161 |
from unittest import TestCase
from intent.igt.rgxigt import RGIgt
class ConstructIGTTests(TestCase):
def setUp(self):
self.lines = [{'text':'This is a test','tag':'L'},
{'text':'blah blah blah blah','tag':'G'}]
def test_add_raw_lines(self):
inst = RGIgt(id='i1')
inst.add_raw_tier(self.lines)
self.assertEqual(len(inst.raw_tier()), 2)
def test_add_clean_lines(self):
inst = RGIgt(id='i1')
inst.add_clean_tier(self.lines)
self.assertEqual(len(inst.clean_tier()), 2)
def test_add_norm_lines(self):
inst = RGIgt(id='i1')
inst.add_clean_tier(self.lines)
self.assertEqual(len(inst.clean_tier()), 2)
|
rgeorgi/intent
|
intent/tests/instance_construction_tests.py
|
Python
|
mit
| 718 | 0.009749 |
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corporation / Hildo Guillardi Júnior
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = 'XESS Corporation'
__email__ = 'info@xess.com'
from .distributor import distributor_class
# Export the ORDER_COL_USERFIELDS content
from .distributors_info import ORDER_COL_USERFIELDS # noqa: F401
# Import and register here the API / local / scrape modules.
from .dist_local_template import dist_local_template # noqa: F401
from .api_octopart import api_octopart # noqa: F401
from .api_partinfo_kitspace import api_partinfo_kitspace # noqa: F401
#
# Some wrappers
#
def init_distributor_dict():
distributor_class.init_dist_dict()
def get_dist_parts_info(parts, dist_list, currency):
distributor_class.get_dist_parts_info(parts, dist_list, currency)
def get_registered_apis():
return distributor_class.registered
def get_distributors_list():
''' List of distributors registered by the API modules '''
return list(distributor_class.get_distributors_iter())
def get_distributors_iter():
''' Iterator for the distributors registered by the API modules '''
return distributor_class.get_distributors_iter()
def get_distributor_info(name):
''' Gets all the information about a supported distributor.
This information comes from the list collected from the APIs, not from the fixed template. '''
return distributor_class.get_distributor_info(name)
def get_dist_name_from_label(label):
''' Returns the internal distributor name for a provided label. '''
return distributor_class.label2name.get(label.lower())
def set_distributors_logger(logger):
''' Sets the logger used by the class '''
distributor_class.logger = logger
def set_distributors_progress(cls):
''' Configures the class used to indicate progress '''
distributor_class.progress = cls
def set_api_options(api, **kwargs):
''' Configure an API (by name) '''
distributor_class.set_api_options(api, **kwargs)
def set_api_status(api, enabled):
''' Enable/Disable a particular API '''
distributor_class.set_api_status(api, enabled)
def get_api_status(api):
''' Find if an API is enabled '''
return distributor_class.get_api_status(api)
# Init distributor dict during import.
init_distributor_dict()
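# --- Illustrative usage sketch (not part of the original module) ---
# Assuming the package is imported as ``kicost.distributors`` (the path it
# ships under), a caller would typically install a logger first and then walk
# the registry populated by init_distributor_dict() above:
#
#     import logging
#     from kicost.distributors import (set_distributors_logger,
#                                      get_distributors_list,
#                                      get_distributor_info)
#
#     set_distributors_logger(logging.getLogger('kicost'))
#     for name in get_distributors_list():
#         info = get_distributor_info(name)   # data collected by the APIs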
|
xesscorp/KiCost
|
kicost/distributors/__init__.py
|
Python
|
mit
| 3,347 | 0.000299 |
import subprocess
import shutil
from unittest import TestCase
if shutil.which('flake8'):
class TestCodeQuality(TestCase):
def test_flake8(self):
self.assertEqual(0, subprocess.call('flake8'))
else:
print("I: skipping flake8 test (flake8 not available)")
|
Linaro/squad
|
test/test_code_quality.py
|
Python
|
agpl-3.0
| 285 | 0 |
#!/usr/bin/env python
import ROOT as r
def setTextBoxAttributes(text, color, font):
text.SetTextColor(color)
text.SetTextAlign(13)
text.SetTextSize(0.04)
text.SetTextFont(font)
def dress(canv, colors):
# need this to keep the objects alive
objs = []
vub_inc = r.TLatex(0.16,0.655,"V_{ub} inclusive")
vub_inc.SetNDC()
vub_inc.Draw("same")
objs.append(vub_inc)
vcb_inc = r.TLatex(0.68,0.72,"V_{cb} inclusive")
vcb_inc.SetNDC()
vcb_inc.SetTextAngle(90)
vcb_inc.Draw("same")
objs.append(vcb_inc)
vub_excl = r.TLatex(0.16,0.455,"V_{ub} exclusive")
vub_excl.SetNDC()
vub_excl.Draw("same")
objs.append(vub_excl)
vcb_excl = r.TLatex(0.45,0.72,"V_{cb} exclusive")
vcb_excl.SetNDC()
vcb_excl.SetTextAngle(90)
vcb_excl.Draw("same")
objs.append(vcb_excl)
vub_vcb_lhcb = r.TLatex(0.17,0.29,"V_{ub}/V_{cb} LHCb")
vub_vcb_lhcb.SetNDC()
vub_vcb_lhcb.SetTextAngle(8)
vub_vcb_lhcb.Draw("same")
objs.append(vub_vcb_lhcb)
indirect = r.TLatex(0.17,0.38,"Indirect (CKM fitter)")
indirect.SetNDC()
indirect.SetTextAngle(9)
indirect.Draw("same")
objs.append(indirect)
comb_inc = r.TLatex(0.66,0.61,"Comb. incl.")
comb_inc.SetNDC()
comb_inc.Draw("same")
objs.append(comb_inc)
comb_excl = r.TLatex(0.43,0.40,"Comb. excl.")
comb_excl.SetNDC()
comb_excl.SetTextAngle(0)
comb_excl.Draw("same")
objs.append(comb_excl)
group1 = r.TLatex(0.18,0.85,"#splitline{PDG 2014 +}{CKM fitter +}")
group1.SetNDC()
group1.SetTextFont(132)
group1.SetTextSize(0.05)
group1.Draw("same")
objs.append(group1)
group2 = r.TLatex(0.18,0.75,"#splitline{#Lambda_{b}#rightarrowp#mu#nu (LHCb)}{}")
group2.SetNDC()
group2.SetTextFont(132)
group2.SetTextSize(0.05)
group2.Draw("same")
objs.append(group2)
canv.Update()
canv.Modified()
return objs
|
matthewkenzie/gammacombo
|
scripts/plotModuleExample.py
|
Python
|
gpl-3.0
| 1,823 | 0.044981 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
## Copyright © 2007-2012, Matthias Urlichs <matthias@urlichs.de>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
from homevent.reactor import ShutdownHandler
from homevent.module import load_module
from homevent.statement import main_words
from test import run
input = """\
block:
if exists path "..":
log DEBUG Yes
else:
log DEBUG No1
if exists path "...":
log DEBUG No2
else:
log DEBUG Yes
if exists directory "..":
log DEBUG Yes
else:
log DEBUG No3
if exists directory "README":
log DEBUG No4
else:
log DEBUG Yes
if exists file "README":
log DEBUG Yes
else:
log DEBUG No5
if exists file "..":
log DEBUG No6
else:
log DEBUG Yes
shutdown
"""
main_words.register_statement(ShutdownHandler)
load_module("logging")
load_module("ifelse")
load_module("path")
load_module("block")
run("path",input)
|
smurfix/HomEvenT
|
test/mod_path.py
|
Python
|
gpl-3.0
| 1,386 | 0.025271 |
from django.core.validators import validate_email
from django import forms
from captcha.fields import ReCaptchaField
from .models import ContactUs
class CreateContact(forms.ModelForm):
captcha = ReCaptchaField()
class Meta:
model = ContactUs
fields = '__all__'
widgets = {
'email': forms.EmailInput({'required': 'required',
'placeholder': 'Email'}),
'message': forms.Textarea(attrs={'required': 'required',
'placeholder': 'Message'})
}
def clean_first_name(self):
first_name = self.cleaned_data['first_name']
if not first_name.isalpha():
raise forms.ValidationError("Introdu un prenume valid")
return first_name
def clean_email(self):
email = self.cleaned_data['email']
if validate_email(email):
raise forms.ValidationError("Adresa de email nu e valida")
return email
def clean_last_name(self):
last_name = self.cleaned_data['last_name']
if not last_name.isalpha():
raise forms.ValidationError("Introdu un nume corect")
return last_name
def clean_message(self):
message = self.cleaned_data['message']
if len(message) < 50:
raise forms.ValidationError(
"Mesajul tau e prea scurt!"
"Trebuie sa contina minim 50 de caractere")
return message
|
emanuelcovaci/TLT
|
blog/contact/forms.py
|
Python
|
agpl-3.0
| 1,487 | 0.000672 |
# -*- coding: utf-8 -*-
# django-simple-help
# simple_help/admin.py
from __future__ import unicode_literals
from django.contrib import admin
try: # add modeltranslation
from modeltranslation.translator import translator
from modeltranslation.admin import TabbedDjangoJqueryTranslationAdmin
except ImportError:
pass
from simple_help.models import PageHelp
from simple_help.forms import PageHelpAdminForm
from simple_help.utils import modeltranslation
try:
from simple_help.translation import PageHelpTranslationOptions
except ImportError:
pass
__all__ = [
"PageHelpAdmin",
]
class PageHelpAdmin(TabbedDjangoJqueryTranslationAdmin if modeltranslation() else admin.ModelAdmin):
"""
Customize PageHelp model for admin area.
"""
list_display = ["page", "title", ]
search_fields = ["title", ]
list_filter = ["page", ]
form = PageHelpAdminForm
if modeltranslation():
# registering translation options
translator.register(PageHelp, PageHelpTranslationOptions)
# registering admin custom classes
admin.site.register(PageHelp, PageHelpAdmin)
|
DCOD-OpenSource/django-simple-help
|
simple_help/admin.py
|
Python
|
mit
| 1,108 | 0.000903 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2013, IOhannes m zmölnig, IEM
# This file is part of WILMix
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WILMix. If not, see <http://www.gnu.org/licenses/>.
from PySide.QtNetwork import QHostInfo, QHostAddress
def getAddress(hostname, preferIPv6=None):
# IPv6=true: prefer IPv6 addresses (if there are none, the function might still return IPv4)
# IPv6=false: prefer IPv4 addresses (if there are none, the function might still return IPv6)
# IPv6=None: first available address returned
info=QHostInfo()
adr=info.fromName(hostname).addresses()
if not adr: return None
if preferIPv6 is None:
return adr[0].toString()
for a_ in adr:
a=QHostAddress(a_)
if preferIPv6:
if a.toIPv6Address():
return a.toString()
else:
if a.toIPv4Address():
return a.toString()
return adr[0].toString()
if __name__ == '__main__':
def testfun(name, ipv6):
addr=getAddress(name, ipv6)
print("%s -> %s" % (name, addr))
import sys
progname=sys.argv[0]
ipv6=None
args=[]
if len(sys.argv)>1:
s=sys.argv[1]
if s.startswith('-'):
args=sys.argv[2:]
if "-ipv4" == s:
ipv6=False
elif "-ipv6" == s:
ipv6=True
else:
print("Usage: resolv.py [-ipv4|-ipv6] <host1> [<host2> ...]")
sys.exit(1)
else:
args=sys.argv[1:]
if not args:
args=['localhost', 'umlautq', 'example.com']
for h in args:
testfun(h,ipv6)
|
iem-projects/WILMAmix
|
WILMA/net/resolv.py
|
Python
|
gpl-2.0
| 2,201 | 0.011369 |
# coding=UTF-8
"""
Tests courseware views.py
"""
import unittest
from datetime import datetime
from mock import MagicMock, patch
from pytz import UTC
from django.test import TestCase
from django.http import Http404
from django.test.utils import override_settings
from django.contrib.auth.models import User, AnonymousUser
from django.test.client import RequestFactory
from django.conf import settings
from django.core.urlresolvers import reverse
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory
from edxmako.middleware import MakoMiddleware
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory
import courseware.views as views
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from course_modes.models import CourseMode
import shoppingcart
from util.tests.test_date_utils import fake_ugettext, fake_pgettext
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestJumpTo(TestCase):
"""
Check the jumpto link for a course.
"""
def setUp(self):
# Use toy course from XML
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def test_jumpto_invalid_location(self):
location = self.course_key.make_usage_key(None, 'NoSuchPlace')
# This is fragile, but unfortunately the problem is that within the LMS we
# can't use the reverse calls from the CMS
jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
response = self.client.get(jumpto_url)
self.assertEqual(response.status_code, 404)
def test_jumpto_from_chapter(self):
location = self.course_key.make_usage_key('chapter', 'Overview')
jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
response = self.client.get(jumpto_url)
self.assertRedirects(response, expected, status_code=302, target_status_code=302)
def test_jumpto_id(self):
jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), 'Overview')
expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
response = self.client.get(jumpto_url)
self.assertRedirects(response, expected, status_code=302, target_status_code=302)
def test_jumpto_id_invalid_location(self):
location = Location('edX', 'toy', 'NoSuchPlace', None, None, None)
jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
response = self.client.get(jumpto_url)
self.assertEqual(response.status_code, 404)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ViewsTestCase(TestCase):
"""
Tests for views.py methods.
"""
def setUp(self):
course = CourseFactory()
chapter = ItemFactory(category='chapter', parent_location=course.location) # pylint: disable=no-member
section = ItemFactory(category='sequential', parent_location=chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
vertical = ItemFactory(category='vertical', parent_location=section.location)
self.component = ItemFactory(category='problem', parent_location=vertical.location)
self.course_key = course.id
self.user = User.objects.create(username='dummy', password='123456',
email='test@mit.edu')
self.date = datetime(2013, 1, 22, tzinfo=UTC)
self.enrollment = CourseEnrollment.enroll(self.user, self.course_key)
self.enrollment.created = self.date
self.enrollment.save()
self.request_factory = RequestFactory()
chapter = 'Overview'
self.chapter_url = '%s/%s/%s' % ('/courses', self.course_key, chapter)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SHOPPING_CART'), "Shopping Cart not enabled in settings")
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_course_about_in_cart(self):
in_cart_span = '<span class="add-to-cart">'
# don't mock this course due to shopping cart existence checking
course = CourseFactory.create(org="new", number="unenrolled", display_name="course")
request = self.request_factory.get(reverse('about_course', args=[course.id.to_deprecated_string()]))
request.user = AnonymousUser()
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertNotIn(in_cart_span, response.content)
# authenticated user with nothing in cart
request.user = self.user
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertNotIn(in_cart_span, response.content)
# now add the course to the cart
cart = shoppingcart.models.Order.get_cart_for_user(self.user)
shoppingcart.models.PaidCourseRegistration.add_to_order(cart, course.id)
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertIn(in_cart_span, response.content)
def test_user_groups(self):
        # deprecated function
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertEqual(views.user_groups(mock_user), [])
def test_get_current_child(self):
self.assertIsNone(views.get_current_child(MagicMock()))
mock_xmodule = MagicMock()
mock_xmodule.position = -1
mock_xmodule.get_display_items.return_value = ['one', 'two']
self.assertEqual(views.get_current_child(mock_xmodule), 'one')
mock_xmodule_2 = MagicMock()
mock_xmodule_2.position = 3
mock_xmodule_2.get_display_items.return_value = []
self.assertIsNone(views.get_current_child(mock_xmodule_2))
def test_redirect_to_course_position(self):
mock_module = MagicMock()
mock_module.descriptor.id = 'Underwater Basketweaving'
mock_module.position = 3
mock_module.get_display_items.return_value = []
self.assertRaises(Http404, views.redirect_to_course_position,
mock_module)
def test_registered_for_course(self):
self.assertFalse(views.registered_for_course('Basketweaving', None))
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertFalse(views.registered_for_course('dummy', mock_user))
mock_course = MagicMock()
mock_course.id = self.course_key
self.assertTrue(views.registered_for_course(mock_course, self.user))
def test_jump_to_invalid(self):
# TODO add a test for invalid location
# TODO add a test for no data *
request = self.request_factory.get(self.chapter_url)
self.assertRaisesRegexp(Http404, 'Invalid course_key or usage_key', views.jump_to,
request, 'bar', ())
def test_no_end_on_about_page(self):
# Toy course has no course end date or about/end_date blob
self.verify_end_date('edX/toy/TT_2012_Fall')
def test_no_end_about_blob(self):
# test_end has a course end date, no end_date HTML blob
self.verify_end_date("edX/test_end/2012_Fall", "Sep 17, 2015")
def test_about_blob_end_date(self):
# test_about_blob_end_date has both a course end date and an end_date HTML blob.
# HTML blob wins
self.verify_end_date("edX/test_about_blob_end_date/2012_Fall", "Learning never ends")
def verify_end_date(self, course_id, expected_end_text=None):
"""
Visits the about page for `course_id` and tests that both the text "Classes End", as well
as the specified `expected_end_text`, is present on the page.
If `expected_end_text` is None, verifies that the about page *does not* contain the text
"Classes End".
"""
request = self.request_factory.get("foo")
request.user = self.user
# TODO: Remove the dependency on MakoMiddleware (by making the views explicitly supply a RequestContext)
MakoMiddleware().process_request(request)
result = views.course_about(request, course_id)
if expected_end_text is not None:
self.assertContains(result, "Classes End")
self.assertContains(result, expected_end_text)
else:
self.assertNotContains(result, "Classes End")
def test_chat_settings(self):
mock_user = MagicMock()
mock_user.username = "johndoe"
mock_course = MagicMock()
mock_course.id = "a/b/c"
# Stub this out in the case that it's not in the settings
domain = "jabber.edx.org"
settings.JABBER_DOMAIN = domain
chat_settings = views.chat_settings(mock_course, mock_user)
# Test the proper format of all chat settings
self.assertEqual(chat_settings['domain'], domain)
self.assertEqual(chat_settings['room'], "a-b-c_class")
self.assertEqual(chat_settings['username'], "johndoe@%s" % domain)
# TODO: this needs to be changed once we figure out how to
# generate/store a real password.
self.assertEqual(chat_settings['password'], "johndoe@%s" % domain)
def test_course_mktg_about_coming_soon(self):
# we should not be able to find this course
url = reverse('mktg_about_course', kwargs={'course_id': 'no/course/here'})
response = self.client.get(url)
self.assertIn('Coming Soon', response.content)
def test_course_mktg_register(self):
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
url = reverse('mktg_about_course', kwargs={'course_id': self.course_key.to_deprecated_string()})
response = self.client.get(url)
self.assertIn('Register for', response.content)
self.assertNotIn('and choose your student track', response.content)
def test_course_mktg_register_multiple_modes(self):
admin = AdminFactory()
CourseMode.objects.get_or_create(mode_slug='honor',
mode_display_name='Honor Code Certificate',
course_id=self.course_key)
CourseMode.objects.get_or_create(mode_slug='verified',
mode_display_name='Verified Certificate',
course_id=self.course_key)
self.client.login(username=admin.username, password='test')
url = reverse('mktg_about_course', kwargs={'course_id': self.course_key.to_deprecated_string()})
response = self.client.get(url)
self.assertIn('Register for', response.content)
self.assertIn('and choose your student track', response.content)
# clean up course modes
CourseMode.objects.all().delete()
def test_submission_history_accepts_valid_ids(self):
# log into a staff account
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': 'dummy',
'location': unicode(self.component.location)
})
response = self.client.get(url)
# Tests that we do not get an "Invalid x" response when passing correct arguments to view
self.assertFalse('Invalid' in response.content)
def test_submission_history_xss(self):
# log into a staff account
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
# try it with an existing user and a malicious location
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': 'dummy',
'location': '<script>alert("hello");</script>'
})
response = self.client.get(url)
self.assertFalse('<script>' in response.content)
# try it with a malicious user and a non-existent location
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': '<script>alert("hello");</script>',
'location': 'dummy'
})
response = self.client.get(url)
self.assertFalse('<script>' in response.content)
# setting TIME_ZONE_DISPLAYED_FOR_DEADLINES explicitly
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE, TIME_ZONE_DISPLAYED_FOR_DEADLINES="UTC")
class BaseDueDateTests(ModuleStoreTestCase):
"""
Base class that verifies that due dates are rendered correctly on a page
"""
__test__ = False
def get_text(self, course): # pylint: disable=unused-argument
"""Return the rendered text for the page to be verified"""
raise NotImplementedError
def set_up_course(self, **course_kwargs):
"""
Create a stock course with a specific due date.
        :param course_kwargs: All kwargs are passed through to the :class:`CourseFactory`
"""
course = CourseFactory(**course_kwargs)
chapter = ItemFactory(category='chapter', parent_location=course.location) # pylint: disable=no-member
section = ItemFactory(category='sequential', parent_location=chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
vertical = ItemFactory(category='vertical', parent_location=section.location)
ItemFactory(category='problem', parent_location=vertical.location)
course = modulestore().get_course(course.id) # pylint: disable=no-member
self.assertIsNotNone(course.get_children()[0].get_children()[0].due)
return course
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
self.time_with_tz = "due Sep 18, 2013 at 11:30 UTC"
self.time_without_tz = "due Sep 18, 2013 at 11:30"
def test_backwards_compatability(self):
# The test course being used has show_timezone = False in the policy file
# (and no due_date_display_format set). This is to test our backwards compatibility--
# in course_module's init method, the date_display_format will be set accordingly to
# remove the timezone.
course = self.set_up_course(due_date_display_format=None, show_timezone=False)
text = self.get_text(course)
self.assertIn(self.time_without_tz, text)
self.assertNotIn(self.time_with_tz, text)
# Test that show_timezone has been cleared (which means you get the default value of True).
self.assertTrue(course.show_timezone)
def test_defaults(self):
course = self.set_up_course()
text = self.get_text(course)
self.assertIn(self.time_with_tz, text)
def test_format_none(self):
# Same for setting the due date to None
course = self.set_up_course(due_date_display_format=None)
text = self.get_text(course)
self.assertIn(self.time_with_tz, text)
def test_format_plain_text(self):
# plain text due date
course = self.set_up_course(due_date_display_format="foobar")
text = self.get_text(course)
self.assertNotIn(self.time_with_tz, text)
self.assertIn("due foobar", text)
def test_format_date(self):
# due date with no time
course = self.set_up_course(due_date_display_format=u"%b %d %y")
text = self.get_text(course)
self.assertNotIn(self.time_with_tz, text)
self.assertIn("due Sep 18 13", text)
def test_format_hidden(self):
# hide due date completely
course = self.set_up_course(due_date_display_format=u"")
text = self.get_text(course)
self.assertNotIn("due ", text)
def test_format_invalid(self):
# improperly formatted due_date_display_format falls through to default
# (value of show_timezone does not matter-- setting to False to make that clear).
course = self.set_up_course(due_date_display_format=u"%%%", show_timezone=False)
text = self.get_text(course)
self.assertNotIn("%%%", text)
self.assertIn(self.time_with_tz, text)
class TestProgressDueDate(BaseDueDateTests):
"""
Test that the progress page displays due dates correctly
"""
__test__ = True
def get_text(self, course):
""" Returns the HTML for the progress page """
return views.progress(self.request, course.id.to_deprecated_string(), self.user.id).content
class TestAccordionDueDate(BaseDueDateTests):
"""
Test that the accordion page displays due dates correctly
"""
__test__ = True
def get_text(self, course):
""" Returns the HTML for the accordion """
return views.render_accordion(
self.request, course, course.get_children()[0].scope_ids.usage_id.to_deprecated_string(), None, None
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StartDateTests(ModuleStoreTestCase):
"""
Test that start dates are properly localized and displayed on the student
dashboard.
"""
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
def set_up_course(self):
"""
        Create a stock course with a specific start date.
"""
course = CourseFactory(start=datetime(2013, 9, 16, 7, 17, 28))
course = modulestore().get_course(course.id) # pylint: disable=no-member
return course
def get_about_text(self, course_key):
"""
Get the text of the /about page for the course.
"""
text = views.course_about(self.request, course_key.to_deprecated_string()).content
return text
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Sep"): "SEPTEMBER",
}))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%Y-%b-%d",
}))
def test_format_localized_in_studio_course(self):
course = self.set_up_course()
text = self.get_about_text(course.id)
# The start date is set in the set_up_course function above.
self.assertIn("2013-SEPTEMBER-16", text)
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Jul"): "JULY",
}))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%Y-%b-%d",
}))
def test_format_localized_in_xml_course(self):
text = self.get_about_text(SlashSeparatedCourseKey('edX', 'toy', 'TT_2012_Fall'))
# The start date is set in common/test/data/two_toys/policies/TT_2012_Fall/policy.json
self.assertIn("2015-JULY-17", text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ProgressPageTests(ModuleStoreTestCase):
"""
Tests that verify that the progress page works correctly.
"""
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
MakoMiddleware().process_request(self.request)
course = CourseFactory(
start=datetime(2013, 9, 16, 7, 17, 28),
grade_cutoffs={u'çü†øƒƒ': 0.75, 'Pass': 0.5},
)
self.course = modulestore().get_course(course.id) # pylint: disable=no-member
self.chapter = ItemFactory(category='chapter', parent_location=self.course.location) # pylint: disable=no-member
self.section = ItemFactory(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory(category='vertical', parent_location=self.section.location)
def test_pure_ungraded_xblock(self):
ItemFactory(category='acid', parent_location=self.vertical.location)
resp = views.progress(self.request, self.course.id.to_deprecated_string())
self.assertEqual(resp.status_code, 200)
def test_non_asci_grade_cutoffs(self):
resp = views.progress(self.request, self.course.id.to_deprecated_string())
self.assertEqual(resp.status_code, 200)
|
morenopc/edx-platform
|
lms/djangoapps/courseware/tests/test_views.py
|
Python
|
agpl-3.0
| 21,213 | 0.002641 |
# ------------------------------------------------------------------------------
# This extension adds support for Jinja templates.
# ------------------------------------------------------------------------------
import sys
from ivy import hooks, site, templates
try:
import jinja2
except ImportError:
jinja2 = None
# Stores an initialized Jinja environment instance.
env = None
# The jinja2 package is an optional dependency.
if jinja2:
# Initialize our Jinja environment on the 'init' event hook.
@hooks.register('init')
def init():
# Initialize a template loader.
settings = {
'loader': jinja2.FileSystemLoader(site.theme('templates'))
}
# Check the site's config file for any custom settings.
settings.update(site.config.get('jinja', {}))
# Initialize an Environment instance.
global env
env = jinja2.Environment(**settings)
# Register our template engine callback for files with a .jinja extension.
@templates.register('jinja')
def callback(page, filename):
try:
template = env.get_template(filename)
return template.render(page)
except jinja2.TemplateError as err:
msg = "------------------------\n"
msg += " Jinja Template Error \n"
msg += "------------------------\n\n"
msg += " Template: %s\n" % filename
msg += " Page: %s\n\n" % page['filepath']
msg += " %s: %s" % (err.__class__.__name__, err)
if err.__context__:
cause = err.__context__
msg += "\n\n The following cause was reported:\n\n"
msg += " %s: %s" % (cause.__class__.__name__, cause)
sys.exit(msg)
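# --- Illustrative configuration sketch (not part of the original module) ---
# init() above merges ``site.config.get('jinja', {})`` into the settings dict
# passed to jinja2.Environment, so a site's config file could tune the
# environment with standard jinja2 keyword arguments, for example:
#
#     jinja = {
#         'trim_blocks': True,
#         'lstrip_blocks': True,
#     }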
|
dmulholland/ivy
|
ivy/ext/ivy_jinja.py
|
Python
|
unlicense
| 1,779 | 0.000562 |
from django import forms
from django.forms import Form, ModelForm
from django.utils import timezone
from webapp.models import Task, TaskGroup, TaskGroupSet
from webapp.validators import validate_package
from webapp.widgets import CustomSplitDateTimeWidget
class TaskGroupForm(ModelForm):
class Meta:
model = TaskGroup
fields = '__all__'
exclude = ['raw_csv', 'is_public']
labels = {
'name': 'Group name',
'description': 'Description',
'is_public': 'Public'
}
help_texts = {
'is_public': 'determines whether group is public or not'
}
def __init__(self, *args, **kwargs):
kwargs.pop('edit', None)
super(TaskGroupForm, self).__init__(*args, **kwargs)
class TaskGroupCSVForm(Form):
file = forms.FileField()
upload_csv = forms.IntegerField(initial=1, widget=forms.HiddenInput)
class TaskGroupAccessForm(Form):
grant_single = forms.IntegerField(initial=1, widget=forms.HiddenInput)
username = forms.CharField(
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'username', 'class': 'form-control'})
)
class TaskGroupInviteForm(Form):
send_invitation = forms.CharField(initial=1, widget=forms.HiddenInput)
email = forms.EmailField(widget=forms.EmailInput(attrs={'placeholder': 'E-mail', 'class': 'form-control'}))
class TaskForm(ModelForm):
deadline = forms.SplitDateTimeField(
input_date_formats=['%Y-%m-%d'],
input_time_formats=['%H:%M:%S'],
widget=CustomSplitDateTimeWidget(
date_attrs={'placeholder': 'Date: yyyy-mm-dd', 'data-dpk': '1'},
time_attrs={'placeholder': 'Time: hh:mm:ss'},
date_format='%Y-%m-%d',
time_format='%H:%M:%S'
),
help_text='Set blank if no deadline',
required=False
)
package = forms.FileField(
label='Package',
help_text='.zip package created according to guidelines',
widget=forms.FileInput,
validators=[validate_package]
)
class Meta:
model = Task
fields = '__all__'
exclude = ['task_group']
labels = {
'name': 'Task name',
'description_brief': 'Short description',
'tg_set': 'Task set',
'submission_limit': 'Submissions limit',
'result_type': 'Result priority',
'files_count_limit': 'Max. files amount',
'file_size_limit': 'Max. file size'
}
help_texts = {
'description': 'Markdown can be used here',
'description_brief': 'Short description will be shown on the tasks list page',
'tg_set': 'Task set to which this task belongs',
'result_type': 'Pattern, according to which results list will appear.',
'submission_limit': 'Limit of submissions per user. Put 0 if unlimited',
'files_count_limit': 'Maximal amount of files in one submission',
'file_size_limit': 'Maximal size of single file (in bytes)'
}
widgets = {
'package': forms.FileInput
}
def __init__(self, *args, **kwargs):
edit = kwargs.pop('edit', None)
super(TaskForm, self).__init__(*args, **kwargs)
if edit:
self.fields['package'].label = 'New package'
self.fields['package'].required = False
self.fields['tg_set'].queryset = TaskGroupSet.objects.filter(task_group_id=self.instance.task_group_id)
else:
self.fields['deadline'].initial = timezone.now() + timezone.timedelta(days=14)
del self.fields['tg_set']
class InvalidateSubmissionForm(Form):
comment = forms.CharField(
label='Your comment',
widget=forms.Textarea(attrs={'placeholder': 'Type in the reason here'}),
required=True
)
class CopyTaskGroup(Form):
name = forms.CharField(
label='New name',
widget=forms.TextInput(attrs={'placeholder': 'New name'}),
required=True
)
description = forms.CharField(
label='Description',
widget=forms.Textarea(attrs={'placeholder': 'Type in new description (optional)'}),
required=False
)
class TaskGroupSetForm(ModelForm):
class Meta:
model = TaskGroupSet
fields = '__all__'
exclude = ['task_group']
labels = {
'name': 'Name',
'description': 'Description'
}
class TaskGroupBulkDeadlines(Form):
set_id = forms.IntegerField(
required=True,
widget=forms.HiddenInput()
)
deadline = forms.SplitDateTimeField(
input_date_formats=['%Y-%m-%d'],
input_time_formats=['%H:%M:%S'],
widget=CustomSplitDateTimeWidget(
date_attrs={'placeholder': 'Date: yyyy-mm-dd', 'data-dpk': '1'},
time_attrs={'placeholder': 'Time: hh:mm:ss'},
date_format='%Y-%m-%d',
time_format='%H:%M:%S'
),
required=False,
label='name of the set'
)
def __init__(self, *args, **kwargs):
super(TaskGroupBulkDeadlines, self).__init__(*args, **kwargs)
self.fields['deadline'].label = self.initial.get('set_name')
class FeedbackFrom(Form):
TOPIC = (
('', '- Please select -'),
('proposal', 'I have a proposal'),
('report', 'I want to report a problem'),
('question', 'I have a question'),
('other', 'Other')
)
theme = forms.ChoiceField(label='What happened?', choices=TOPIC)
email = forms.EmailField(
label='',
widget=forms.EmailInput(attrs={'placeholder': 'Contact e-mail'})
)
content = forms.CharField(
label='Write your message here:',
widget=forms.Textarea
)
class InternalLoginForm(Form):
username = forms.CharField(label='Username')
password = forms.CharField(label='Password', widget=forms.PasswordInput)
class InternalRegisterForm(Form):
username = forms.CharField(min_length=3, label='Username')
password = forms.CharField(min_length=8, label='Password', widget=forms.PasswordInput)
repeat_password = forms.CharField(label='Repeat password', widget=forms.PasswordInput)
first_name = forms.CharField(min_length=1, label='First name')
last_name = forms.CharField(min_length=1, label='Last name')
email = forms.CharField(label='E-mail address', widget=forms.EmailInput)
class PasswordForgetInitForm(Form):
username = forms.CharField(min_length=3, label='Username')
email = forms.CharField(label='E-mail address', widget=forms.EmailInput)
class PasswordForgetResetForm(Form):
password = forms.CharField(min_length=8, label='Password', widget=forms.PasswordInput)
repeat_password = forms.CharField(label='Repeat password', widget=forms.PasswordInput)
|
algochecker/algochecker-web
|
webapp/forms.py
|
Python
|
mit
| 6,858 | 0.001896 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nvp_netbinding
Revision ID: 1d76643bcec4
Revises: 3cb5d900c5de
Create Date: 2013-01-15 07:36:10.024346
"""
# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '3cb5d900c5de'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'nvp_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type',
sa.Enum('flat', 'vlan', 'stt', 'gre',
name='nvp_network_bindings_binding_type'),
nullable=False),
sa.Column('tz_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('nvp_network_bindings')
|
Brocade-OpenSource/OpenStack-DNRM-Neutron
|
neutron/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py
|
Python
|
apache-2.0
| 2,023 | 0.001483 |
from django.contrib import admin
from django.utils.encoding import smart_text
from import_export.admin import ExportMixin
from remo.dashboard.models import ActionItem
def encode_action_item_names(modeladmin, request, queryset):
for obj in queryset:
ActionItem.objects.filter(pk=obj.id).update(name=smart_text(obj.name))
encode_action_item_names.short_description = 'Encode action item names'
class ActionItemAdmin(ExportMixin, admin.ModelAdmin):
model = ActionItem
list_display = ('__unicode__', 'user', 'due_date', 'created_on',
'priority', 'updated_on', 'object_id',)
search_fields = ['user__first_name', 'user__last_name',
'user__userprofile__display_name', 'name']
actions = [encode_action_item_names]
admin.site.register(ActionItem, ActionItemAdmin)
|
tsmrachel/remo
|
remo/dashboard/admin.py
|
Python
|
bsd-3-clause
| 835 | 0 |
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Prepares the Google Play services split client libraries before usage by
Chrome's build system.
We need to preprocess Google Play services before using it in Chrome
builds for 2 main reasons:
- Getting rid of unused resources: unsupported languages, unused
drawables, etc.
- Merging the different jars so that it can be proguarded more
easily. This is necessary since debug and test apks get very close
to the dex limit.
The script is supposed to be used with the maven repository that can be
obtained by downloading the "extra-google-m2repository" from the Android SDK
Manager. It also supports importing from already extracted AAR files using the
--is-extracted-repo flag. The expected directory structure in that case would
look like:
REPOSITORY_DIR
+-- CLIENT_1
| +-- <content of the first AAR file>
+-- CLIENT_2
+-- etc.
The output is a directory with the following structure:
OUT_DIR
+-- google-play-services.jar
+-- res
| +-- CLIENT_1
| | +-- color
| | +-- values
| | +-- etc.
| +-- CLIENT_2
| +-- ...
+-- stub
+-- res/[.git-keep-directory]
+-- src/android/UnusedStub.java
Requires the `jar` utility in the path.
'''
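# Illustrative invocation only -- the paths below are placeholders, but the
# flags match the ones defined in main()'s argument parser further down:
#
#   preprocess.py --repository <sdk>/extras/google/m2repository \
#                 --out-dir out/google_play_services \
#                 --config-file play_services/config.json
#
# Add --is-extracted-repo when --repository points at a directory of already
# extracted AAR contents instead of a maven repository.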
import argparse
import glob
import itertools
import os
import shutil
import stat
import sys
import tempfile
import zipfile
from datetime import datetime
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import devil_chromium
from devil.utils import cmd_helper
from play_services import utils
from pylib.utils import argparse_utils
def main():
  parser = argparse.ArgumentParser(description=(
      "Prepares the Google Play services split client libraries before usage "
      "by Chrome's build system. See the script's documentation for a more "
      "detailed help."))
argparse_utils.CustomHelpAction.EnableFor(parser)
required_args = parser.add_argument_group('required named arguments')
required_args.add_argument('-r',
'--repository',
help=('the Google Play services repository '
'location'),
required=True,
metavar='FILE')
required_args.add_argument('-o',
'--out-dir',
help='the output directory',
required=True,
metavar='FILE')
required_args.add_argument('-c',
'--config-file',
help='the config file path',
required=True,
metavar='FILE')
parser.add_argument('-x',
'--is-extracted-repo',
action='store_true',
help='the provided repository is not made of AAR files')
parser.add_argument('--config-help',
action='custom_help',
custom_help_text=utils.ConfigParser.__doc__,
help='show the configuration file format help')
args = parser.parse_args()
devil_chromium.Initialize()
return ProcessGooglePlayServices(args.repository,
args.out_dir,
args.config_file,
args.is_extracted_repo)
def ProcessGooglePlayServices(repo, out_dir, config_path, is_extracted_repo):
config = utils.ConfigParser(config_path)
tmp_root = tempfile.mkdtemp()
try:
tmp_paths = _SetupTempDir(tmp_root)
if is_extracted_repo:
_ImportFromExtractedRepo(config, tmp_paths, repo)
else:
_ImportFromAars(config, tmp_paths, repo)
_GenerateCombinedJar(tmp_paths)
_ProcessResources(config, tmp_paths, repo)
_BuildOutput(config, tmp_paths, out_dir)
finally:
shutil.rmtree(tmp_root)
return 0
def _SetupTempDir(tmp_root):
tmp_paths = {
'root': tmp_root,
'imported_clients': os.path.join(tmp_root, 'imported_clients'),
'extracted_jars': os.path.join(tmp_root, 'jar'),
'combined_jar': os.path.join(tmp_root, 'google-play-services.jar'),
}
os.mkdir(tmp_paths['imported_clients'])
os.mkdir(tmp_paths['extracted_jars'])
return tmp_paths
def _SetupOutputDir(out_dir):
out_paths = {
'root': out_dir,
'res': os.path.join(out_dir, 'res'),
'jar': os.path.join(out_dir, 'google-play-services.jar'),
'stub': os.path.join(out_dir, 'stub'),
}
shutil.rmtree(out_paths['jar'], ignore_errors=True)
shutil.rmtree(out_paths['res'], ignore_errors=True)
shutil.rmtree(out_paths['stub'], ignore_errors=True)
return out_paths
def _MakeWritable(dir_path):
for root, dirs, files in os.walk(dir_path):
for path in itertools.chain(dirs, files):
st = os.stat(os.path.join(root, path))
os.chmod(os.path.join(root, path), st.st_mode | stat.S_IWUSR)
# E.g. turn "base_1p" into "base"
def _RemovePartySuffix(client):
return client[:-3] if client[-3:] == '_1p' else client
def _ImportFromAars(config, tmp_paths, repo):
for client in config.clients:
client_name = _RemovePartySuffix(client)
aar_name = 'client_' + client + '.aar'
aar_path = os.path.join(repo, client_name, aar_name)
aar_out_path = os.path.join(tmp_paths['imported_clients'], client)
_ExtractAll(aar_path, aar_out_path)
client_jar_path = os.path.join(aar_out_path, 'classes.jar')
_ExtractAll(client_jar_path, tmp_paths['extracted_jars'])
def _ImportFromExtractedRepo(config, tmp_paths, repo):
# Import the clients
try:
for client in config.clients:
client_out_dir = os.path.join(tmp_paths['imported_clients'], client)
shutil.copytree(os.path.join(repo, client), client_out_dir)
client_jar_path = os.path.join(client_out_dir, 'classes.jar')
_ExtractAll(client_jar_path, tmp_paths['extracted_jars'])
finally:
_MakeWritable(tmp_paths['imported_clients'])
def _GenerateCombinedJar(tmp_paths):
out_file_name = tmp_paths['combined_jar']
working_dir = tmp_paths['extracted_jars']
cmd_helper.Call(['jar', '-cf', out_file_name, '-C', working_dir, '.'])
def _ProcessResources(config, tmp_paths, repo):
LOCALIZED_VALUES_BASE_NAME = 'values-'
locale_whitelist = set(config.locale_whitelist)
# The directory structure here is:
# <imported_clients temp dir>/<client name>_1p/res/<res type>/<res file>.xml
for client_dir in os.listdir(tmp_paths['imported_clients']):
client_prefix = _RemovePartySuffix(client_dir) + '_'
res_path = os.path.join(tmp_paths['imported_clients'], client_dir, 'res')
if not os.path.isdir(res_path):
continue
for res_type in os.listdir(res_path):
res_type_path = os.path.join(res_path, res_type)
if res_type.startswith('drawable'):
shutil.rmtree(res_type_path)
continue
if res_type.startswith(LOCALIZED_VALUES_BASE_NAME):
dir_locale = res_type[len(LOCALIZED_VALUES_BASE_NAME):]
if dir_locale not in locale_whitelist:
shutil.rmtree(res_type_path)
continue
if res_type.startswith('values'):
# Beginning with v3, resource file names are not necessarily unique, and
# would overwrite each other when merged at build time. Prefix each
# "values" resource file with its client name.
for res_file in os.listdir(res_type_path):
os.rename(os.path.join(res_type_path, res_file),
os.path.join(res_type_path, client_prefix + res_file))
# Reimport files from the whitelist.
for res_path in config.resource_whitelist:
for whitelisted_file in glob.glob(os.path.join(repo, res_path)):
resolved_file = os.path.relpath(whitelisted_file, repo)
rebased_res = os.path.join(tmp_paths['imported_clients'], resolved_file)
if not os.path.exists(os.path.dirname(rebased_res)):
os.makedirs(os.path.dirname(rebased_res))
shutil.copy(os.path.join(repo, whitelisted_file), rebased_res)
def _BuildOutput(config, tmp_paths, out_dir):
generation_date = datetime.utcnow()
version_xml_path = os.path.join(tmp_paths['imported_clients'],
config.version_xml_path)
play_services_full_version = utils.GetVersionNumberFromLibraryResources(
version_xml_path)
out_paths = _SetupOutputDir(out_dir)
# Copy the resources to the output dir
for client in config.clients:
res_in_tmp_dir = os.path.join(tmp_paths['imported_clients'], client, 'res')
if os.path.isdir(res_in_tmp_dir) and os.listdir(res_in_tmp_dir):
res_in_final_dir = os.path.join(out_paths['res'], client)
shutil.copytree(res_in_tmp_dir, res_in_final_dir)
# Copy the jar
shutil.copyfile(tmp_paths['combined_jar'], out_paths['jar'])
# Write the java dummy stub. Needed for gyp to create the resource jar
stub_location = os.path.join(out_paths['stub'], 'src', 'android')
os.makedirs(stub_location)
with open(os.path.join(stub_location, 'UnusedStub.java'), 'w') as stub:
stub.write('package android;'
'public final class UnusedStub {'
' private UnusedStub() {}'
'}')
# Create the main res directory. It is needed by gyp
stub_res_location = os.path.join(out_paths['stub'], 'res')
os.makedirs(stub_res_location)
with open(os.path.join(stub_res_location, '.res-stamp'), 'w') as stamp:
content_str = 'google_play_services_version: %s\nutc_date: %s\n'
stamp.write(content_str % (play_services_full_version, generation_date))
config.UpdateVersionNumber(play_services_full_version)
def _ExtractAll(zip_path, out_path):
with zipfile.ZipFile(zip_path, 'r') as zip_file:
zip_file.extractall(out_path)
if __name__ == '__main__':
sys.exit(main())
|
geminy/aidear
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/build/android/play_services/preprocess.py
|
Python
|
gpl-3.0
| 9,974 | 0.008823 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Used to access the cr50 console and handle RMA Open
"""Open cr50 using RMA authentication.
Run RMA Open to enable CCD on Cr50. The utility can be used to get a
url that will generate an authcode to open cr50. It can also be used to
try opening cr50 with the generated authcode.
The last challenge is the only valid one, so don't generate a challenge
10 times and then use the first URL. You can only use the last one.
For RMA Open:
Connect suzyq to the dut and your workstation.
Check the basic setup with
sudo python cr50_rma_open.py -c
If the setup is broken, follow the debug print statements to try to fix
the error. Rerun until the script says Cr50 setup ok.
After the setup is verified, run the following command to generate the
challenge url
sudo python cr50_rma_open.py -g -i $HWID
Go to the URL printed by that command to generate an authcode. Once you have
the authcode, you can use it to open cr50.
sudo python cr50_rma_open.py -a $AUTHCODE
If for some reason hardware write protect doesn't get disabled during rma
open or gets enabled at some point the script can be used to disable
write protect.
sudo python cr50_rma_open.py -w
When prepping devices for the testlab, you need to enable testlab mode.
Prod cr50 images can't enable testlab mode. If the device is running a
prod image, you can skip this step.
sudo python cr50_rma_open.py -t
"""
import argparse
import glob
import logging
import re
import subprocess
import sys
import time
import serial
SCRIPT_VERSION = 5
CCD_IS_UNRESTRICTED = 1 << 0
WP_IS_DISABLED = 1 << 1
TESTLAB_IS_ENABLED = 1 << 2
RMA_OPENED = CCD_IS_UNRESTRICTED | WP_IS_DISABLED
URL = ('https://www.google.com/chromeos/partner/console/cr50reset?'
'challenge=%s&hwid=%s')
RMA_SUPPORT_PROD = '0.3.3'
RMA_SUPPORT_PREPVT = '0.4.5'
DEV_MODE_OPEN_PROD = '0.3.9'
DEV_MODE_OPEN_PREPVT = '0.4.7'
TESTLAB_PROD = '0.3.10'
CR50_USB = '18d1:5014'
CR50_LSUSB_CMD = ['lsusb', '-vd', CR50_USB]
ERASED_BID = 'ffffffff'
DEBUG_MISSING_USB = """
Unable to find Cr50 Device 18d1:5014
DEBUG MISSING USB:
- Make sure suzyq is plugged into the correct DUT port
- Try flipping the cable
- unplug the cable for 5s then plug it back in
"""
DEBUG_DEVICE = """
DEBUG DEVICE COMMUNICATION:
Issues communicating with %s
A 18d1:5014 device exists, so make sure you have selected the correct
/dev/ttyUSB
"""
DEBUG_SERIALNAME = """
DEBUG SERIALNAME:
Found the USB device, but can't match the usb serialname. Check the
serialname you passed into cr50_rma_open or try running without a
serialname.
"""
DEBUG_CONNECTION = """
DEBUG CONNECTION:
Found the USB device but can't communicate with any of the consoles.
Try running cr50_rma_open again. If it still fails, unplug the ccd cable
for 5 seconds and plug it back in.
"""
DEBUG_TOO_MANY_USB_DEVICES = """
DEBUG SELECT USB:
More than one cr50 usb device was found. Disconnect all but one device
or use the -s option with the correct usb serialname.
"""
DEBUG_ERASED_BOARD_ID = """
DEBUG ERASED BOARD ID:
If you are using a prePVT device run
/usr/share/cros/cr50-set-board-id.sh proto
If you are running a MP device, please talk to someone.
"""
DEBUG_AUTHCODE_MISMATCH = """
DEBUG AUTHCODE MISMATCH:
- Check the URL matches the one generated by the last cr50_rma_open
run.
- Check you used the correct authcode.
- Make sure the cr50 version is greater than 3.3.
- try generating another URL by rerunning the generate command and
rerunning the process.
"""
DEBUG_DUT_CONTROL_OSERROR = """
Run from chroot if you are trying to use a /dev/pts ccd servo console
"""
class RMAOpen(object):
"""Used to find the cr50 console and run RMA open"""
ENABLE_TESTLAB_CMD = 'ccd testlab enabled\n'
def __init__(self, device=None, usb_serial=None, servo_port=None, ip=None):
self.servo_port = servo_port if servo_port else '9999'
self.ip = ip
if device:
self.set_cr50_device(device)
elif servo_port:
self.find_cr50_servo_uart()
else:
self.find_cr50_device(usb_serial)
logging.info('DEVICE: %s', self.device)
self.check_version()
self.print_platform_info()
logging.info('Cr50 setup ok')
self.update_ccd_state()
self.using_ccd = self.device_is_running_with_servo_ccd()
def _dut_control(self, control):
"""Run dut-control and return the response"""
try:
cmd = ['dut-control', '-p', self.servo_port, control]
return subprocess.check_output(cmd, encoding='utf-8').strip()
except OSError:
logging.warning(DEBUG_DUT_CONTROL_OSERROR)
raise
def find_cr50_servo_uart(self):
"""Save the device used for the console.
Find the console and configure it, so it can be used with this script.
"""
self._dut_control('cr50_uart_timestamp:off')
self.device = self._dut_control('cr50_uart_pty').split(':')[-1]
def set_cr50_device(self, device):
"""Save the device used for the console"""
self.device = device
def send_cmd_get_output(self, cmd, nbytes=0):
"""Send a cr50 command and get the output
Args:
cmd: The cr50 command string
nbytes: The number of bytes to read from the console. If 0 read all
of the console output.
Returns:
The command output
"""
try:
ser = serial.Serial(self.device, timeout=1)
except OSError:
logging.warning('Permission denied %s', self.device)
logging.warning('Try running cr50_rma_open with sudo')
raise
write_cmd = cmd + '\n\n'
ser.write(write_cmd.encode('utf-8'))
if nbytes:
output = ser.read(nbytes)
else:
output = ser.readall()
ser.close()
output = output.decode('utf-8').strip() if output else ''
# Return only the command output
split_cmd = cmd + '\r'
if cmd and split_cmd in output:
return ''.join(output.rpartition(split_cmd)[1::]).split('>')[0]
return output
def device_is_running_with_servo_ccd(self):
"""Return True if the device is a servod ccd console"""
# servod uses /dev/pts consoles. Non-servod uses /dev/ttyUSBX
if '/dev/pts' not in self.device:
return False
        # If cr50 doesn't show rdd is connected, the device must not be
# a ccd device
if 'Rdd: connected' not in self.send_cmd_get_output('ccdstate'):
return False
# Check if the servod is running with ccd. This requires the script
# is run in the chroot, so run it last.
if 'ccd_cr50' not in self._dut_control('servo_type'):
return False
logging.info('running through servod ccd')
return True
def get_rma_challenge(self):
"""Get the rma_auth challenge
There are two challenge formats
"
ABEQ8 UGA4F AVEQP SHCKV
DGGPR N8JHG V8PNC LCHR2
T27VF PRGBS N3ZXF RCCT2
UBMKP ACM7E WUZUA A4GTN
"
and
"
generated challenge:
CBYRYBEMH2Y75TC...rest of challenge
"
support extracting the challenge from both.
Returns:
The RMA challenge with all whitespace removed.
"""
output = self.send_cmd_get_output('rma_auth').strip()
logging.info('rma_auth output:\n%s', output)
# Extract the challenge from the console output
if 'generated challenge:' in output:
return output.split('generated challenge:')[-1].strip()
challenge = ''.join(re.findall(r' \S{5}' * 4, output))
# Remove all whitespace
return re.sub(r'\s', '', challenge)
def generate_challenge_url(self, hwid):
"""Get the rma_auth challenge
Returns:
The RMA challenge with all whitespace removed.
"""
challenge = self.get_rma_challenge()
self.print_platform_info()
logging.info('CHALLENGE: %s', challenge)
logging.info('HWID: %s', hwid)
url = URL % (challenge, hwid)
logging.info('GOTO:\n %s', url)
logging.info('If the server fails to debug the challenge make sure the '
'RLZ is allowlisted')
def try_authcode(self, authcode):
"""Try opening cr50 with the authcode
Raises:
ValueError if there was no authcode match and ccd isn't open
"""
# rma_auth may cause the system to reboot. Don't wait to read all that
# output. Read the first 300 bytes and call it a day.
output = self.send_cmd_get_output('rma_auth ' + authcode, nbytes=300)
logging.info('CR50 RESPONSE: %s', output)
logging.info('waiting for cr50 reboot')
# Cr50 may be rebooting. Wait a bit
time.sleep(5)
if self.using_ccd:
# After reboot, reset the ccd endpoints
self._dut_control('power_state:ccd_reset')
# Update the ccd state after the authcode attempt
self.update_ccd_state()
authcode_match = 'process_response: success!' in output
if not self.check(CCD_IS_UNRESTRICTED):
if not authcode_match:
logging.warning(DEBUG_AUTHCODE_MISMATCH)
message = 'Authcode mismatch. Check args and url'
else:
message = 'Could not set all capability privileges to Always'
raise ValueError(message)
def wp_is_force_disabled(self):
"""Returns True if write protect is forced disabled"""
output = self.send_cmd_get_output('wp')
wp_state = output.split('Flash WP:', 1)[-1].split('\n', 1)[0].strip()
logging.info('wp: %s', wp_state)
return wp_state == 'forced disabled'
def testlab_is_enabled(self):
"""Returns True if testlab mode is enabled"""
output = self.send_cmd_get_output('ccd testlab')
testlab_state = output.split('mode')[-1].strip().lower()
logging.info('testlab: %s', testlab_state)
return testlab_state == 'enabled'
def ccd_is_restricted(self):
"""Returns True if any of the capabilities are still restricted"""
output = self.send_cmd_get_output('ccd')
if 'Capabilities' not in output:
raise ValueError('Could not get ccd output')
logging.debug('CURRENT CCD SETTINGS:\n%s', output)
restricted = 'IfOpened' in output or 'IfUnlocked' in output
logging.info('ccd: %srestricted', '' if restricted else 'Un')
return restricted
def update_ccd_state(self):
"""Get the wp and ccd state from cr50. Save it in _ccd_state"""
self._ccd_state = 0
if not self.ccd_is_restricted():
self._ccd_state |= CCD_IS_UNRESTRICTED
if self.wp_is_force_disabled():
self._ccd_state |= WP_IS_DISABLED
if self.testlab_is_enabled():
self._ccd_state |= TESTLAB_IS_ENABLED
def check(self, setting):
"""Returns true if the all of the 1s in setting are 1 in _ccd_state"""
return self._ccd_state & setting == setting
def _has_testlab_support(self):
"""Return True if you can enable testlab mode"""
# all prepvt images can enable testlab
if self.is_prepvt:
return True
return not self._running_version_is_older(DEV_MODE_OPEN_PROD)
def _capabilities_allow_open_from_console(self):
"""Return True if ccd open is Always allowed from usb"""
output = self.send_cmd_get_output('ccd')
return (re.search('OpenNoDevMode.*Always', output) and
re.search('OpenFromUSB.*Always', output))
def _requires_dev_mode_open(self):
"""Return True if the image requires dev mode to open"""
if self._capabilities_allow_open_from_console():
return False
# All prod images that support 'open' require dev mode
if not self.is_prepvt:
return True
return not self._running_version_is_older(DEV_MODE_OPEN_PREPVT)
def _run_on_dut(self, command):
"""Run the command on the DUT."""
return subprocess.check_output(['ssh', self.ip, command],
encoding='utf-8')
def _open_in_dev_mode(self):
"""Open Cr50 when it's in dev mode"""
output = self.send_cmd_get_output('ccd')
# If the device is already open, nothing needs to be done.
if 'State: Open' not in output:
# Verify the device is in devmode before trying to run open.
if 'dev_mode' not in output:
logging.warning('Enter dev mode to open ccd or update to %s',
TESTLAB_PROD)
raise ValueError('DUT not in dev mode')
if not self.ip:
logging.warning("If your DUT doesn't have ssh support, run "
"'gsctool -a -o' from the AP")
raise ValueError('Cannot run ccd open without dut ip')
self._run_on_dut('gsctool -a -o')
# Wait >1 second for cr50 to update ccd state
time.sleep(3)
output = self.send_cmd_get_output('ccd')
if 'State: Open' not in output:
raise ValueError('Could not open cr50')
logging.info('ccd is open')
def enable_testlab(self):
"""Disable write protect"""
if not self._has_testlab_support():
            logging.warning('Testlab mode is not supported in prod images')
return
# Some cr50 images need to be in dev mode before they can be opened.
if self._requires_dev_mode_open():
self._open_in_dev_mode()
else:
self.send_cmd_get_output('ccd open')
        logging.info('Enabling testlab mode requires pressing the power button.')
logging.info('Once the process starts keep tapping the power button '
'for 10 seconds.')
input("Press Enter when you're ready to start...")
end_time = time.time() + 15
ser = serial.Serial(self.device, timeout=1)
printed_lines = ''
output = ''
# start ccd testlab enable
ser.write(self.ENABLE_TESTLAB_CMD.encode('utf-8'))
logging.info('start pressing the power button\n\n')
# Print all of the cr50 output as we get it, so the user will have more
# information about pressing the power button. Tapping the power button
# a couple of times should do it, but this will give us more confidence
# the process is still running/worked.
try:
while time.time() < end_time:
output += ser.read(100).decode('utf-8')
full_lines = output.rsplit('\n', 1)[0]
new_lines = full_lines
if printed_lines:
new_lines = full_lines.split(printed_lines, 1)[-1].strip()
logging.info('\n%s', new_lines)
printed_lines = full_lines
# Make sure the process hasn't ended. If it has, print the last
# of the output and exit.
new_lines = output.split(printed_lines, 1)[-1]
if 'CCD test lab mode enabled' in output:
                # print the last of the output
logging.info(new_lines)
break
elif 'Physical presence check timeout' in output:
logging.info(new_lines)
logging.warning('Did not detect power button press in time')
raise ValueError('Could not enable testlab mode try again')
finally:
ser.close()
# Wait for the ccd hook to update things
time.sleep(3)
# Update the state after attempting to disable write protect
self.update_ccd_state()
if not self.check(TESTLAB_IS_ENABLED):
raise ValueError('Could not enable testlab mode try again')
def wp_disable(self):
"""Disable write protect"""
logging.info('Disabling write protect')
self.send_cmd_get_output('wp disable')
# Update the state after attempting to disable write protect
self.update_ccd_state()
if not self.check(WP_IS_DISABLED):
raise ValueError('Could not disable write protect')
def check_version(self):
"""Make sure cr50 is running a version that supports RMA Open"""
output = self.send_cmd_get_output('version')
if not output.strip():
logging.warning(DEBUG_DEVICE, self.device)
raise ValueError('Could not communicate with %s' % self.device)
version = re.search(r'RW.*\* ([\d\.]+)/', output).group(1)
logging.info('Running Cr50 Version: %s', version)
self.running_ver_fields = [int(field) for field in version.split('.')]
# prePVT images have even major versions. Prod have odd
self.is_prepvt = self.running_ver_fields[1] % 2 == 0
rma_support = RMA_SUPPORT_PREPVT if self.is_prepvt else RMA_SUPPORT_PROD
logging.info('%s RMA support added in: %s',
'prePVT' if self.is_prepvt else 'prod', rma_support)
if not self.is_prepvt and self._running_version_is_older(TESTLAB_PROD):
raise ValueError('Update cr50. No testlab support in old prod '
'images.')
if self._running_version_is_older(rma_support):
raise ValueError('%s does not have RMA support. Update to at '
'least %s' % (version, rma_support))
def _running_version_is_older(self, target_ver):
"""Returns True if running version is older than target_ver."""
target_ver_fields = [int(field) for field in target_ver.split('.')]
for i, field in enumerate(self.running_ver_fields):
if field > int(target_ver_fields[i]):
return False
return True
def device_matches_devid(self, devid, device):
"""Return True if the device matches devid.
Use the sysinfo output from device to determine if it matches devid
Returns:
True if sysinfo from device shows the given devid. False if there
is no output or sysinfo doesn't contain the devid.
"""
self.set_cr50_device(device)
sysinfo = self.send_cmd_get_output('sysinfo')
# Make sure there is some output, and it shows it's from Cr50
if not sysinfo or 'cr50' not in sysinfo:
return False
logging.debug('Sysinfo output: %s', sysinfo)
# The cr50 device id should be in the sysinfo output, if we found
# the right console. Make sure it is
return devid in sysinfo
def find_cr50_device(self, usb_serial):
"""Find the cr50 console device
The Cr50 usb serialname matches the cr50 devid. Convert the serialname
to devid. Use that to check all of the consoles and find cr50's.
Args:
usb_serial: an optional string. The serialname of the cr50 usb
device
Raises:
ValueError if the console can't be found with the given serialname
"""
usb_serial = self.find_cr50_usb(usb_serial)
logging.info('SERIALNAME: %s', usb_serial)
devid = '0x' + ' 0x'.join(usb_serial.lower().split('-'))
logging.info('DEVID: %s', devid)
# Get all the usb devices
devices = glob.glob('/dev/ttyUSB*')
# Typically Cr50 has the lowest number. Sort the devices, so we're more
# likely to try the cr50 console first.
devices.sort()
# Find the one that is the cr50 console
for device in devices:
logging.info('testing %s', device)
if self.device_matches_devid(devid, device):
logging.info('found device: %s', device)
return
logging.warning(DEBUG_CONNECTION)
raise ValueError('Found USB device, but could not communicate with '
'cr50 console')
def print_platform_info(self):
"""Print the cr50 BID RLZ code"""
bid_output = self.send_cmd_get_output('bid')
bid = re.search(r'Board ID: (\S+?)[:,]', bid_output).group(1)
if bid == ERASED_BID:
logging.warning(DEBUG_ERASED_BOARD_ID)
raise ValueError('Cannot run RMA Open when board id is erased')
bid = int(bid, 16)
chrs = [chr((bid >> (8 * i)) & 0xff) for i in range(4)]
logging.info('RLZ: %s', ''.join(chrs[::-1]))
@staticmethod
def find_cr50_usb(usb_serial):
"""Make sure the Cr50 USB device exists"""
try:
output = subprocess.check_output(CR50_LSUSB_CMD, encoding='utf-8')
except:
logging.warning(DEBUG_MISSING_USB)
raise ValueError('Could not find Cr50 USB device')
serialnames = re.findall(r'iSerial +\d+ (\S+)\s', output)
if usb_serial:
if usb_serial not in serialnames:
logging.warning(DEBUG_SERIALNAME)
raise ValueError('Could not find usb device "%s"' % usb_serial)
return usb_serial
if len(serialnames) > 1:
logging.info('Found Cr50 device serialnames %s',
', '.join(serialnames))
logging.warning(DEBUG_TOO_MANY_USB_DEVICES)
raise ValueError('Too many cr50 usb devices')
return serialnames[0]
def print_dut_state(self):
"""Print CCD RMA and testlab mode state."""
if not self.check(CCD_IS_UNRESTRICTED):
logging.info('CCD is still restricted.')
logging.info('Run cr50_rma_open.py -g -i $HWID to generate a url')
logging.info('Run cr50_rma_open.py -a $AUTHCODE to open cr50 with '
'an authcode')
elif not self.check(WP_IS_DISABLED):
logging.info('WP is still enabled.')
logging.info('Run cr50_rma_open.py -w to disable write protect')
if self.check(RMA_OPENED):
logging.info('RMA Open complete')
if not self.check(TESTLAB_IS_ENABLED) and self.is_prepvt:
logging.info('testlab mode is disabled.')
logging.info('If you are prepping a device for the testlab, you '
'should enable testlab mode.')
logging.info('Run cr50_rma_open.py -t to enable testlab mode')
def parse_args(argv):
"""Get cr50_rma_open args."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-g', '--generate_challenge', action='store_true',
help='Generate Cr50 challenge. Must be used with -i')
parser.add_argument('-t', '--enable_testlab', action='store_true',
help='enable testlab mode')
parser.add_argument('-w', '--wp_disable', action='store_true',
help='Disable write protect')
parser.add_argument('-c', '--check_connection', action='store_true',
help='Check cr50 console connection works')
parser.add_argument('-s', '--serialname', type=str, default='',
help='The cr50 usb serialname')
parser.add_argument('-D', '--debug', action='store_true',
help='print debug messages')
parser.add_argument('-d', '--device', type=str, default='',
help='cr50 console device ex /dev/ttyUSB0')
parser.add_argument('-i', '--hwid', type=str, default='',
help='The board hwid. Needed to generate a challenge')
parser.add_argument('-a', '--authcode', type=str, default='',
help='The authcode string from the challenge url')
parser.add_argument('-P', '--servo_port', type=str, default='',
help='the servo port')
parser.add_argument('-I', '--ip', type=str, default='',
help='The DUT IP. Necessary to do ccd open')
return parser.parse_args(argv)
def main(argv):
"""Run cr50 rma open."""
opts = parse_args(argv)
loglevel = logging.INFO
log_format = '%(levelname)7s'
if opts.debug:
loglevel = logging.DEBUG
log_format += ' - %(lineno)3d:%(funcName)-15s'
log_format += ' - %(message)s'
logging.basicConfig(level=loglevel, format=log_format)
tried_authcode = False
logging.info('Running cr50_rma_open version %s', SCRIPT_VERSION)
cr50_rma_open = RMAOpen(opts.device, opts.serialname, opts.servo_port,
opts.ip)
if opts.check_connection:
sys.exit(0)
if not cr50_rma_open.check(CCD_IS_UNRESTRICTED):
if opts.generate_challenge:
if not opts.hwid:
logging.warning('--hwid necessary to generate challenge url')
sys.exit(0)
cr50_rma_open.generate_challenge_url(opts.hwid)
sys.exit(0)
elif opts.authcode:
logging.info('Using authcode: %s', opts.authcode)
cr50_rma_open.try_authcode(opts.authcode)
tried_authcode = True
if not cr50_rma_open.check(WP_IS_DISABLED) and (tried_authcode or
opts.wp_disable):
if not cr50_rma_open.check(CCD_IS_UNRESTRICTED):
raise ValueError("Can't disable write protect unless ccd is "
"open. Run through the rma open process first")
if tried_authcode:
logging.warning('RMA Open did not disable write protect. File a '
'bug')
logging.warning('Trying to disable it manually')
cr50_rma_open.wp_disable()
if not cr50_rma_open.check(TESTLAB_IS_ENABLED) and opts.enable_testlab:
if not cr50_rma_open.check(CCD_IS_UNRESTRICTED):
            raise ValueError("Can't enable testlab mode unless ccd is open. "
"Run through the rma open process first")
cr50_rma_open.enable_testlab()
cr50_rma_open.print_dut_state()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
coreboot/chrome-ec
|
extra/cr50_rma_open/cr50_rma_open.py
|
Python
|
bsd-3-clause
| 26,490 | 0.000227 |
""" pydatastream main module
(c) Vladimir Filimonov, 2013 - 2021
"""
import warnings
import json
import math
from functools import wraps
import requests
import pandas as pd
###############################################################################
_URL = 'https://product.datastream.com/dswsclient/V1/DSService.svc/rest/'
_FLDS_XREF = ('DSCD,EXMNEM,GEOGC,GEOGN,IBTKR,INDC,INDG,INDM,INDX,INDXEG,'
'INDXFS,INDXL,INDXS,ISIN,ISINID,LOC,MNEM,NAME,SECD,TYPE'.split(','))
_FLDS_XREF_FUT = ('MNEM,NAME,FLOT,FEX,GEOGC,GEOGN,EXCODE,LTDT,FUTBDATE,PCUR,ISOCUR,'
'TICKS,TICKV,TCYCLE,TPLAT'.split(','))
_ASSET_TYPE_CODES = {'BD': 'Bonds & Convertibles',
'BDIND': 'Bond Indices & Credit Default Swaps',
'CMD': 'Commodities',
'EC': 'Economics',
'EQ': 'Equities',
'EQIND': 'Equity Indices',
'EX': 'Exchange Rates',
'FT': 'Futures',
'INT': 'Interest Rates',
'INVT': 'Investment Trusts',
'OP': 'Options',
'UT': 'Unit Trusts',
'EWT': 'Warrants',
'NA': 'Not available'}
###############################################################################
_INFO = """PyDatastream documentation (GitHub):
https://github.com/vfilimonov/pydatastream
Datastream Navigator:
http://product.datastream.com/navigator/
Official support
https://customers.reuters.com/sc/Contactus/simple?product=Datastream&env=PU&TP=Y
Webpage for testing REST API requests
http://product.datastream.com/dswsclient/Docs/TestRestV1.aspx
Documentation for DSWS API
http://product.datastream.com/dswsclient/Docs/Default.aspx
Datastream Web Service Developer community
https://developers.refinitiv.com/eikon-apis/datastream-web-service
"""
###############################################################################
###############################################################################
def _convert_date(date):
""" Convert date to YYYY-MM-DD """
if date is None:
return ''
if isinstance(date, str) and (date.upper() == 'BDATE'):
return 'BDATE'
return pd.Timestamp(date).strftime('%Y-%m-%d')
def _parse_dates(dates):
""" Parse dates
Example:
/Date(1565817068486) -> 2019-08-14T21:11:08.486000000
/Date(1565568000000+0000) -> 2019-08-12T00:00:00.000000000
"""
if dates is None:
return None
if isinstance(dates, str):
return pd.Timestamp(_parse_dates([dates])[0])
res = [int(_[6:(-7 if '+' in _ else -2)]) for _ in dates]
return pd.to_datetime(res, unit='ms').values
class DatastreamException(Exception):
""" Exception class for Datastream """
###############################################################################
def lazy_property(fn):
""" Lazy-evaluated property of an object """
attr_name = '__lazy__' + fn.__name__
@property
@wraps(fn)
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
###############################################################################
# Main Datastream class
###############################################################################
class Datastream():
""" Python interface to the Refinitiv Datastream API via Datastream Web
Services (DSWS).
"""
def __init__(self, username, password, raise_on_error=True, proxy=None, **kwargs):
"""Establish a connection to the Python interface to the Refinitiv Datastream
(former Thomson Reuters Datastream) API via Datastream Web Services (DSWS).
username / password - credentials for the DSWS account.
raise_on_error - If True then error request will raise a "DatastreamException",
otherwise either empty dataframe or partially
retrieved data will be returned
proxy - URL for the proxy server. Valid values:
(a) None: no proxy is used
(b) string of format "host:port" or "username:password@host:port"
Note: credentials will be saved in memory. In case if this is not
desirable for security reasons, call the constructor having None
instead of values and manually call renew_token(username, password)
when needed.
A custom REST API url (if necessary for some reasons) could be provided
via "url" parameter.
"""
self.raise_on_error = raise_on_error
self.last_request = None
self.last_metadata = None
self._last_response_raw = None
# Setting up proxy parameters if necessary
if isinstance(proxy, str):
self._proxy = {'http': proxy, 'https': proxy}
elif proxy is None:
self._proxy = None
else:
raise ValueError('Proxy parameter should be either None or string')
self._url = kwargs.pop('url', _URL)
self._username = username
self._password = password
# request new token
self.renew_token(username, password)
###########################################################################
@staticmethod
def info():
""" Some useful links """
print(_INFO)
###########################################################################
def _api_post(self, method, request):
""" Call to the POST method of DSWS API """
url = self._url + method
self.last_request = {'url': url, 'request': request, 'error': None}
self.last_metadata = None
try:
res = requests.post(url, json=request, proxies=self._proxy)
self.last_request['response'] = res.text
except Exception as e:
self.last_request['error'] = str(e)
raise
try:
response = self.last_request['response'] = json.loads(self.last_request['response'])
except json.JSONDecodeError as e:
raise DatastreamException('Server response could not be parsed') from e
if 'Code' in response:
code = response['Code']
if response['SubCode'] is not None:
code += '/' + response['SubCode']
errormsg = f'{code}: {response["Message"]}'
self.last_request['error'] = errormsg
raise DatastreamException(errormsg)
return self.last_request['response']
###########################################################################
def renew_token(self, username=None, password=None):
""" Request new token from the server """
if username is None or password is None:
warnings.warn('Username or password is not provided - could not renew token')
return
data = {"UserName": username, "Password": password}
self._token = dict(self._api_post('GetToken', data))
self._token['TokenExpiry'] = _parse_dates(self._token['TokenExpiry']).tz_localize('UTC')
        # Token is invalidated 15 minutes before expiration time
# Note: According to https://github.com/vfilimonov/pydatastream/issues/27
# tokens do not always respect the (as of now 24 hours) expiry time
# So for this reason I limit the token life at 6 hours.
self._token['RenewTokenAt'] = min(self._token['TokenExpiry'] - pd.Timedelta('15m'),
pd.Timestamp.utcnow() + pd.Timedelta('6H'))
@property
def _token_is_expired(self):
if self._token is None:
return True
if pd.Timestamp.utcnow() > self._token['RenewTokenAt']:
return True
return False
@property
def token(self):
""" Return actual token and renew it if necessary. """
if self._token_is_expired:
self.renew_token(self._username, self._password)
return self._token['TokenValue']
###########################################################################
def request(self, request):
""" Generic wrapper to request data in raw format. Request should be
properly formatted dictionary (see construct_request() method).
"""
data = {'DataRequest': request, 'TokenValue': self.token}
return self._api_post('GetData', data)
def request_many(self, list_of_requests):
""" Generic wrapper to request multiple requests in raw format.
list_of_requests should be a list of properly formatted dictionaries
(see construct_request() method).
"""
data = {'DataRequests': list_of_requests, 'TokenValue': self.token}
return self._api_post('GetDataBundle', data)
###########################################################################
@staticmethod
def construct_request(ticker, fields=None, date_from=None, date_to=None,
freq=None, static=False, IsExpression=None,
return_names=True):
"""Construct a request string for querying TR DSWS.
tickers - ticker or symbol, or list of symbols
fields - field or list of fields.
date_from, date_to - date range (used only if "date" is not specified)
freq - frequency of data: daily('D'), weekly('W') or monthly('M')
static - True for static (snapshot) requests
IsExpression - if True, it will explicitly assume that list of tickers
contain expressions. Otherwise it will try to infer it.
Some of available fields:
P - adjusted closing price
PO - opening price
PH - high price
PL - low price
VO - volume, which is expressed in 1000's of shares.
UP - unadjusted price
OI - open interest
MV - market value
EPS - earnings per share
DI - dividend index
MTVB - market to book value
PTVB - price to book value
...
The full list of data fields is available at http://dtg.tfn.com/.
"""
req = {'Instrument': {}, 'Date': {}, 'DataTypes': []}
# Instruments
if isinstance(ticker, str):
# ticker = ticker ## Nothing to change
is_list = None
elif hasattr(ticker, '__len__'):
ticker = ','.join(ticker)
is_list = True
else:
raise ValueError('ticker should be either string or list/array of strings')
# Properties of instruments
props = []
if is_list or (is_list is None and ',' in ticker):
props.append({'Key': 'IsList', 'Value': True})
if IsExpression or (IsExpression is None and
('#' in ticker or '(' in ticker or ')' in ticker)):
props.append({'Key': 'IsExpression', 'Value': True})
if return_names:
props.append({'Key': 'ReturnName', 'Value': True})
req['Instrument'] = {'Value': ticker, 'Properties': props}
# DataTypes
props = [{'Key': 'ReturnName', 'Value': True}] if return_names else []
if fields is not None:
if isinstance(fields, str):
req['DataTypes'].append({'Value': fields, 'Properties': props})
elif isinstance(fields, list) and fields:
for f in fields:
req['DataTypes'].append({'Value': f, 'Properties': props})
else:
raise ValueError('fields should be either string or list/array of strings')
# Dates
req['Date'] = {'Start': _convert_date(date_from),
'End': _convert_date(date_to),
'Frequency': freq if freq is not None else '',
'Kind': 0 if static else 1}
return req
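    # Illustrative only (hypothetical ticker): with the defaults above, a call
    # like construct_request('U:IBM', 'P', date_from='2020-01-01', freq='D')
    # would return a dict of roughly this shape:
    #
    #   {'Instrument': {'Value': 'U:IBM',
    #                   'Properties': [{'Key': 'ReturnName', 'Value': True}]},
    #    'DataTypes': [{'Value': 'P',
    #                   'Properties': [{'Key': 'ReturnName', 'Value': True}]}],
    #    'Date': {'Start': '2020-01-01', 'End': '', 'Frequency': 'D', 'Kind': 1}}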
###########################################################################
@staticmethod
def _parse_meta(meta):
""" Parse SymbolNames, DataTypeNames and Currencies """
res = {}
for key in meta:
if key in ('DataTypeNames', 'SymbolNames'):
if not meta[key]: # None or empty list
res[key] = None
else:
names = pd.DataFrame(meta[key]).set_index('Key')['Value']
names.index.name = key.replace('Names', '')
names.name = 'Name'
res[key] = names
elif key == 'Currencies':
cur = pd.concat({key: pd.DataFrame(meta['Currencies'][key])
for key in meta['Currencies']})
res[key] = cur.xs('Currency', level=1).T
else:
res[key] = meta[key]
return res
def _parse_one(self, res):
""" Parse one response (either 'DataResponse' or one of 'DataResponses')"""
data = res['DataTypeValues']
dates = _parse_dates(res['Dates'])
res_meta = {_: res[_] for _ in res if _ not in ['DataTypeValues', 'Dates']}
# Parse values
meta = {}
res = {}
for d in data:
data_type = d['DataType']
res[data_type] = {}
meta[data_type] = {}
for v in d['SymbolValues']:
value = v['Value']
if v['Type'] == 0: # Error
if self.raise_on_error:
raise DatastreamException(f'"{v["Symbol"]}"("{data_type}"): {value}')
res[data_type][v['Symbol']] = math.nan
elif v['Type'] == 4: # Date
res[data_type][v['Symbol']] = _parse_dates(value)
else:
res[data_type][v['Symbol']] = value
meta[data_type][v['Symbol']] = {_: v[_] for _ in v if _ != 'Value'}
if dates is None:
# Fix - if dates are not returned, then simply use integer index
dates = [0]
res[data_type] = pd.DataFrame(res[data_type], index=dates)
res = pd.concat(res).unstack(level=1).T.sort_index()
res_meta['Currencies'] = meta
return res, self._parse_meta(res_meta)
def parse_response(self, response, return_metadata=False):
""" Parse raw JSON response
If return_metadata is True, then result is tuple (dataframe, metadata),
where metadata is a dictionary. Otherwise only dataframe is returned.
In case of response being constructed from several requests (method
request_many()), then the result is a list of parsed responses. Here
again, if return_metadata is True then each element is a tuple
(dataframe, metadata), otherwise each element is a dataframe.
"""
if 'DataResponse' in response: # Single request
res, meta = self._parse_one(response['DataResponse'])
self.last_metadata = meta
return (res, meta) if return_metadata else res
if 'DataResponses' in response: # Multiple requests
results = [self._parse_one(r) for r in response['DataResponses']]
self.last_metadata = [_[1] for _ in results]
return results if return_metadata else [_[0] for _ in results]
raise DatastreamException('Neither DataResponse nor DataResponses are found')
###########################################################################
def usage_statistics(self, date=None, months=1):
""" Request usage statistics
date - if None, stats for the current month will be fetched,
otherwise for the month which contains the specified date.
months - number of consecutive months prior to "date" for which
stats should be retrieved.
"""
if date is None:
date = pd.Timestamp('now').normalize() - pd.offsets.MonthBegin()
req = [self.construct_request('STATS', 'DS.USERSTATS',
date-pd.offsets.MonthBegin(n), static=True)
for n in range(months)][::-1]
res = self.parse_response(self.request_many(req))
res = pd.concat(res)
res.index = res['Start Date'].dt.strftime('%B %Y')
res.index.name = None
return res
###########################################################################
def fetch(self, tickers, fields=None, date_from=None, date_to=None,
freq=None, static=False, IsExpression=None, return_metadata=False,
always_multiindex=False):
"""Fetch the data from Datastream for a set of tickers and parse results.
tickers - ticker or symbol, or list of symbols
fields - field or list of fields
date_from, date_to - date range (used only if "date" is not specified)
freq - frequency of data: daily('D'), weekly('W') or monthly('M')
static - True for static (snapshot) requests
IsExpression - if True, it will explicitly assume that list of tickers
contain expressions. Otherwise it will try to infer it.
return_metadata - if True, then tuple (data, metadata) will be returned,
otherwise only data
always_multiindex - if True, then even for 1 ticker requested, resulting
dataframe will have multiindex (ticker, date).
If False (default), then request of single ticker will
result in a dataframe indexed by date only.
Notes: - several fields should be passed as a list, and not as a
comma-separated string!
- if no fields are provided, then the default field will be
fetched. In this case the column might not have any name
in the resulting dataframe.
Result format depends on the number of requested tickers and fields:
- 1 ticker - DataFrame with fields in column names
(in order to keep multiindex even for single
ticker, set `always_multiindex` to True)
- several tickers - DataFrame with fields in column names and
MultiIndex (ticker, date)
- static request - DataFrame indexed by tickers and with fields
in column names
Some of available fields:
P - adjusted closing price
PO - opening price
PH - high price
PL - low price
VO - volume, which is expressed in 1000's of shares.
UP - unadjusted price
OI - open interest
MV - market value
EPS - earnings per share
DI - dividend index
MTVB - market to book value
PTVB - price to book value
...
"""
req = self.construct_request(tickers, fields, date_from, date_to,
freq=freq, static=static,
IsExpression=IsExpression,
return_names=return_metadata)
raw = self.request(req)
self._last_response_raw = raw
data, meta = self.parse_response(raw, return_metadata=True)
if static:
# Static request - drop date from MultiIndex
data = data.reset_index(level=1, drop=True)
elif len(data.index.levels[0]) == 1:
# Only one ticker - drop tickers from MultiIndex
if not always_multiindex:
data = data.reset_index(level=0, drop=True)
return (data, meta) if return_metadata else data
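    # A minimal usage sketch (credentials and tickers are placeholders, not
    # real ones): instantiate the class once and reuse it for several fetches.
    #
    #   DS = Datastream(username='DS:XXXX000', password='...')
    #   ts = DS.fetch(['@AAPL', 'U:MMM'], ['P', 'MV'],
    #                 date_from='2020-01-01', freq='D')
    #   snapshot = DS.fetch(['@AAPL', 'U:MMM'], ['NAME', 'ISIN'], static=True)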
###########################################################################
# Specific fetching methods
###########################################################################
def get_OHLCV(self, ticker, date_from=None, date_to=None):
"""Get Open, High, Low, Close prices and daily Volume for a given ticker.
ticker - ticker or symbol
date_from, date_to - date range (used only if "date" is not specified)
"""
return self.fetch(ticker, ['PO', 'PH', 'PL', 'P', 'VO'], date_from, date_to,
freq='D', return_metadata=False)
def get_OHLC(self, ticker, date_from=None, date_to=None):
"""Get Open, High, Low and Close prices for a given ticker.
ticker - ticker or symbol
date_from, date_to - date range (used only if "date" is not specified)
"""
return self.fetch(ticker, ['PO', 'PH', 'PL', 'P'], date_from, date_to,
freq='D', return_metadata=False)
def get_price(self, ticker, date_from=None, date_to=None):
"""Get Close price for a given ticker.
ticker - ticker or symbol
date_from, date_to - date range (used only if "date" is not specified)
"""
return self.fetch(ticker, 'P', date_from, date_to,
freq='D', return_metadata=False)
###########################################################################
def get_constituents(self, index_ticker, only_list=False):
""" Get a list of all constituents of a given index.
index_ticker - Datastream ticker for index
only_list - request only list of symbols. By default the method
retrieves many extra fields with information (various
mnemonics and codes). This might pose some problems
for large indices like Russel-3000. If only_list=True,
then only the list of symbols and names are retrieved.
NOTE: In contrast to retired DWE interface, DSWS does not support
fetching historical lists of constituents.
"""
if only_list:
fields = ['MNEM', 'NAME']
else:
fields = _FLDS_XREF
return self.fetch('L' + index_ticker, fields, static=True)
def get_all_listings(self, ticker):
""" Get all listings and their symbols for the given security
"""
res = self.fetch(ticker, 'QTEALL', static=True)
columns = list({_[:2] for _ in res.columns})
# Reformat the output
df = {}
for ind in range(1, 21):
cols = {f'{c}{ind:02d}': c for c in columns}
df[ind] = res[cols.keys()].rename(columns=cols)
df = pd.concat(df).swaplevel(0).sort_index()
df = df[~(df == '').all(axis=1)]
return df
def get_codes(self, ticker):
""" Get codes and symbols for the given securities
"""
return self.fetch(ticker, _FLDS_XREF, static=True)
###########################################################################
def get_asset_types(self, symbols):
""" Get asset types for a given list of symbols
Note: the method does not
"""
res = self.fetch(symbols, 'TYPE', static=True, IsExpression=False)
names = pd.Series(_ASSET_TYPE_CODES).to_frame(name='AssetTypeName')
res = res.join(names, on='TYPE')
# try to preserve the order
if isinstance(symbols, (list, pd.Series)):
try:
res = res.loc[symbols]
except KeyError:
pass # OK, we don't keep the order if not possible
return res
###########################################################################
def get_futures_contracts(self, market_code, only_list=False, include_dead=False):
""" Get list of all contracts for a given futures market
market_code - Datastream mnemonic for a market (e.g. LLC for the
Brent Crude Oil, whose contracts have mnemonics like
LLC0118 for January 2018)
only_list - request only list of symbols. By default the method
retrieves many extra fields with information (currency,
lot size, last trading date, etc). If only_list=True,
then only the list of symbols and names are retrieved.
include_dead - if True, all delisted/expired contracts will be fetched
as well. Otherwise only active contracts will be returned.
"""
if only_list:
fields = ['MNEM', 'NAME']
else:
fields = _FLDS_XREF_FUT
res = self.fetch(f'LFUT{market_code}L', fields, static=True)
res['active'] = True
if include_dead:
res2 = self.fetch(f'LFUT{market_code}D', fields, static=True)
res2['active'] = False
res = pd.concat([res, res2])
return res[res.MNEM != 'NA'] # Drop lines with empty mnemonics
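    # For example, using the market mnemonic from the docstring above
    # (Brent Crude Oil):
    #
    #   contracts = DS.get_futures_contracts('LLC', include_dead=True)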
###########################################################################
def get_epit_vintage_matrix(self, mnemonic, date_from='1951-01-01', date_to=None):
""" Construct the vintage matrix for a given economic series.
Requires subscription to Thomson Reuters Economic Point-in-Time (EPiT).
Vintage matrix represents a DataFrame where columns correspond to a
particular period (quarter or month) for the reported statistic and
index represents timestamps at which these values were released by
the respective official agency. I.e. every line corresponds to all
available reported values by the given date.
For example:
>> DS.get_epit_vintage_matrix('USGDP...D', date_from='2015-01-01')
2015-02-15 2015-05-15 2015-08-15 2015-11-15 \
2015-04-29 16304.80 NaN NaN NaN
2015-05-29 16264.10 NaN NaN NaN
2015-06-24 16287.70 NaN NaN NaN
2015-07-30 16177.30 16270.400 NaN NaN
2015-08-27 16177.30 16324.300 NaN NaN
2015-09-25 16177.30 16333.600 NaN NaN
2015-10-29 16177.30 16333.600 16394.200 NaN
2015-11-24 16177.30 16333.600 16417.800 NaN
From the matrix it is seen for example, that the advance GDP estimate
for 2015-Q1 (corresponding to 2015-02-15) was released on 2015-04-29
and was equal to 16304.80 (B USD). The first revision (16264.10) has
happened on 2015-05-29 and the second (16287.70) - on 2015-06-24.
On 2015-07-30 the advance GDP figure for 2015-Q2 was released
(16270.400) together with update on the 2015-Q1 value (16177.30)
and so on.
"""
# Get first available date from the REL1 series
rel1 = self.fetch(mnemonic, 'REL1', date_from=date_from, date_to=date_to)
date_0 = rel1.dropna().index[0]
# All release dates
reld123 = self.fetch(mnemonic, ['RELD1', 'RELD2', 'RELD3'],
date_from=date_0, date_to=date_to).dropna(how='all')
# Fetch all vintages
res = {}
for date in reld123.index:
try:
_tmp = self.fetch(mnemonic, 'RELV', date_from=date_0, date_to=date).dropna()
except DatastreamException:
continue
res[date] = _tmp
return pd.concat(res).RELV.unstack()
###########################################################################
def get_epit_revisions(self, mnemonic, period, relh50=False):
""" Return initial estimate and first revisions of a given economic time
series and a given period.
Requires subscription to Thomson Reuters Economic Point-in-Time (EPiT).
"Period" parameter should represent a date which falls within a time
period of interest, e.g. 2016 Q4 could be requested with the
period='2016-11-15' for example.
By default up to 20 values is returned unless argument "relh50" is
set to True (in which case up to 50 values is returned).
"""
if relh50:
data = self.fetch(mnemonic, 'RELH50', date_from=period, static=True)
else:
data = self.fetch(mnemonic, 'RELH', date_from=period, static=True)
data = data.iloc[0]
# Parse the response
res = {data.loc['RELHD%02d' % i]: data.loc['RELHV%02d' % i]
for i in range(1, 51 if relh50 else 21)
if data.loc['RELHD%02d' % i] != ''}
res = pd.Series(res, name=data.loc['RELHP ']).sort_index()
return res
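    # For example, revisions of the US GDP figure for 2016-Q4 could be
    # requested as (mnemonic as in the vintage-matrix example above):
    #
    #   DS.get_epit_revisions('USGDP...D', period='2016-11-15')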
###########################################################################
def get_next_release_dates(self, mnemonics, n_releases=1):
""" Return the next date of release (NDoR) for a given economic series.
Could return results for up to 12 releases in advance.
Returned fields:
DATE - Date of release (or start of range where the exact
date is not available)
DATE_LATEST - End of range when a range is given
TIME_GMT - Expected time of release (for "official" dates only)
DATE_FLAG - Indicates whether the dates are "official" ones from
the source, or estimated by Thomson Reuters where
"officials" not available
REF_PERIOD - Corresponding reference period for the release
TYPE - Indicates whether the release is for a new reference
period ("NewValue") or an update to a period for which
there has already been a release ("ValueUpdate")
"""
if n_releases > 12:
            raise Exception('Only up to 12 releases in advance can be requested')
if n_releases < 1:
raise Exception('n_releases smaller than 1 does not make sense')
# Fetch and parse
reqs = [self.construct_request(mnemonics, f'DS.NDOR{i+1}', static=True)
for i in range(n_releases)]
res_parsed = self.parse_response(self.request_many(reqs))
# Rearrange the output
res = []
for r in res_parsed:
x = r.reset_index(level=1, drop=True)
x.index.name = 'Mnemonic'
# Index of the release counting from now
ndor_idx = [_.split('_')[0] for _ in x.columns][0]
x['ReleaseNo'] = int(ndor_idx.replace('DS.NDOR', ''))
x = x.set_index('ReleaseNo', append=True)
x.columns = [_.replace(ndor_idx+'_', '') for _ in x.columns]
res.append(x)
res = pd.concat(res).sort_index()
for col in ['DATE', 'DATE_LATEST', 'REF_PERIOD']:
res[col] = pd.to_datetime(res[col], errors='coerce')
return res
###########################################################################
@lazy_property
def vacations_list(self):
""" List of mnemonics for holidays in different countries """
res = self.fetch('HOLIDAYS', ['MNEM', 'ENAME', 'GEOGN', 'GEOGC'], static=True)
return res[res['MNEM'] != 'NA'].sort_values('GEOGC')
    def get_trading_days(self, countries, date_from=None, date_to=None):
        """ Get list of trading dates for given countries (specified by ISO-2c)
Returning dataframe will contain values 1 and NaN, where 1 identifies
the business day.
So by multiplying this list with the price time series it will remove
padded values on non-trading days.
Example:
DS.get_trading_days(['US', 'UK', 'RS'], date_from='2010-01-01')
"""
if isinstance(countries, str):
countries = [countries]
vacs = self.vacations_list
mnems = vacs[vacs.GEOGC.isin(countries)]
missing_isos = set(countries).difference(mnems.GEOGC)
if missing_isos:
            raise DatastreamException(f'Unknown ISO codes: {", ".join(missing_isos)}')
# By default 0 and NaN are returned, so we add 1
res = self.fetch(mnems.MNEM, date_from=date_from, date_to=date_to) + 1
if len(countries) == 1:
return res.iloc[:, 0].to_frame(name=countries[0])
return (res.iloc[:, 0].unstack(level=0)
.rename(columns=mnems.set_index('MNEM')['GEOGC'])[countries])
|
vfilimonov/pydatastream
|
pydatastream/pydatastream.py
|
Python
|
mit
| 32,857 | 0.002404 |
# coding=utf-8
# Copyright 2017 The DLT2T Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DLT2T.registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from DLT2T.utils import modality
from DLT2T.utils import registry
from DLT2T.utils import t2t_model
import tensorflow as tf
# pylint: disable=unused-variable
class ModelRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testT2TModelRegistration(self):
@registry.register_model
class MyModel1(t2t_model.T2TModel):
pass
model = registry.model("my_model1")
self.assertTrue(model is MyModel1)
def testNamedRegistration(self):
@registry.register_model("model2")
class MyModel1(t2t_model.T2TModel):
pass
model = registry.model("model2")
self.assertTrue(model is MyModel1)
def testNonT2TModelRegistration(self):
@registry.register_model
def model_fn():
pass
model = registry.model("model_fn")
self.assertTrue(model is model_fn)
def testUnknownModel(self):
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.model("not_registered")
def testDuplicateRegistration(self):
@registry.register_model
def m1():
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_model("m1")
def m2():
pass
def testListModels(self):
@registry.register_model
def m1():
pass
@registry.register_model
def m2():
pass
self.assertSetEqual(set(["m1", "m2"]), set(registry.list_models()))
def testSnakeCase(self):
convert = registry._convert_camel_to_snake
self.assertEqual("typical_camel_case", convert("TypicalCamelCase"))
self.assertEqual("numbers_fuse2gether", convert("NumbersFuse2gether"))
self.assertEqual("numbers_fuse2_gether", convert("NumbersFuse2Gether"))
self.assertEqual("lstm_seq2_seq", convert("LSTMSeq2Seq"))
self.assertEqual("starts_lower", convert("startsLower"))
self.assertEqual("starts_lower_caps", convert("startsLowerCAPS"))
self.assertEqual("caps_fuse_together", convert("CapsFUSETogether"))
self.assertEqual("startscap", convert("Startscap"))
self.assertEqual("s_tartscap", convert("STartscap"))
class HParamRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testHParamSet(self):
@registry.register_hparams
def my_hparams_set():
pass
@registry.register_ranged_hparams
def my_hparams_range(_):
pass
self.assertTrue(registry.hparams("my_hparams_set") is my_hparams_set)
self.assertTrue(
registry.ranged_hparams("my_hparams_range") is my_hparams_range)
def testNamedRegistration(self):
@registry.register_hparams("a")
def my_hparams_set():
pass
@registry.register_ranged_hparams("a")
def my_hparams_range(_):
pass
self.assertTrue(registry.hparams("a") is my_hparams_set)
self.assertTrue(registry.ranged_hparams("a") is my_hparams_range)
def testUnknownHparams(self):
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.hparams("not_registered")
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.ranged_hparams("not_registered")
def testDuplicateRegistration(self):
@registry.register_hparams
def hp1():
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_hparams("hp1")
def hp2():
pass
@registry.register_ranged_hparams
def rhp1(_):
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_ranged_hparams("rhp1")
def rhp2(_):
pass
def testListHparams(self):
@registry.register_hparams
def hp1():
pass
@registry.register_hparams("hp2_named")
def hp2():
pass
@registry.register_ranged_hparams
def rhp1(_):
pass
@registry.register_ranged_hparams("rhp2_named")
def rhp2(_):
pass
self.assertSetEqual(set(["hp1", "hp2_named"]), set(registry.list_hparams()))
self.assertSetEqual(
set(["rhp1", "rhp2_named"]), set(registry.list_ranged_hparams()))
def testRangeSignatureCheck(self):
with self.assertRaisesRegexp(ValueError, "must take a single argument"):
@registry.register_ranged_hparams
def rhp_bad():
pass
with self.assertRaisesRegexp(ValueError, "must take a single argument"):
@registry.register_ranged_hparams
def rhp_bad2(a, b): # pylint: disable=unused-argument
pass
class ModalityRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testModalityRegistration(self):
@registry.register_symbol_modality
class MySymbolModality(modality.Modality):
pass
@registry.register_audio_modality
class MyAudioModality(modality.Modality):
pass
@registry.register_image_modality
class MyImageModality(modality.Modality):
pass
@registry.register_class_label_modality
class MyClassLabelModality(modality.Modality):
pass
self.assertTrue(
registry.symbol_modality("my_symbol_modality") is MySymbolModality)
self.assertTrue(
registry.audio_modality("my_audio_modality") is MyAudioModality)
self.assertTrue(
registry.image_modality("my_image_modality") is MyImageModality)
self.assertTrue(
registry.class_label_modality("my_class_label_modality") is
MyClassLabelModality)
def testDefaultNameLookup(self):
@registry.register_symbol_modality("default")
class MyDefaultModality(modality.Modality):
pass
self.assertTrue(registry.symbol_modality() is MyDefaultModality)
def testList(self):
@registry.register_symbol_modality
class MySymbolModality(modality.Modality):
pass
@registry.register_audio_modality
class MyAudioModality(modality.Modality):
pass
@registry.register_image_modality
class MyImageModality(modality.Modality):
pass
@registry.register_class_label_modality
class MyClassLabelModality(modality.Modality):
pass
expected = [
"symbol:my_symbol_modality", "audio:my_audio_modality",
"image:my_image_modality", "class_label:my_class_label_modality"
]
self.assertSetEqual(set(registry.list_modalities()), set(expected))
if __name__ == "__main__":
tf.test.main()
|
renqianluo/DLT2T
|
DLT2T/utils/registry_test.py
|
Python
|
apache-2.0
| 7,045 | 0.008375 |
salario = float(raw_input())
print(salario + (salario * 0.78))
|
matheusmonte/PythonScripts
|
Salario078.py
|
Python
|
mit
| 62 | 0.016129 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from dashboard import web
class TestWebUtils(testtools.TestCase):
def setUp(self):
super(TestWebUtils, self).setUp()
def test_make_commit_message(self):
message = '''
During finish_migration the manager calls initialize_connection but doesn't
update the block_device_mapping with the potentially new connection_info
returned.
Fixes bug 1076801
Change-Id: Ie49ccd2138905e178843b375a9b16c3fe572d1db'''
module = 'test'
record = {
'message': message,
'module': module,
}
expected = '''\
During finish_migration the manager calls initialize_connection but doesn't \
update the block_device_mapping with the potentially new connection_info \
returned.
Fixes bug <a href="https://bugs.launchpad.net/bugs/1076801" class="ext_link">\
1076801</a>
''' + ('Change-Id: <a href="https://review.openstack.org/#q,'
'Ie49ccd2138905e178843b375a9b16c3fe572d1db,n,z" class="ext_link">'
'Ie49ccd2138905e178843b375a9b16c3fe572d1db</a>')
observed = web.make_commit_message(record)
self.assertEqual(expected, observed,
'Commit message should be processed correctly')
def test_make_commit_message_blueprint_link(self):
message = '''
Implemented new driver for Cinder <:
Implements Blueprint super-driver
Change-Id: Ie49ccd2138905e178843b375a9b16c3fe572d1db'''
module = 'cinder'
record = {
'message': message,
'module': module,
}
expected = '''\
Implemented new driver for Cinder <:
Implements Blueprint ''' + (
'<a href="https://blueprints.launchpad.net/cinder/+spec/'
'super-driver" class="ext_link">super-driver</a>' + '\n' +
'Change-Id: <a href="https://review.openstack.org/#q,'
'Ie49ccd2138905e178843b375a9b16c3fe572d1db,n,z" class="ext_link">'
'Ie49ccd2138905e178843b375a9b16c3fe572d1db</a>')
observed = web.make_commit_message(record)
self.assertEqual(expected, observed,
'Commit message should be processed correctly')
@mock.patch('dashboard.web.get_vault')
@mock.patch('dashboard.web.get_user_from_runtime_storage')
def test_make_page_title(self, user_patch, vault_patch):
memory_storage_mock = mock.Mock()
memory_storage_mock.get_original_company_name = mock.Mock(
return_value='Mirantis'
)
vault_patch.return_value = {'memory_storage': memory_storage_mock}
user_patch.return_value = {'user_name': 'John Doe'}
self.assertEqual('OpenStack community contribution in all releases',
web.make_page_title('', '', '', 'all'))
self.assertEqual('OpenStack community contribution in Havana release',
web.make_page_title('', '', '', 'Havana'))
self.assertEqual('Mirantis contribution in Havana release',
web.make_page_title('Mirantis', '', '', 'Havana'))
self.assertEqual('John Doe contribution in Havana release',
web.make_page_title('', 'john_doe', '', 'Havana'))
self.assertEqual(
'John Doe (Mirantis) contribution to neutron in Havana release',
web.make_page_title('Mirantis', 'John Doe', 'neutron', 'Havana'))
|
TechJournalist/stackalytics
|
tests/unit/test_web_utils.py
|
Python
|
apache-2.0
| 3,954 | 0.000506 |
from __future__ import print_function
import numpy as np
from landlab import ModelParameterDictionary
from landlab.core.model_parameter_dictionary import MissingKeyError, ParameterValueError
from landlab.field.scalar_data_fields import FieldError
from landlab.grid.base import BAD_INDEX_VALUE
class StreamPowerEroder(object):
"""
    This component is now verified stable for simple cases with m and n specified,
    run as a follow-up to Fastscape flow routing. The threshold appears stable.
The more exciting cases (e.g., specifying a,b,c; forcing with W or Q) are
untested, but should run.
There is as yet no explicit stabilization check on the timestep. If your
run destabilizes, try reducing dt.
See, e.g., ./examples/simple_sp_driver.py
DEJH Sept 2013, major modifications Sept 14.
    This component *should* run on any grid, but this is untested.
"""
def __init__(self, grid, params):
self.initialize(grid, params)
#This draws attention to a potential problem. It will be easy to have modules update z, but because noone "owns" the data, to forget to also update dz/dx...
#How about a built in grid utility that updates "derived" data (i.e., using only grid fns, e.g., slope, curvature) at the end of any given tstep loop?
#Or an explicit flagging system for all variables in the modelfield indicating if they have been updated this timestep. (Currently implemented)
        #Or wipe the existence of any derived grid data at the end of a timestep entirely, so modules find they don't have it next timestep.
def initialize(self, grid, params_file):
'''
params_file is the name of the text file containing the parameters
needed for this stream power component.
Module erodes where channels are, implemented as
E = K * A**m * S**n - sp_crit,
and if E<0, E=0.
If 'use_W' is declared and True, the module instead implements:
E = K * A**m * S**n / W - sp_crit
***Parameters for input file***
OBLIGATORY:
K_sp -> positive float, the prefactor. This is defined per unit
time, not per tstep. Type the string 'array' to cause the
component's erode method to look for an array of values of K
(see documentation for 'erode').
ALTERNATIVES:
*either*
m_sp -> positive float, the power on A
and
n_sp -> positive float, the power on S
*or*
sp_type -> String. Must be one of 'Total', 'Unit', or 'Shear_stress'.
and (following Whipple & Tucker 1999)
a_sp -> +ve float. The power on the SP/shear term to get the erosion
rate.
b_sp -> +ve float. The power on discharge to get width, "hydraulic
geometry". Unnecessary if sp_type='Total'.
            c_sp -> +ve float. The power on area to get discharge, "basin
                hydrology".
... If 'Total', m=a*c, n=a.
... If 'Unit', m=a*c*(1-b), n=a.
... If 'Shear_stress', m=2*a*c*(1-b)/3, n = 2*a/3.
OPTIONS:
threshold_sp -> +ve float; the threshold sp_crit. Defaults to 0.
This threshold is assumed to be in "stream power" units, i.e.,
if 'Shear_stress', the value should be tau**a.
dt -> +ve float. If set, this is the fixed timestep for this
component. Can be overridden easily as a parameter in erode().
If not set (default), this parameter MUST be set in erode().
use_W -> Bool; if True, component will look for node-centered data
describing channel width in grid.at_node['channel_width'], and
use it to implement incision ~ stream power per unit width.
Defaults to False. If you set sp_m and sp_n, follows the
equation given above. If you set sp_type, it will be ignored if
'Total', but used directly if you want 'Unit' or 'Shear_stress'.
use_Q -> Bool. If true, the equation becomes E=K*Q**m*S**n.
Effectively sets c=1 in Wh&T's 1999 derivation, if you are
setting m and n through a, b, and c.
'''
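        # Illustrative worked example (not part of the original source): with
        # sp_type='Shear_stress' and hypothetical values a_sp=1.5, b_sp=0.5,
        # c_sp=1.0, the rules above give m = 2*1.5*1.0*(1-0.5)/3 = 0.5 and
        # n = 2*1.5/3 = 1.0, i.e. the familiar E = K*A**0.5*S erosion law.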
self.grid = grid
self.fraction_gradient_change = 1.
self.link_S_with_trailing_blank = np.zeros(grid.number_of_links+1) #needs to be filled with values in execution
self.count_active_links = np.zeros_like(self.link_S_with_trailing_blank, dtype=int)
self.count_active_links[:-1] = 1
inputs = ModelParameterDictionary(params_file)
try:
self._K_unit_time = inputs.read_float('K_sp')
except ParameterValueError: #it was a string
self.use_K = True
else:
self.use_K = False
try:
self.sp_crit = inputs.read_float('threshold_sp')
self.set_threshold = True #flag for sed_flux_dep_incision to see if the threshold was manually set.
print("Found a threshold to use: ", self.sp_crit)
except MissingKeyError:
self.sp_crit = 0.
self.set_threshold = False
try:
self.tstep = inputs.read_float('dt')
except MissingKeyError:
pass
try:
self.use_W = inputs.read_bool('use_W')
except MissingKeyError:
self.use_W = False
try:
self.use_Q = inputs.read_bool('use_Q')
except MissingKeyError:
self.use_Q = False
try:
self._m = inputs.read_float('m_sp')
except MissingKeyError:
self._type = inputs.read_string('sp_type')
self._a = inputs.read_float('a_sp')
try:
self._b = inputs.read_float('b_sp')
except MissingKeyError:
if self.use_W:
self._b = 0.
else:
raise NameError('b was not set')
try:
self._c = inputs.read_float('c_sp')
except MissingKeyError:
if self.use_Q:
self._c = 1.
else:
raise NameError('c was not set')
if self._type == 'Total':
self._n = self._a
self._m = self._a*self._c #==_a if use_Q
elif self._type == 'Unit':
self._n = self._a
self._m = self._a*self._c*(1.-self._b) #==_a iff use_Q&use_W etc
elif self._type == 'Shear_stress':
self._m = 2.*self._a*self._c*(1.-self._b)/3.
self._n = 2.*self._a/3.
else:
raise MissingKeyError('Not enough information was provided on the exponents to use!')
else:
self._n = inputs.read_float('n_sp')
#m and n will always be set, but care needs to be taken to include Q and W directly if appropriate
self.stream_power_erosion = grid.zeros(centering='node')
##Flags for self-building of derived data:
#self.made_link_gradients = False
##This will us the MPD once finalized
##Now perform checks for existance of needed data items:
#try:
# _ = self.grid.at_link['planet_surface__derivative_of_elevation']
#except FieldError:
# self.made_link_gradients = True
def erode(self, grid, dt, node_elevs='topographic__elevation',
node_drainage_areas='drainage_area',
flow_receiver='flow_receiver',
node_order_upstream='upstream_ID_order',
slopes_at_nodes='topographic__steepest_slope',
link_node_mapping='links_to_flow_receiver',
link_slopes=None, slopes_from_elevs=None,
W_if_used=None, Q_if_used=None, K_if_used=None):
"""
A simple, explicit implementation of a stream power algorithm.
*grid* & *dt* are the grid object and timestep (float) respectively.
*node_elevs* is the elevations on the grid, either a field string or
nnodes-long array.
        *node_drainage_areas* tells the component where to look for the drainage
area values. Change to another string to override which grid field the
component looks at, or pass a nnodes-long array of drainage areas values
directly instead.
*flow_receiver* and *node_order_upstream*, the downstream node to which
each node flows and the ordering of the nodes in the network starting
at the outlet, respectively,
are both necessary as inputs to allow stability testing.
If you already have slopes defined at nodes on the grid, pass them to
the component with *slopes_at_nodes*. The same syntax is expected:
string gives a name in the grid fields, an array gives values direct.
Alternatively, set *link_slopes* (and *link_node_mapping*) if this data
is only available at links. 'planet_surface__derivative_of_elevation'
is the default field name for link slopes. Override this name by
setting the variable as the appropriate string, or override use of
grid fields altogether by passing an array. *link_node_mapping* controls
how the component maps these link values onto the arrays. We assume
there is always a 1:1 mapping (pass the values already projected onto
the nodes using slopes_at_nodes if not). Other components, e.g.,
flow_routing.route_flow_dn, may provide the necessary outputs to make
        the mapping easier: e.g., just pass 'links_to_flow_receiver' from that
module (the default name). If the component cannot find an existing
mapping through this parameter, it will derive one on the fly, at
considerable cost of speed (see on-screen reports).
*slopes_from_elevs* allows the module to create gradients internally
from elevations rather than have them provided. Set to True to force
the component to look for the data in the location specified by
node_elevs. Using this option is
considerably slower than any of the alternatives, as it also has to
        calculate the link_node_mapping from scratch each time.
In both these cases, at present the mapping is to use the maximum
slope of --any-- link attached to the node as the representative node
slope. This is primarily for speed, but may be a good idea to modify
later.
*W_if_used* and *Q_if_used* must be provided if you set use_W and use_Q
respectively in the component initialization. They can be either field
names or nnodes arrays as in the other cases.
NB: If you want spatially or temporally variable runoff, pass the
runoff values at each pixel to the flow router, then pass discharges
at each node using *Q_if_used* to this component.
RETURNS (grid, modified_elevs, stream_power_erosion); modifies grid elevation
fields to reflect updates; creates and maintains
grid.at_node['stream_power_erosion']. Note the value stream_power_erosion
is not an excess stream power; any specified erosion threshold is not
incorporated into it.
"""
active_nodes = grid.get_active_cell_node_ids()
        if W_if_used is not None:
            assert self.use_W, "Widths were provided, but you didn't set the use_W flag in your input file! Aborting..."
        if Q_if_used is not None:
            assert self.use_Q, "Discharges were provided, but you didn't set the use_Q flag in your input file! Aborting..."
        if K_if_used is not None:
            assert self.use_K, "An array of erodabilities was provided, but you didn't set K_sp to 'array' in your input file! Aborting..."
try:
self._K_unit_time = grid.at_node[K_if_used][active_nodes]
except TypeError:
self._K_unit_time = K_if_used[active_nodes]
if type(node_elevs)==str:
node_z = grid.at_node[node_elevs]
else:
node_z = node_elevs
#Perform check on whether we use grid or direct fed data:
try:
self.slopes = grid.at_node[slopes_at_nodes]
except TypeError:
if type(slopes_at_nodes)==np.ndarray:
self.slopes = slopes_at_nodes
else:
raise TypeError('slopes_at_nodes input not recognised')
except FieldError:
if slopes_from_elevs==True:
S_links = (node_z[grid.node_index_at_link_head]-node_z[grid.node_index_at_link_tail])/grid.link_length
else:
if link_slopes:
if type(link_slopes)==str:
S_links = grid.at_link[link_slopes]
else:
S_links = link_slopes
else:
S_links = grid.at_link['planet_surface__derivative_of_elevation']
#put the slopes onto the nodes
try:
self.slopes = S_links[grid.at_node[link_node_mapping]]
except TypeError:
try:
self.slopes = S_links[link_node_mapping]
except IndexError:
#need to do the mapping on the fly.
#we're going to use the max slope (i.e., + or -) of *all* adjacent nodes.
#This isn't ideal. It should probably just be the outs...
#i.e., np.max(self.link_S_with_trailing_blank[grid.node_outlinks] AND -self.link_S_with_trailing_blank[grid.node_inlinks])
self.link_S_with_trailing_blank[:-1] = S_links
self.slopes = np.amax(np.fabs(self.link_S_with_trailing_blank[grid.node_links]),axis=0)
if type(node_drainage_areas)==str:
node_A = grid.at_node[node_drainage_areas]
else:
node_A = node_drainage_areas
if type(flow_receiver)==str:
flow_receiver = grid.at_node[flow_receiver]
if type(node_order_upstream)==str:
node_order_upstream = grid.at_node[node_order_upstream]
#Operate the main function:
if self.use_W==False and self.use_Q==False: #normal case
stream_power_active_nodes = self._K_unit_time * dt * node_A[active_nodes]**self._m * self.slopes[active_nodes]**self._n
elif self.use_W:
try:
W = grid.at_node[W_if_used]
except TypeError:
W = W_if_used
if self.use_Q: #use both Q and W direct
try:
Q_direct = grid.at_node[Q_if_used]
except TypeError:
Q_direct = Q_if_used
stream_power_active_nodes = self._K_unit_time * dt * Q_direct[active_nodes]**self._m * self.slopes[active_nodes]**self._n / W
else: #just W to be used
stream_power_active_nodes = self._K_unit_time * dt * node_A[active_nodes]**self._m * self.slopes[active_nodes]**self._n / W
else: #just use_Q
try:
Q_direct = grid.at_node[Q_if_used]
except TypeError:
assert type(Q_if_used) in (np.ndarray, list)
Q_direct = Q_if_used
stream_power_active_nodes = self._K_unit_time * dt * Q_direct[active_nodes]**self._m * self.slopes[active_nodes]**self._n
#Note that we save "stream_power_erosion" incorporating both K and a. Most definitions would need this value /K then **(1/a) to give actual stream power (unit, total, whatever), and it does not yet include the threshold
self.stream_power_erosion[active_nodes] = stream_power_active_nodes
grid.at_node['stream_power_erosion'] = self.stream_power_erosion
#print "max stream power: ", self.stream_power_erosion.max()
erosion_increment = (self.stream_power_erosion - self.sp_crit).clip(0.)
#this prevents any node from incising below any node downstream of it
#we have to go in upstream order in case our rate is so big we impinge on baselevels > 1 node away
        elev_dstr = node_z[flow_receiver]  # we subtract erosion_increment[flow_receiver] in the loop, as it can update
method = 'cython'
if method == 'cython':
from .cfuncs import erode_avoiding_pits
erode_avoiding_pits(node_order_upstream, flow_receiver, node_z,
erosion_increment)
else:
for i in node_order_upstream:
elev_this_node_before = node_z[i]
elev_this_node_after = elev_this_node_before - erosion_increment[i]
elev_dstr_node_after = elev_dstr[i] - erosion_increment[flow_receiver[i]]
if elev_this_node_after<elev_dstr_node_after:
erosion_increment[i] = (elev_this_node_before - elev_dstr_node_after)*0.999999 #we add a tiny elevation excess to prevent the module from ever totally severing its own flow paths
node_z -= erosion_increment
self.grid = grid
return grid, node_z, self.stream_power_erosion
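# Illustrative usage sketch (not part of the original source). The grid and
# flow-routing helper names are assumptions based on the docstrings above and
# landlab conventions of this era; exact APIs may differ:
#
#     from landlab import RasterModelGrid
#     mg = RasterModelGrid(50, 50, 200.)                        # hypothetical grid
#     z = mg.add_zeros('node', 'topographic__elevation')
#     # ... route flow first (e.g., flow_routing.route_flow_dn), then:
#     sp = StreamPowerEroder(mg, './stream_power_params.txt')   # hypothetical file
#     mg, z, sp_erosion = sp.erode(mg, dt=1000.)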
|
decvalts/landlab
|
landlab/components/stream_power/stream_power.py
|
Python
|
mit
| 17,514 | 0.010049 |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 09 10:39:38 2015
@author: 108630
"""
import os
import numpy as np
import pandas as pd
from scipy.stats import multivariate_normal, norm
from dtocean_core.utils.moorings import get_moorings_tables
# Note that the electrical folder in the test_data directory should be
# placed in the same folder as this file
this_dir = os.path.dirname(os.path.realpath(__file__))
elec_dir = os.path.join(this_dir, "electrical")
moor_dir = os.path.join(this_dir, "moorings")
## CONSTANTS
gravity = 9.80665 #gravity
seaden = 1025.0 #sea water density
airden = 1.226 #air density
#cylinder drag coefficients
dragcoefcyl = [[0.0, 0.0, 1e-5, 1e-2],
[1e4, 1.2, 1.2, 1.15],
[2e4, 1.2, 1.2, 1.05],
[3e4, 1.2, 1.2, 0.87],
[4e4, 1.2, 1.15, 0.82],
[5e4, 1.2, 1.0, 0.8],
[6e4, 1.2, 0.9, 0.8],
[7e4, 1.2, 0.85, 0.83],
[8e4, 1.2, 0.7, 0.9],
[9e4, 1.2, 0.65, 0.94],
[1e5, 1.2, 0.6, 0.95],
[2e5, 1.2, 0.35, 1.02],
[3e5, 1.15, 0.3, 1.03],
[4e5, 0.95, 0.33, 1.05],
[5e5, 0.6, 0.35, 1.06],
[6e5, 0.35, 0.38, 1.07],
[7e5, 0.29, 0.4, 1.07],
[8e5, 0.31, 0.43, 1.08],
[9e5, 0.33, 0.45, 1.08],
[1e6, 0.35, 0.47, 1.08],
[2e6, 0.54, 0.53, 1.08],
[3e6, 0.62, 0.62, 1.08],
[4e6, 0.67, 0.67, 1.08]]
#cylinder wake amplification factors
wakeampfactorcyl = [[0.0, 2.0, 2.0],
[5.0, 0.4, 0.8],
[10.0, 0.78, 1.3],
[15.0, 1.07, 1.4],
[20.0, 1.25, 1.25],
[25.0, 1.2, 1.2],
[30.0, 1.18, 1.18],
[35.0, 1.12, 1.12],
[40.0, 1.1, 1.1],
[45.0, 1.06, 1.06],
[50.0, 1.03, 1.03],
[55.0, 1.01, 1.01],
[60.0, 1.0, 1.0]]
#rectangular section wind drag coefficients
winddragcoefrect = [[4.0, 1.2, 1.3, 1.4, 1.5, 1.6, 1.6, 1.6],
[3.0, 1.1, 1.2, 1.25, 1.35, 1.4, 1.4, 1.4],
[2.0, 1.0, 1.05, 1.1, 1.15, 1.2, 1.2, 1.2],
[1.5, 0.95, 1.0, 1.05, 1.1, 1.15, 1.15, 1.15],
[1.0, 0.9, 0.95, 1.0, 1.05, 1.1, 1.2, 1.4],
[0.6667, 0.8, 0.85, 0.9, 0.95, 1.0, 1.0, 1.0],
[0.5, 0.75, 0.75, 0.8, 0.85, 0.9, 0.9, 0.9],
[0.3333, 0.7, 0.75, 0.75, 0.75, 0.8, 0.8, 0.8],
[0.25, 0.7, 0.7, 0.75, 0.75, 0.75, 0.75, 0.75]]
#rectangular section current drag coefficients
currentdragcoefrect = [[10.0000, 1.88],
[5.0000, 1.95],
[3.3333, 2.06],
[2.5000, 2.24],
[2.0000, 2.39],
[1.6667, 2.6],
[1.4286, 2.73],
[1.2500, 2.5],
[1.1111, 2.31],
[1.0000, 2.19],
[0.9091, 2.06],
[0.8333, 1.95],
[0.7692, 1.87],
[0.7143, 1.8],
[0.6667, 1.73],
[0.6250, 1.67],
[0.5882, 1.63],
[0.5556, 1.58],
[0.5263, 1.52],
[0.5000, 1.49],
[0.4762, 1.46],
[0.4545, 1.44],
[0.4348, 1.41],
[0.4167, 1.37],
[0.4000, 1.35],
[0.3846, 1.32],
[0.3704, 1.29],
[0.3571, 1.26],
[0.3448, 1.25],
[0.3333, 1.23],
[0.3226, 1.21],
[0.3125, 1.2],
[0.3030, 1.19],
[0.2941, 1.18],
[0.2857, 1.16],
[0.2778, 1.15],
[0.2703, 1.15],
[0.2632, 1.15],
[0.2564, 1.15],
[0.2500, 1.15]]
#rectangular section wave drift coefficients
driftcoeffloatrect = [[0.0, 0.0],
[0.1, 0.02],
[0.2, 0.06],
[0.3, 0.15],
[0.4, 0.28],
[0.5, 0.44],
[0.6, 0.60],
[0.7, 0.74],
[0.8, 0.84],
[0.9, 0.91],
[1.0, 0.94],
[1.1, 0.97],
[1.2, 0.98],
[1.3, 0.99],
[1.4, 1.0],
[1.5, 1.0]]
#rectangular section wave inertia coefficients
waveinertiacoefrect = [[10.0, 2.23],
[5.0, 1.98],
[2.0, 1.7],
[1.0, 1.51],
[0.5, 1.36],
[0.2, 1.21],
[0.1, 1.14]]
## LEASE AREA
startx = 1000.
endx = 2000.
dx = 10.
numx = int(float(endx - startx) / dx) + 1
starty = 0.
endy = 2500.
dy = 10.
numy = int(float(endy - starty) / dy) + 1
x = np.linspace(startx, endx, numx)
y = np.linspace(starty, endy, numy)
nx = len(x)
ny = len(y)
# Bathymetry
X, Y = np.meshgrid(x,y)
Z = np.zeros(X.shape) - 50.
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "loose sand"
strata = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y, ["layer 1"]]}
# Soil characteristics
max_temp = 10.
max_soil_res = 10.
target_burial_depth = 10
# Polygons
lease_area = [(startx, starty),
(endx, starty),
(endx, endy),
(startx, endy)]
#nogo_areas = [np.array([[50., 50.],[60., 50.],[60., 60.],[50., 60.]])]
nogo_areas = None
# Tidal time series
n_bins = 6
time_points = 48
t = np.linspace(0, 1, time_points)
rv = norm()
time_sin = np.sin(np.linspace(0, 4*np.pi, time_points))
time_scaled = time_sin * (1. / np.amax(time_sin))
xgrid, ygrid = np.meshgrid(x,y)
pos = np.dstack((xgrid, ygrid))
rv = multivariate_normal([500., 150.], [[max(x)*5., max(y)*2.],
[max(y)*2., max(x)*5.]])
u_max = 0.
v_max = 6.
ssh_max = 1.
TI = 0.1
grid_pdf = rv.pdf(pos).T
#u_scaled = grid_pdf * (u_max / np.amax(grid_pdf))
u_scaled = np.ones((nx, ny)) * u_max
v_scaled = np.ones((nx, ny)) * v_max
ssh_scaled = grid_pdf * (ssh_max / np.amax(grid_pdf))
u_arrays = []
v_arrays = []
ssh_arrays = []
for multiplier in time_scaled:
u_arrays.append(np.abs(u_scaled * multiplier))
v_arrays.append(np.abs(v_scaled * multiplier))
ssh_arrays.append(ssh_scaled * multiplier)
U = np.dstack(u_arrays)
V = np.dstack(v_arrays)
SSH = np.dstack(ssh_arrays)
TI = np.ones(SSH.shape) * TI
tidal_series_raw = {"values": {"U": U,
"V": V,
"SSH": SSH,
"TI": TI},
"coords": [x, y, t]}
xc = x[int(nx/2)]
yc = y[int(ny/2)]
tidal_point = (xc, yc)
# Tidal flow characteristics (hydro)
power_law_exponent = np.array([7.])
blockage_ratio = 1.
# Tidal flow characteristics (moorings)
max_10year_current = 6.
max_10year_current_dir = 0.
current_profile = "1/7 Power Law" #current profile alternatives: "Uniform"
# "1/7 Power Law"
# Wave characteristics
predominant_100year_wave_dir = 0.
max_100year_hs = 0.5
max_100year_tp = 10.
max_100year_gamma = 1.
# Wind characteristics
mean_100_year_wind_speed = 2.0
mean_100_year_wind_dir = 0.0
max_100_year_gust_speed = 6.8
max_100_year_gust_dir = 0.0
# Water level characteristics
max_50_year_water_level = 5.0 #water level maximum offset
min_50_year_water_level = 0.0 #water level minimum offset
## CABLE CORRIDOR
startx = 0.
endx = 1000.
dx = 10.
numx = int(float(endx - startx) / dx)
starty = 1000.
endy = 1500.
dy = 10.
numy = int(float(endy - starty) / dy) + 1
x = np.linspace(startx, endx, numx)
y = np.linspace(starty, endy, numy)
nx = len(x)
ny = len(y)
# Bathymetry
X, Y = np.meshgrid(x,y)
Z = np.zeros(X.shape) - 50.
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "loose sand"
export_strata = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y, ["layer 1"]]}
# Soil characteristics
corridor_max_temp = 10.
corridor_max_soil_res = 10.
corridor_target_burial_depth = 20.
# Polygons
corridor_nogo_areas = None
# Tidal flow characteristics
corridor_10year_current = 6.
corridor_10year_current_dir = 0.
# Wave characteristics
corridor_100year_wave_dir = 0.
## SHORELINE
landing_point = (0., 1250.)
onshore_infrastructure_cost = 1000000.
# MACHINE
X = np.array([ 0. , 0.1010101 , 0.2020202 , 0.3030303 ,
0.4040404 , 0.50505051, 0.60606061, 0.70707071,
0.80808081, 0.90909091, 1.01010101, 1.11111111,
1.21212121, 1.31313131, 1.41414141, 1.51515152,
1.61616162, 1.71717172, 1.81818182, 1.91919192,
2.02020202, 2.12121212, 2.22222222, 2.32323232,
2.42424242, 2.52525253, 2.62626263, 2.72727273,
2.82828283, 2.92929293, 3.03030303, 3.13131313,
3.23232323, 3.33333333, 3.43434343, 3.53535354,
3.63636364, 3.73737374, 3.83838384, 3.93939394,
4.04040404, 4.14141414, 4.24242424, 4.34343434,
4.44444444, 4.54545455, 4.64646465, 4.74747475,
4.84848485, 4.94949495, 5.05050505, 5.15151515,
5.25252525, 5.35353535, 5.45454545, 5.55555556,
5.65656566, 5.75757576, 5.85858586, 5.95959596,
6.06060606, 6.16161616, 6.26262626, 6.36363636,
6.46464646, 6.56565657, 6.66666667, 6.76767677,
6.86868687, 6.96969697, 7.07070707, 7.17171717,
7.27272727, 7.37373737, 7.47474747, 7.57575758,
7.67676768, 7.77777778, 7.87878788, 7.97979798,
8.08080808, 8.18181818, 8.28282828, 8.38383838,
8.48484848, 8.58585859, 8.68686869, 8.78787879,
8.88888889, 8.98989899, 9.09090909, 9.19191919,
9.29292929, 9.39393939, 9.49494949, 9.5959596 ,
9.6969697 , 9.7979798 , 9.8989899 , 10. ])
Cp = np.array([ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0.00248182, 0.0273 , 0.05211818, 0.07693636, 0.10175455,
0.12657273, 0.15139091, 0.17620909, 0.20102727, 0.22584545,
0.25066364, 0.27548182, 0.3003 , 0.32511818, 0.34993636,
0.37475455, 0.39957273, 0.42439091, 0.44920909, 0.47402727,
0.49884545, 0.52366364, 0.54848182, 0.5733 , 0.59811818,
0.62293636, 0.64775455, 0.67257273, 0.69739091, 0.72220909,
0.74702727, 0.77184545, 0.79666364, 0.82148182, 0.8463 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86 ,
0.86 , 0.86 , 0.86 , 0.86 , 0.86
])
Ct = 0.4*np.ones((100))
# Performance curves are matched to the same velocity abscissae
tidal_performance = {"Velocity": X,
"Coefficient of Power": Cp,
"Coefficient of Thrust": Ct}
# Device characteristics
turbine_hub_height = 20.
rotor_diam = 18.
turbine_interdist = None
min_install = -np.inf
max_install = -40.
min_dist_x = 40.
min_dist_y = 40.
bidirection = True
rated_power_device = 1.
device_voltage= 10.
yaw_angle = 0.
cut_in = 1.
cut_out = 5.
connection = 'Wet-Mate'
footprint_radius = 20.
device_draft = None
umbilical_connection = None
umbilical_safety = None
power_factor = 0.98
sys_prof = "Cylindrical" #device profile options: "Cylindrical" "Rectangular"
sys_mass = 300.0e3 #device mass
sys_cog = [0.0, 0.0, 15.0] #device centre of gravity
sys_vol = 148.44 #device displaced volume
sys_height = 21.0 #device height
sys_width = 3.0 #device width
sys_length = 3.0 #device length
sys_dry_frontal = 0.0 #device dry frontal area
sys_dry_beam = 0.0 #device dry beam area
sys_wet_frontal = 63.0 #device wet frontal area
sys_wet_beam = 63.0 #device wet beam area
sys_rough = 0.9e-2 #device surface roughness
#predefined foundation type: Shallow, Gravity, Pile, Suction Caisson,
# Direct Embedment, Drag
prefound = None
#foundation locations (from device origin)
found_loc = np.array([[-10.0, -10.0, 0.0],
[-10.0, 10.0, 0.0],
[ 10.0, 10.0, 0.0],
[ 10.0, -10.0, 0.0]])
# ARRAY LAYOUT
user_array_option = 'User Defined Fixed'
pos = [(1250., 250.),
(1750., 250.),
(1250., 750.),
(1750., 750.),
(1250., 1250.),
(1750., 1250.),
(1250., 1750.),
(1750., 1750.),
(1250., 2250.),
(1750., 2250.)]
user_array_layout = np.array(pos)
main_direction = None
rated_array_power = 5.
## ELECTRICAL NETWORK
# Farm
devices_per_string = 10
network_configuration = ["Radial"]
min_voltage = 15.
max_voltage = 30.
connector_type = "Wet-Mate"
collection_point_type = "Subsea"
# Corridor
corridor_voltage = 120.
number_of_export_cables = None
## FOUNDATIONS
found_safety = 1.5 #foundation safety factor
grout_safety = 6.0 #grout safety factor
fab_cost = None # 1.0 #optional fabrication cost factor
## COMPONENT DATA
# Electrical
component_data_path = os.path.join(elec_dir, 'mock_db.xlsx')
xls_file = pd.ExcelFile(component_data_path, encoding = 'utf-8')
sheet_names = xls_file.sheet_names
static_cable = xls_file.parse(sheet_names[0])
dynamic_cable = xls_file.parse(sheet_names[1])
wet_mate_connectors = xls_file.parse(sheet_names[2])
dry_mate_connectors = xls_file.parse(sheet_names[3])
transformers = xls_file.parse(sheet_names[4])
collection_points = xls_file.parse(sheet_names[5])
collection_point_cog = {11: [0,0,0],
12: [0,0,0],
22: [0,0,0],
23: [0,0,0],
24: [0,0,0],
25: [0,0,0]
}
collection_point_found = {11: [0,0,0],
12: [0,0,0],
22: [0,0,0],
23: [0,0,0],
24: [0,0,0],
25: [0,0,0]
}
compat_data_path = os.path.join(elec_dir,
'equipment_compatibility_matrix.xlsx')
xls_file = pd.ExcelFile(compat_data_path, encoding='utf-8')
sheet_names = xls_file.sheet_names
installation_soil_compatibility = xls_file.parse(sheet_names[0],
index_col=None)
installation_soil_compatibility.columns = ['Technique',
'Loose Sand',
'Medium Sand',
'Dense Sand',
'Very Soft Clay',
'Soft Clay',
'Firm Clay',
'Stiff Clay',
'Hard Glacial Till',
'Cemented',
'Soft Rock Coral',
'Hard Rock',
'Gravel Cobble']
equipment_gradient_constraint = 14.
# Moorings and Foundations
compdict = eval(open(os.path.join(moor_dir, 'dummycompdb.txt')).read())
comp_tables = get_moorings_tables(compdict) #component database
cost_steel = 1.0 #steel cost
cost_grout = 0.1 #grout cost
cost_concrete = 0.11 #concrete cost
grout_strength = 125.0 #grout strength
## MATERIALS
# Foundations
steelden = 7750.0 #steel density
conden = 2400.0 #concrete density
groutden = 2450.0 #grout density
# Substrate
draincoh = 0.0 #drained soil cohesion
unsfang = 5.0 #undrained soil friction angle
dsfang = 35.0 #drained soil friction angle
soilweight = 9.4285e+03 #buoyant soil weight
relsoilden = 50.0 #relative soil density
undrained_soil_shear_strength_constant = 1.45e3
undrained_soil_shear_strength_dependent = 2e3 #undrained shear friction angle
soilsen = 3.0 #soil sensitivity
rockcomstr = 206843.0 #rock compressive strength
# default soil properties table
soilprops = pd.read_csv(os.path.join(moor_dir, 'soilprops.txt'),
sep='\t',
header=0,
index_col=False)
# buried line bearing capacity factors
line_bcf = [[20, 3],
[25, 5],
[30, 8],
[35, 12],
[40, 22],
[45, 36]]
#subgrade reaction coefficients
k1coeff = [[1, 100, 200],
[2, 57, 119],
[3, 45.75, 94],
[4, 34.5, 69],
[5, 30.75, 56],
[6, 27, 43],
[7, 25.25, 38],
[8, 23.5, 33],
[9, 22.25, 29],
[10, 21, 25],
[11, 19.75, 22.5],
[12, 18.5, 20],
[13, 17.75, 19],
[14, 17, 18],
[15, 16.5, 17.5],
[16, 16, 17],
[17, 15.75, 16.75],
[18, 15.5, 16.5],
[19, 15.25, 16.25],
[20, 15, 16]]
#subgrade soil reaction coefficients cohesionless
subgradereaccoef = [[0.5, 4886048.0, 12893739.0, 24158795.0, 32573656.0],
[1.0, 3800260.0, 10043544.0, 17644064.0, 24430242.0],
[1.5, 3257366.0, 7464796.0, 14115251.0, 19272747.0],
[2.0, 2850195.0, 6107561.0, 11672227.0, 16286828.0],
[2.5, 2443024.0, 5428943.0, 10179268.0, 14658145.0],
[3.0, 2171577.0, 5021772.0, 9229203.0, 13300910.0],
[3.5, 2035854.0, 4750325.0, 8414861.0, 11943674.0],
[4.0, 1764406.0, 4411016.0, 7736243.0, 10857885.0],
[4.5, 1628683.0, 4139569.0, 7193349.0, 10043544.0],
[5.0, 1560821.0, 3935983.0, 6650455.0, 9229203.0],
[5.5, 1425097.0, 3732398.0, 6107561.0, 8686308.0],
[6.0, 1357236.0, 3596675.0, 5768252.0, 8143414.0],
[6.5, 1289374.0, 3393089.0, 5361081.0, 7736243.0],
[7.0, 1221512.0, 3257366.0, 5021772.0, 7261211.0],
[7.5, 1153650.0, 3053780.0, 4818187.0, 6854040.0],
[8.0, 1085789.0, 2850195.0, 4614601.0, 6514731.0],
[8.5, 1017927.0, 2646610.0, 4411016.0, 6243284.0],
[9.0, 950065.0, 2443024.0, 4207431.0, 5971837.0],
[9.5, 814341.0, 2307301.0, 4003845.0, 5700390.0],
[10.0, 678618.0, 2239439.0, 3868122.0, 5428943.0]]
#pile deflection coefficients
piledefcoef = [[2.0, 4.65, 3.4],
[2.25, 3.51, 2.4],
[2.5, 2.95, 2.05],
[2.75, 2.77, 1.85],
[3.0, 2.75, 1.8],
[3.25, 2.73, 1.77],
[3.5, 2.7, 1.75],
[3.75, 2.67, 1.72],
[4.0, 2.65, 1.7],
[4.25, 2.637, 1.7],
[4.5, 2.63, 1.7],
[4.75, 2.62, 1.7],
[5.0, 2.61, 1.7]]
#pile moment coefficients am
pilemomcoefam = [
[0, 0, 0, 0, 0, 0],
[0.25, 0.255751417, 0.255752104, 0.25576445, 0.243605698, 0.227417941],
[0.5, 0.475, 0.475, 0.475, 0.475, 0.422],
[0.75, 0.642998583, 0.642997896, 0.64298555, 0.655144302, 0.534082059],
[1, 0.745, 0.745, 0.745, 0.745, 0.514],
[1.25, 0.773629251, 0.773631312, 0.773668349, 0.722567095, 0.345628822],
[1.5, 0.751, 0.751, 0.751, 0.634, 0.147],
[1.75, 0.700609413, 0.700601855, 0.700466055, 0.52883732, 0.036527654],
[2, 0.622, 0.622, 0.622, 0.402, 0],
[2.25, 0.513558099, 0.513586268, 0.51409243, 0.246333627, 0],
[2.5, 0.393, 0.393, 0.393, 0.101, 0],
[2.75, 0.280908193, 0.280803075, 0.277539226, 0.007453173, 0],
[3, 0.19, 0.19, 0.179, -0.03, 0],
[3.25, 0.126559129, 0.127576434, 0.104750666, 0, 0],
[3.5, 0.079, 0.084, 0.054, 0, 0],
[3.75, 0.03435529, 0.048391189, 0.023458111, 0, 0],
[4, -0.008, 0.021, 0.008, 0, 0],
[4.25, -0.04560529, 0.004858811, 0, 0, 0],
[4.5, -0.076, 0.003, 0, 0, 0]
]
#pile moment coefficients bm
pilemomcoefbm = [
[0, 1, 1, 1, 1, 1],
[0.25, 0.987315551, 0.987332937, 0.98122151, 0.992090215, 0.969472347],
[0.5, 0.970278, 0.970278, 0.970278, 0.970278, 0.938576],
[0.75, 0.935494699, 0.935477313, 0.94555149, 0.925895035, 0.865467403],
[1, 0.869573, 0.869573, 0.885424, 0.850273, 0.708303],
[1.25, 0.764931527, 0.764983686, 0.777489904, 0.738243769, 0.452696415],
[1.5, 0.637234, 0.637234, 0.646193, 0.59864, 0.19409],
[1.75, 0.505730443, 0.505539192, 0.519353392, 0.44529864, 0.031270686],
[2, 0.380771, 0.380771, 0.401447, 0.298073, -0.033425],
[2.25, 0.269349077, 0.270061921, 0.292410027, 0.175437797, 0],
[2.5, 0.173931, 0.173931, 0.197364, 0.084337, 0],
[2.75, 0.094848998, 0.092188875, 0.121739999, 0, 0],
[3, 0.028426, 0.028426, 0.067022, 0, 0],
[3.25, -0.028406943, -0.014258045, 0.032568228, 0, 0],
[3.5, -0.072277, -0.038507, 0.013181, 0, 0],
[3.75, -0.099507477, -0.047911693, 0.003155466, 0, 0],
[4, -0.111648, -0.044108, -0.000686, 0, 0],
[4.25, -0.111554773, -0.028243057, 0, 0, 0],
[4.5, -0.102084, -0.001464, 0, 0, 0]
]
#pile limiting values non calcaeous soils
pilefricresnoncal = [[35, 30, 40, 95.761e3, 9576.051e3],
[30, 25, 20, 81.396e3, 4788.026e3],
[25, 20, 12, 67.032e3, 2872.815e3],
[20, 15, 8, 47.880e3, 1915.210e3]]
#plate anchor holding capacity factors
hcfdrsoil = [
[1.0, 1.638945315, 1.994698838, 2.307140604, 2.784, 3.396946397],
[2.0, 2.250880594, 3.062312263, 3.879752818, 5.05497647, 6.628796215],
[3.0, 2.707479408, 4.280253728, 5.806261102, 8.04851414, 11.22159355],
[4.0, 3.042979832, 4.987799672, 7.670616585, 11.18864898, 17.28173317],
[5.0, 3.093482117, 5.183423044, 9.04172475, 13.93390939, 23.58377747],
[6.0, 3.143984402, 5.284457994, 10.04028578, 16.67916981, 30.0924043],
[7.0, 3.194486687, 5.385492944, 10.32644242, 19.42443022, 37.24428049],
[8.0, 3.244988972, 5.486527894, 10.61259905, 20.35658647, 42.81468093],
[9.0, 3.295491257, 5.587562844, 10.89875569, 21.15351064, 47.52531117],
[10.0, 3.345993542, 5.688597794, 11.18491233, 21.95043482, 50.76899705],
[11.0, 3.396495827, 5.789632743, 11.47106897, 22.74735899, 53.1019566],
[12.0, 3.446998112, 5.890667693, 11.75722561, 23.54428317, 55.43491614],
[13.0, 3.497500397, 5.991702643, 12.04338224, 24.34120734, 57.76787568]
]
# SOLVER OPTIONS
op_threshold = 0.
# LOAD VARIABLES
test_data = {
"bathymetry.layers": strata,
"constants.line_bearing_capacity_factor": line_bcf,
"constants.pile_Am_moment_coefficient": pilemomcoefam,
"constants.pile_Bm_moment_coefficient": pilemomcoefbm,
"constants.pile_deflection_coefficients": piledefcoef,
"constants.pile_skin_friction_end_bearing_capacity": pilefricresnoncal,
"constants.soil_cohesionless_reaction_coefficient": subgradereaccoef,
"constants.soil_cohesive_reaction_coefficient": k1coeff,
"constants.soil_drained_holding_capacity_factor": hcfdrsoil,
"farm.soil_sensitivity": soilsen,
"constants.soilprops": soilprops,
"constants.gravity": gravity,
"constants.sea_water_density": seaden,
"constants.air_density": airden,
"constants.steel_density": steelden,
"constants.concrete_density": conden,
"constants.grout_density": groutden,
"constants.grout_compressive_strength": grout_strength,
"constants.cylinder_drag": dragcoefcyl,
"constants.cylinder_wake_amplificiation": wakeampfactorcyl,
"constants.rectangular_wind_drag": winddragcoefrect,
"constants.rectangular_current_drag": currentdragcoefrect,
"constants.rectangular_drift": driftcoeffloatrect,
"constants.rectangular_wave_inertia": waveinertiacoefrect,
"corridor.layers": export_strata,
"farm.collection_point_type": collection_point_type,
"farm.connector_type": connector_type,
"component.collection_points": collection_points,
"component.collection_point_cog": collection_point_cog,
"component.collection_point_foundations": collection_point_found,
"component.dry_mate_connectors": dry_mate_connectors,
"component.dynamic_cable": dynamic_cable,
"project.equipment_gradient_constraint": equipment_gradient_constraint,
"component.installation_soil_compatibility": installation_soil_compatibility,
"component.static_cable": static_cable,
"component.transformers": transformers,
"component.wet_mate_connectors": wet_mate_connectors,
"project.fabrication_cost": fab_cost,
"corridor.number_of_export_cables": number_of_export_cables,
"project.export_voltage": corridor_voltage,
"corridor.landing_point": landing_point,
"corridor.nogo_areas": corridor_nogo_areas,
"project.export_target_burial_depth": corridor_target_burial_depth,
"device.bidirection": bidirection,
"device.connector_type": connection,
"device.turbine_hub_height": turbine_hub_height,
"device.cut_in_velocity": cut_in,
"device.cut_out_velocity": cut_out,
"device.installation_depth_max": max_install,
"device.installation_depth_min": min_install,
"device.minimum_distance_x": min_dist_x,
"device.minimum_distance_y": min_dist_y,
"device.constant_power_factor": power_factor,
"device.power_rating": rated_power_device,
"device.prescribed_footprint_radius": footprint_radius,
"device.system_draft": device_draft,
"device.turbine_diameter": rotor_diam,
"device.turbine_interdistance": turbine_interdist,
"device.turbine_performance": tidal_performance,
"device.umbilical_connection_point": umbilical_connection,
"project.umbilical_safety_factor": umbilical_safety,
"device.voltage": device_voltage,
"device.yaw": yaw_angle,
"device.dry_beam_area": sys_dry_beam,
"device.dry_frontal_area": sys_dry_frontal,
"device.foundation_location": found_loc,
"project.foundation_safety_factor": found_safety,
"device.foundation_type": prefound,
"device.system_centre_of_gravity": sys_cog,
"device.system_displaced_volume": sys_vol,
"device.system_height": sys_height,
"device.system_length": sys_length,
"device.system_mass": sys_mass,
"device.system_profile": sys_prof,
"device.system_roughness": sys_rough,
"device.system_width": sys_width,
"device.wet_beam_area": sys_wet_beam,
"device.wet_frontal_area": sys_wet_frontal,
"farm.blockage_ratio": blockage_ratio,
"project.devices_per_string": devices_per_string,
"farm.direction_of_max_surface_current": max_10year_current_dir,
"project.main_direction": main_direction,
"farm.max_surface_current_10_year": max_10year_current,
"project.network_configuration": network_configuration,
"farm.nogo_areas": nogo_areas,
"project.onshore_infrastructure_cost": onshore_infrastructure_cost,
# "farm.power_law_exponent": power_law_exponent,
"project.rated_power": rated_array_power,
"project.target_burial_depth": target_burial_depth,
"project.tidal_occurrence_nbins": n_bins,
"farm.tidal_occurrence_point": tidal_point,
"farm.tidal_series": tidal_series_raw,
"farm.wave_direction_100_year": predominant_100year_wave_dir,
"farm.current_profile": current_profile,
"project.grout_strength_safety_factor": grout_safety,
"farm.max_gust_wind_direction_100_year": max_100_year_gust_dir,
"farm.max_gust_wind_speed_100_year": max_100_year_gust_speed,
"farm.max_hs_100_year": max_100year_hs,
"farm.max_tp_100_year": max_100year_tp,
"farm.max_water_level_50_year": max_50_year_water_level,
"farm.mean_wind_direction_100_year": mean_100_year_wind_dir,
"farm.mean_wind_speed_100_year": mean_100_year_wind_speed,
"farm.min_water_level_50_year": min_50_year_water_level,
"farm.wave_gamma_100_year": max_100year_gamma,
"project.cost_of_concrete": cost_concrete,
"project.cost_of_grout": cost_grout,
"project.cost_of_steel": cost_steel,
"options.optimisation_threshold": op_threshold,
"options.user_array_layout": user_array_layout,
"options.user_array_option": user_array_option,
"site.lease_boundary": lease_area,
'component.foundations_anchor': comp_tables["drag anchor"],
'component.foundations_pile': comp_tables["pile"],
'component.foundations_anchor_sand': comp_tables["drag anchor sand"],
'component.foundations_anchor_soft': comp_tables["drag anchor soft"]
}
if __name__ == "__main__":
from dtocean_core.utils.files import pickle_test_data
file_path = os.path.abspath(__file__)
pkl_path = pickle_test_data(file_path, test_data)
print "generate test data: {}".format(pkl_path)
|
DTOcean/dtocean-core
|
example_data/fixed_tidal_fixed_layout_10_scenario.py
|
Python
|
gpl-3.0
| 31,371 | 0.008288 |
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api.definitions import provider_net
from neutron_lib.api.definitions import subnet
from neutron_lib import constants
from neutron_lib.db import constants as db_constants
SEGMENT_ID = 'segment_id'
NETWORK_TYPE = 'network_type'
PHYSICAL_NETWORK = 'physical_network'
SEGMENTATION_ID = 'segmentation_id'
NAME_LEN = db_constants.NAME_FIELD_SIZE
DESC_LEN = db_constants.DESCRIPTION_FIELD_SIZE
ALIAS = 'segment'
IS_SHIM_EXTENSION = False
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Segment'
API_PREFIX = ''
DESCRIPTION = 'Segments extension.'
UPDATED_TIMESTAMP = '2016-02-24T17:00:00-00:00'
RESOURCE_NAME = 'segment'
COLLECTION_NAME = RESOURCE_NAME + 's'
RESOURCE_ATTRIBUTE_MAP = {
COLLECTION_NAME: {
'id': {
'allow_post': False,
'allow_put': False,
'validate': {
'type:uuid': None
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'validate': {
'type:string': db_constants.PROJECT_ID_FIELD_SIZE
},
'is_visible': False},
'network_id': {
'allow_post': True,
'allow_put': False,
'validate': {
'type:uuid': None
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
},
PHYSICAL_NETWORK: {
'allow_post': True,
'allow_put': False,
'default': constants.ATTR_NOT_SPECIFIED,
'validate': {
'type:string': provider_net.PHYSICAL_NETWORK_MAX_LEN
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
},
NETWORK_TYPE: {
'allow_post': True,
'allow_put': False,
'validate': {
'type:string': provider_net.NETWORK_TYPE_MAX_LEN
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
},
SEGMENTATION_ID: {
'allow_post': True,
'allow_put': False,
'default': constants.ATTR_NOT_SPECIFIED,
'convert_to': converters.convert_to_int,
'is_sort_key': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'default': constants.ATTR_NOT_SPECIFIED,
'validate': {
'type:string_or_none': NAME_LEN
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
}
},
subnet.COLLECTION_NAME: {
SEGMENT_ID: {
'allow_post': True,
'allow_put': False,
'default': None,
'validate': {
'type:uuid_or_none': None
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
}
}
}
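# Illustrative note (not part of the original source): a request body accepted by
# the map above would look roughly like (all values hypothetical):
#
#     {"segment": {"network_id": "<uuid>", "network_type": "vlan",
#                  "physical_network": "physnet1", "segmentation_id": 100,
#                  "name": "example-segment"}}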
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = [
'standard-attr-description'
]
OPTIONAL_EXTENSIONS = [
    # Use string instead of constant to avoid circular import
'standard-attr-segment'
]
ACTION_STATUS = {}
|
openstack/neutron-lib
|
neutron_lib/api/definitions/segment.py
|
Python
|
apache-2.0
| 4,049 | 0 |
#
# usage: python k67.py {alias}
#
import sys
import pymongo
def find_aliases(alias):
cn = pymongo.MongoClient().MusicBrainz.artist
return [a for a in cn.find({'aliases.name': alias})]
if __name__ == '__main__':
n = sys.argv[1]
for a in find_aliases(n):
print(a)
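# Illustrative note (not part of the original source): for large collections the
# lookup benefits from an index on the embedded alias name, e.g.:
#
#     pymongo.MongoClient().MusicBrainz.artist.create_index('aliases.name')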
|
wtsnjp/nlp100
|
chap07/k67.py
|
Python
|
unlicense
| 290 | 0.006897 |
from currencies.models import Currency
def currencies(request):
currencies = Currency.objects.active()
if not request.session.get('currency'):
try:
currency = Currency.objects.get(is_default__exact=True)
except Currency.DoesNotExist:
currency = None
request.session['currency'] = currency
return {
'CURRENCIES': currencies,
'CURRENCY': request.session['currency']
}
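# Illustrative note (not part of the original source): to activate this context
# processor it must be listed in the Django settings, e.g. (dotted path inferred
# from this module's location):
#
#     TEMPLATE_CONTEXT_PROCESSORS += ('currencies.context_processors.currencies',)
#
# or, on newer Django versions, under TEMPLATES[0]['OPTIONS']['context_processors'].
# Templates can then reference {{ CURRENCY }} and iterate over {{ CURRENCIES }}.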
|
barseghyanartur/django-currencies
|
currencies/context_processors.py
|
Python
|
bsd-3-clause
| 450 | 0 |
"""!
@brief Cluster analysis algorithm: Expectation-Maximization Algorithm for Gaussian Mixture Model.
@details Implementation based on paper @cite article::ema::1.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import numpy
import random
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.kmeans import kmeans
from pyclustering.utils import pi, calculate_ellipse_description, euclidean_distance_square
from enum import IntEnum
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import patches
def gaussian(data, mean, covariance):
"""!
    @brief Calculates the gaussian for a dataset using the specified mean (mathematical expectation) and variance, or covariance
    in the case of multi-dimensional data.
@param[in] data (list): Data that is used for gaussian calculation.
@param[in] mean (float|numpy.array): Mathematical expectation used for calculation.
@param[in] covariance (float|numpy.array): Variance or covariance matrix for calculation.
@return (list) Value of gaussian function for each point in dataset.
"""
dimension = float(len(data[0]))
if dimension != 1.0:
inv_variance = numpy.linalg.pinv(covariance)
else:
inv_variance = 1.0 / covariance
divider = (pi * 2.0) ** (dimension / 2.0) * numpy.sqrt(numpy.linalg.norm(covariance))
if divider != 0.0:
right_const = 1.0 / divider
else:
right_const = float('inf')
result = []
for point in data:
mean_delta = point - mean
point_gaussian = right_const * numpy.exp( -0.5 * mean_delta.dot(inv_variance).dot(numpy.transpose(mean_delta)) )
result.append(point_gaussian)
return result
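# Illustrative sketch (not part of the original source): evaluating the helper on a
# tiny two-dimensional dataset (values are hypothetical):
#
#     data = numpy.array([[0.0, 0.0], [1.0, 1.0]])
#     mean = numpy.array([0.0, 0.0])
#     covariance = numpy.eye(2)
#     densities = gaussian(data, mean, covariance)   # one density value per point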
class ema_init_type(IntEnum):
"""!
@brief Enumeration of initialization types for Expectation-Maximization algorithm.
"""
## Means are randomly taken from input dataset and variance or covariance is calculated based on
## spherical data that belongs to the chosen means.
RANDOM_INITIALIZATION = 0
## Two step initialization. The first is calculation of initial centers using K-Means++ method.
## The second is K-Means clustering using obtained centers in the first step. Obtained clusters
## and its centers are used for calculation of variance (covariance in case of multi-dimensional)
## data.
KMEANS_INITIALIZATION = 1
class ema_initializer():
"""!
@brief Provides services for preparing initial means and covariances for Expectation-Maximization algorithm.
@details Initialization strategy is defined by enumerator 'ema_init_type': random initialization and
kmeans with kmeans++ initialization. Here an example of initialization using kmeans strategy:
@code
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FAMOUS_SAMPLES
from pyclustering.cluster.ema import ema_initializer
sample = read_sample(FAMOUS_SAMPLES.SAMPLE_OLD_FAITHFUL)
amount_clusters = 2
initial_means, initial_covariance = ema_initializer(sample, amount_clusters).initialize()
print(initial_means)
print(initial_covariance)
@endcode
"""
__MAX_GENERATION_ATTEMPTS = 10
def __init__(self, sample, amount):
"""!
@brief Constructs EM initializer.
@param[in] sample (list): Data that will be used by the EM algorithm.
@param[in] amount (uint): Amount of clusters that should be allocated by the EM algorithm.
"""
self.__sample = sample
self.__amount = amount
def initialize(self, init_type = ema_init_type.KMEANS_INITIALIZATION):
"""!
@brief Calculates initial parameters for EM algorithm: means and covariances using
specified strategy.
@param[in] init_type (ema_init_type): Strategy for initialization.
@return (float|list, float|numpy.array) Initial means and variance (covariance matrix in case multi-dimensional data).
"""
if init_type == ema_init_type.KMEANS_INITIALIZATION:
return self.__initialize_kmeans()
elif init_type == ema_init_type.RANDOM_INITIALIZATION:
return self.__initialize_random()
raise NameError("Unknown type of EM algorithm initialization is specified.")
def __calculate_initial_clusters(self, centers):
"""!
        @brief Calculates the Euclidean distance from each point to each cluster center.
        @details The nearest points are captured by the corresponding clusters and, as a result, the clusters are updated.
        @return (list) Updated clusters as a list of clusters. Each cluster contains indexes of objects from the data.
"""
clusters = [[] for _ in range(len(centers))]
for index_point in range(len(self.__sample)):
index_optim, dist_optim = -1, 0.0
for index in range(len(centers)):
dist = euclidean_distance_square(self.__sample[index_point], centers[index])
if (dist < dist_optim) or (index == 0):
index_optim, dist_optim = index, dist
clusters[index_optim].append(index_point)
return clusters
def __calculate_initial_covariances(self, initial_clusters):
covariances = []
for initial_cluster in initial_clusters:
if len(initial_cluster) > 1:
cluster_sample = [self.__sample[index_point] for index_point in initial_cluster]
covariances.append(numpy.cov(cluster_sample, rowvar=False))
else:
dimension = len(self.__sample[0])
covariances.append(numpy.zeros((dimension, dimension)) + random.random() / 10.0)
return covariances
def __initialize_random(self):
initial_means = []
for _ in range(self.__amount):
mean = self.__sample[ random.randint(0, len(self.__sample)) - 1 ]
attempts = 0
while (mean in initial_means) and (attempts < ema_initializer.__MAX_GENERATION_ATTEMPTS):
mean = self.__sample[ random.randint(0, len(self.__sample)) - 1 ]
attempts += 1
if attempts == ema_initializer.__MAX_GENERATION_ATTEMPTS:
mean = [ value + (random.random() - 0.5) * value * 0.2 for value in mean ]
initial_means.append(mean)
initial_clusters = self.__calculate_initial_clusters(initial_means)
initial_covariance = self.__calculate_initial_covariances(initial_clusters)
return initial_means, initial_covariance
def __initialize_kmeans(self):
initial_centers = kmeans_plusplus_initializer(self.__sample, self.__amount).initialize()
kmeans_instance = kmeans(self.__sample, initial_centers, ccore = True)
kmeans_instance.process()
means = kmeans_instance.get_centers()
covariances = []
initial_clusters = kmeans_instance.get_clusters()
for initial_cluster in initial_clusters:
if len(initial_cluster) > 1:
cluster_sample = [ self.__sample[index_point] for index_point in initial_cluster ]
covariances.append(numpy.cov(cluster_sample, rowvar=False))
else:
dimension = len(self.__sample[0])
covariances.append(numpy.zeros((dimension, dimension)) + random.random() / 10.0)
return means, covariances
class ema_observer:
"""!
@brief Observer of EM algorithm for collecting algorithm state on each step.
    @details It can be used to obtain the whole picture of the clustering process of the EM algorithm. Allocated clusters,
             means and covariances are stored in the observer on each step. Here is an example of usage:
@code
from pyclustering.cluster.ema import ema, ema_observer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
# Read data from text file.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
# Create EM observer.
observer = ema_observer()
        # Create EM algorithm to allocate four clusters and pass the observer to it.
ema_instance = ema(sample, 4, observer=observer)
# Run clustering process.
ema_instance.process()
# Print amount of steps that were done by the algorithm.
print("EMA steps:", observer.get_iterations())
# Print evolution of means and covariances.
print("Means evolution:", observer.get_evolution_means())
print("Covariances evolution:", observer.get_evolution_covariances())
# Print evolution of clusters.
print("Clusters evolution:", observer.get_evolution_clusters())
# Print final clusters.
print("Allocated clusters:", observer.get_evolution_clusters()[-1])
@endcode
"""
def __init__(self):
"""!
@brief Initializes EM observer.
"""
self.__means_evolution = []
self.__covariances_evolution = []
self.__clusters_evolution = []
def __len__(self):
"""!
@return (uint) Amount of iterations that were done by the EM algorithm.
"""
return len(self.__means_evolution)
def get_iterations(self):
"""!
@return (uint) Amount of iterations that were done by the EM algorithm.
"""
return len(self.__means_evolution)
def get_evolution_means(self):
"""!
@return (list) Mean of each cluster on each step of clustering.
"""
return self.__means_evolution
def get_evolution_covariances(self):
"""!
@return (list) Covariance matrix (or variance in case of one-dimensional data) of each cluster on each step of clustering.
"""
return self.__covariances_evolution
def get_evolution_clusters(self):
"""!
@return (list) Allocated clusters on each step of clustering.
"""
return self.__clusters_evolution
def notify(self, means, covariances, clusters):
"""!
        @brief This method is used by the algorithm to notify the observer about changes, providing the new values:
        means, covariances and allocated clusters.
        @param[in] means (list): Mean of each cluster on the current step.
        @param[in] covariances (list): Covariances of each cluster on the current step.
        @param[in] clusters (list): Allocated clusters on the current step.
"""
self.__means_evolution.append(means)
self.__covariances_evolution.append(covariances)
self.__clusters_evolution.append(clusters)
class ema_visualizer:
"""!
@brief Visualizer of EM algorithm's results.
    @details Provides services for visualization of particular features of the algorithm; for example,
             in the case of a two-dimensional dataset it shows covariance ellipses.
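             A minimal usage sketch (it assumes clustering results produced by the `ema` algorithm, as in that class's own example):
    @code
        from pyclustering.cluster.ema import ema, ema_visualizer
        from pyclustering.utils import read_sample
        from pyclustering.samples.definitions import FCPS_SAMPLES

        # Read data and run EM clustering for three clusters.
        sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
        ema_instance = ema(sample, 3)
        ema_instance.process()

        # Draw the clusters together with their covariance ellipses, then release the figure.
        figure = ema_visualizer.show_clusters(ema_instance.get_clusters(), sample,
                                              ema_instance.get_covariances(),
                                              ema_instance.get_centers())
        ema_visualizer.close(figure)
    @endcode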
"""
@staticmethod
def show_clusters(clusters, sample, covariances, means, figure=None, display=True):
"""!
        @brief Draws clusters and, in the case of a two-dimensional dataset, their covariance ellipses.
        @details The figure allocated by this method should be closed using the `close()` method of this visualizer.
        @param[in] clusters (list): Clusters that were allocated by the algorithm.
        @param[in] sample (list): Dataset that was used for clustering.
@param[in] covariances (list): Covariances of the clusters.
@param[in] means (list): Means of the clusters.
        @param[in] figure (figure): If 'None' then a new figure is created, otherwise the specified figure is used
        for visualization.
        @param[in] display (bool): If 'True' then the figure is shown by the method, otherwise it should be
        shown manually using the matplotlib function 'plt.show()'.
@return (figure) Figure where clusters were drawn.
"""
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
if figure is None:
figure = visualizer.show(display=False)
else:
visualizer.show(figure=figure, display=False)
if len(sample[0]) == 2:
ema_visualizer.__draw_ellipses(figure, visualizer, clusters, covariances, means)
if display is True:
plt.show()
return figure
@staticmethod
def close(figure):
"""!
@brief Closes figure object that was used or allocated by the visualizer.
@param[in] figure (figure): Figure object that was used or allocated by the visualizer.
"""
plt.close(figure)
@staticmethod
def animate_cluster_allocation(data, observer, animation_velocity = 75, movie_fps = 1, save_movie = None):
"""!
        @brief Animates the clustering process performed by the EM algorithm.
        @param[in] data (list): Dataset that is used for clustering.
        @param[in] observer (ema_observer): EM observer that was used for collecting information about the clustering process.
        @param[in] animation_velocity (uint): Interval between frames in milliseconds (for run-time animation only).
        @param[in] movie_fps (uint): Defines frames per second (for rendering movie only).
        @param[in] save_movie (string): If specified, the animation is stored to the file named by this parameter.
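        A minimal usage sketch (assuming `sample` and a processed `observer` from the `ema_observer` example, with `ema_visualizer` imported):
        @code
            ema_visualizer.animate_cluster_allocation(sample, observer)
        @endcode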
"""
figure = plt.figure()
def init_frame():
return frame_generation(0)
def frame_generation(index_iteration):
figure.clf()
figure.suptitle("EM algorithm (iteration: " + str(index_iteration) +")", fontsize = 18, fontweight = 'bold')
clusters = observer.get_evolution_clusters()[index_iteration]
covariances = observer.get_evolution_covariances()[index_iteration]
means = observer.get_evolution_means()[index_iteration]
ema_visualizer.show_clusters(clusters, data, covariances, means, figure, False)
figure.subplots_adjust(top=0.85)
return [figure.gca()]
iterations = len(observer)
cluster_animation = animation.FuncAnimation(figure, frame_generation, iterations, interval = animation_velocity, init_func = init_frame, repeat_delay = 5000)
if save_movie is not None:
cluster_animation.save(save_movie, writer='ffmpeg', fps=movie_fps, bitrate=1500)
else:
plt.show()
plt.close(figure)
@staticmethod
def __draw_ellipses(figure, visualizer, clusters, covariances, means):
ax = figure.get_axes()[0]
for index in range(len(clusters)):
angle, width, height = calculate_ellipse_description(covariances[index])
color = visualizer.get_cluster_color(index, 0)
ema_visualizer.__draw_ellipse(ax, means[index][0], means[index][1], angle, width, height, color)
@staticmethod
def __draw_ellipse(ax, x, y, angle, width, height, color):
if (width > 0.0) and (height > 0.0):
ax.plot(x, y, color=color, marker='x', markersize=6)
ellipse = patches.Ellipse((x, y), width, height, alpha=0.2, angle=-angle, linewidth=2, fill=True, zorder=2, color=color)
ax.add_patch(ellipse)
class ema:
"""!
@brief Expectation-Maximization clustering algorithm for Gaussian Mixture Model (GMM).
@details The algorithm provides only clustering services (unsupervised learning).
             Here is an example of the data clustering process:
@code
from pyclustering.cluster.ema import ema, ema_visualizer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
# Read data from text file.
sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
        # Create EM algorithm to allocate three clusters.
ema_instance = ema(sample, 3)
# Run clustering process.
ema_instance.process()
# Get clustering results.
clusters = ema_instance.get_clusters()
covariances = ema_instance.get_covariances()
means = ema_instance.get_centers()
# Visualize obtained clustering results.
ema_visualizer.show_clusters(clusters, sample, covariances, means)
@endcode
    Here are the clustering results of the Expectation-Maximization clustering algorithm for the popular sample 'OldFaithful'.
    Random initial means and covariances were used in the example. The first step is presented on the left side of the figure and
    the final result (the last step) is on the right side:
@image html ema_old_faithful_clustering.png
@see ema_visualizer
@see ema_observer
"""
def __init__(self, data, amount_clusters, means=None, variances=None, observer=None, tolerance=0.00001, iterations=100):
"""!
@brief Initializes Expectation-Maximization algorithm for cluster analysis.
        @param[in] data (list): Dataset that should be analysed, where each point (object) is represented by a list of coordinates.
@param[in] amount_clusters (uint): Amount of clusters that should be allocated.
@param[in] means (list): Initial means of clusters (amount of means should be equal to amount of clusters for allocation).
If this parameter is 'None' then K-Means algorithm with K-Means++ method will be used for initialization by default.
@param[in] variances (list): Initial cluster variances (or covariances in case of multi-dimensional data). Amount of
covariances should be equal to amount of clusters that should be allocated. If this parameter is 'None' then
K-Means algorithm with K-Means++ method will be used for initialization by default.
@param[in] observer (ema_observer): Observer for gathering information about clustering process.
        @param[in] tolerance (float): Defines the stop condition of the algorithm (when the difference between the current and
        previous log-likelihood estimates is less than 'tolerance', clustering is over).
@param[in] iterations (uint): Additional stop condition parameter that defines maximum number of steps that can be
performed by the algorithm during clustering process.
"""
self.__data = numpy.array(data)
self.__amount_clusters = amount_clusters
self.__tolerance = tolerance
self.__iterations = iterations
self.__observer = observer
self.__means = means
self.__variances = variances
self.__verify_arguments()
if (means is None) or (variances is None):
self.__means, self.__variances = ema_initializer(data, amount_clusters).initialize(ema_init_type.KMEANS_INITIALIZATION)
if len(self.__means) != amount_clusters:
self.__amount_clusters = len(self.__means)
self.__rc = [ [0.0] * len(self.__data) for _ in range(amount_clusters) ]
self.__pic = [1.0] * amount_clusters
self.__clusters = []
self.__gaussians = [ [] for _ in range(amount_clusters) ]
self.__stop = False
def process(self):
"""!
@brief Run clustering process of the algorithm.
@return (ema) Returns itself (EMA instance).
"""
previous_likelihood = -200000
current_likelihood = -100000
current_iteration = 0
while(self.__stop is False) and (abs(previous_likelihood - current_likelihood) > self.__tolerance) and (current_iteration < self.__iterations):
self.__expectation_step()
self.__maximization_step()
current_iteration += 1
self.__extract_clusters()
self.__notify()
previous_likelihood = current_likelihood
current_likelihood = self.__log_likelihood()
self.__stop = self.__get_stop_condition()
self.__normalize_probabilities()
return self
def get_clusters(self):
"""!
        @return (list) Allocated clusters, where each cluster is represented by a list of indexes of points from the dataset;
        for example, two clusters may have the following representation: [[0, 1, 4], [2, 3, 5, 6]].
"""
return self.__clusters
def get_centers(self):
"""!
@return (list) Corresponding centers (means) of clusters.
"""
return self.__means
def get_covariances(self):
"""!
@return (list) Corresponding variances (or covariances in case of multi-dimensional data) of clusters.
"""
return self.__variances
def get_probabilities(self):
"""!
        @brief Returns a 2-dimensional list with the membership probability of each object for each cluster,
        where the first index is the cluster and the second is the point.
        @code
        # Get membership probabilities
        probabilities = ema_instance.get_probabilities()
        # Show probability of the fifth element in the first and in the second cluster
        index_point = 5
        print("Probability in the first cluster:", probabilities[0][index_point])
        print("Probability in the second cluster:", probabilities[1][index_point])
        @endcode
        @return (list) 2-dimensional list with the membership probability of each object for each cluster.
"""
return self.__rc
def __erase_empty_clusters(self):
clusters, means, variances, pic, gaussians, rc = [], [], [], [], [], []
for index_cluster in range(len(self.__clusters)):
if len(self.__clusters[index_cluster]) > 0:
clusters.append(self.__clusters[index_cluster])
means.append(self.__means[index_cluster])
variances.append(self.__variances[index_cluster])
pic.append(self.__pic[index_cluster])
gaussians.append(self.__gaussians[index_cluster])
rc.append(self.__rc[index_cluster])
if len(self.__clusters) != len(clusters):
self.__clusters, self.__means, self.__variances, self.__pic = clusters, means, variances, pic
self.__gaussians, self.__rc = gaussians, rc
self.__amount_clusters = len(self.__clusters)
def __notify(self):
if self.__observer is not None:
self.__observer.notify(self.__means, self.__variances, self.__clusters)
def __extract_clusters(self):
self.__clusters = [[] for _ in range(self.__amount_clusters)]
for index_point in range(len(self.__data)):
candidates = []
for index_cluster in range(self.__amount_clusters):
candidates.append((index_cluster, self.__rc[index_cluster][index_point]))
index_winner = max(candidates, key=lambda candidate: candidate[1])[0]
self.__clusters[index_winner].append(index_point)
self.__erase_empty_clusters()
def __log_likelihood(self):
likelihood = 0.0
for index_point in range(len(self.__data)):
particle = 0.0
for index_cluster in range(self.__amount_clusters):
particle += self.__pic[index_cluster] * self.__gaussians[index_cluster][index_point]
if particle > 0.0:
likelihood += numpy.log(particle)
return likelihood
def __probabilities(self, index_cluster, index_point):
divider = 0.0
for i in range(self.__amount_clusters):
divider += self.__pic[i] * self.__gaussians[i][index_point]
if (divider != 0.0) and (divider != float('inf')):
return self.__pic[index_cluster] * self.__gaussians[index_cluster][index_point] / divider
return 1.0
def __expectation_step(self):
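        # E-step: evaluate each cluster's Gaussian density for every point and recompute the
        # responsibilities rc[cluster][point] (probability that the point belongs to the cluster).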
self.__gaussians = [ [] for _ in range(self.__amount_clusters) ]
for index in range(self.__amount_clusters):
self.__gaussians[index] = gaussian(self.__data, self.__means[index], self.__variances[index])
self.__rc = [ [0.0] * len(self.__data) for _ in range(self.__amount_clusters) ]
for index_cluster in range(self.__amount_clusters):
for index_point in range(len(self.__data)):
self.__rc[index_cluster][index_point] = self.__probabilities(index_cluster, index_point)
def __maximization_step(self):
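        # M-step: re-estimate the mixture weights (pic), means and covariances from the responsibilities;
        # clusters whose total responsibility is zero are dropped.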
self.__pic = []
self.__means = []
self.__variances = []
amount_impossible_clusters = 0
for index_cluster in range(self.__amount_clusters):
mc = numpy.sum(self.__rc[index_cluster])
if mc == 0.0:
amount_impossible_clusters += 1
continue
self.__pic.append( mc / len(self.__data) )
self.__means.append( self.__update_mean(self.__rc[index_cluster], mc) )
self.__variances.append( self.__update_covariance(self.__means[-1], self.__rc[index_cluster], mc) )
self.__amount_clusters -= amount_impossible_clusters
def __get_stop_condition(self):
for covariance in self.__variances:
if numpy.linalg.norm(covariance) == 0.0:
return True
return False
def __update_covariance(self, means, rc, mc):
covariance = 0.0
for index_point in range(len(self.__data)):
deviation = numpy.array([self.__data[index_point] - means])
covariance += rc[index_point] * deviation.T.dot(deviation)
covariance = covariance / mc
return covariance
def __update_mean(self, rc, mc):
mean = 0.0
for index_point in range(len(self.__data)):
mean += rc[index_point] * self.__data[index_point]
mean = mean / mc
return mean
def __normalize_probabilities(self):
for index_point in range(len(self.__data)):
probability = 0.0
for index_cluster in range(len(self.__clusters)):
probability += self.__rc[index_cluster][index_point]
if abs(probability - 1.0) > 0.000001:
self.__normalize_probability(index_point, probability)
def __normalize_probability(self, index_point, probability):
if probability == 0.0:
return
normalization = 1.0 / probability
for index_cluster in range(len(self.__clusters)):
self.__rc[index_cluster][index_point] *= normalization
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__data))
if self.__amount_clusters < 1:
raise ValueError("Amount of clusters (current value '%d') should be greater or equal to 1." %
self.__amount_clusters)
|
annoviko/pyclustering
|
pyclustering/cluster/ema.py
|
Python
|
gpl-3.0
| 28,795 | 0.010662 |
import itertools
from batchy.runloop import coro_return, runloop_coroutine
from batchy.batch_coroutine import batch_coroutine, class_batch_coroutine
from . import BaseTestCase
CALL_COUNT = 0
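# CALL_COUNT counts how many times a batch coroutine body actually runs; the tests use it to
# verify that several yielded calls are coalesced into a single batched invocation.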
@batch_coroutine()
def increment(arg_lists):
def increment_single(n):
return n + 1
global CALL_COUNT
CALL_COUNT += 1
coro_return([increment_single(*ar, **kw) for ar, kw in arg_lists])
yield
@batch_coroutine(accepts_kwargs=False)
def increment_nokwargs(arg_lists):
global CALL_COUNT
CALL_COUNT += 1
coro_return(list(itertools.starmap(lambda _n: _n + 1, arg_lists)))
yield
class BatchClient(object):
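    """Fake client whose batched methods only count their invocations, so tests can assert how calls were coalesced."""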
def __init__(self):
self.get_call_count = 0
self.set_call_count = 0
self.run_call_count = 0
self.throw_count = 0
@class_batch_coroutine(1)
def get(self, arg_lists):
self.get_call_count += 1
yield self.run()
coro_return([0] * len(arg_lists))
@class_batch_coroutine(1)
def set(self, _):
self.set_call_count += 1
yield self.run()
@class_batch_coroutine(0)
def run(self, _):
self.run_call_count += 1
yield
@class_batch_coroutine(0)
def throw(self, _):
self.throw_count += 1
raise ValueError()
yield # pylint: disable-msg=W0101
@class_batch_coroutine(2)
def throw_sooner(self, _):
self.throw_count += 1
raise ValueError()
yield # pylint: disable-msg=W0101
def reset(self):
self.get_call_count = self.set_call_count = self.run_call_count = self.throw_count = 0
class BatchTests(BaseTestCase):
def setup(self):
global CALL_COUNT
CALL_COUNT = 0
def test_simple_batch(self):
@runloop_coroutine()
def test():
a, b, c = yield increment(1), increment(2), increment(3)
coro_return((a, b, c))
self.assert_equals((2,3,4), test())
self.assert_equals(1, CALL_COUNT)
def test_batch_no_kwargs(self):
@runloop_coroutine()
def test():
a, b, c = yield increment_nokwargs(1), increment_nokwargs(2), increment_nokwargs(3)
coro_return((a, b, c))
self.assert_equals((2,3,4), test())
self.assert_equals(1, CALL_COUNT)
def test_multi_clients(self):
client1, client2 = BatchClient(), BatchClient()
@runloop_coroutine()
def sub_1(client):
rv = yield client.get()
yield client.set()
coro_return(rv)
@runloop_coroutine()
def sub_2(client):
rv = yield client.get()
yield client.set()
coro_return(rv)
@runloop_coroutine()
def test1():
rv = yield sub_1(client1), sub_2(client2)
coro_return(rv)
test1()
self.assert_equal(1, client1.get_call_count)
self.assert_equal(1, client1.set_call_count)
self.assert_equal(2, client1.run_call_count)
self.assert_equal(1, client2.get_call_count)
self.assert_equal(1, client2.set_call_count)
self.assert_equal(2, client2.run_call_count)
client1.reset()
client2.reset()
@runloop_coroutine()
def test2():
rv = yield sub_1(client1), sub_2(client1)
coro_return(rv)
test2()
self.assert_equal(1, client1.get_call_count)
self.assert_equal(1, client1.set_call_count)
self.assert_equal(2, client1.run_call_count)
self.assert_equal(0, client2.get_call_count)
self.assert_equal(0, client2.set_call_count)
self.assert_equal(0, client2.run_call_count)
def test_exception(self):
client = BatchClient()
@runloop_coroutine()
def action_1():
yield client.throw()
@runloop_coroutine()
def action_2():
yield client.get('a')
yield client.throw()
@runloop_coroutine()
def test():
yield action_1(), action_1(), action_2()
self.assert_raises(ValueError, test)
def test_exception_sooner(self):
client = BatchClient()
@runloop_coroutine()
def action_1():
yield client.throw_sooner()
@runloop_coroutine()
def action_2():
yield client.get('a')
yield client.throw_sooner()
@runloop_coroutine()
def test():
yield action_1(), action_1(), action_2()
self.assert_raises(ValueError, test)
|
mikekap/batchy
|
tests/batch_tests.py
|
Python
|
apache-2.0
| 4,519 | 0.002213 |
import Sofa
import random
from cmath import *
############################################################################################
# this is a PythonScriptController example script
############################################################################################
############################################################################################
# following defs are used later in the script
############################################################################################
# utility methods
falling_speed = 0
capsule_height = 5
capsule_chain_height = 5
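# Shared parameters for the creation helpers below: initial falling velocity along z and default capsule/chain segment heights.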
def createRigidCapsule(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
radius=0
    if len(args)==0:
        radius = random.uniform(1,3)
    else:
        radius = args[0]
    if len(args) <= 1:
        height = random.uniform(1,3)
    else:
        height = args[1]
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
node.createObject('TCapsuleModel',template='Rigid',name='capsule_model',radii=str(radius),heights=str(height))
return 0
def createBulletCapsule(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
if len(args) <= 1:
height = random.uniform(1,3)
else:
height = args[1]
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1,template='Rigid')
node.createObject('RigidBulletCapsuleModel',template='Rigid',name='capsule_model',radii=str(radius),heights=str(height),margin="0.5")
return 0
def createBulletCylinder(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
if len(args) <= 1:
height = random.uniform(1,3)
else:
height = args[1]
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1,template='Rigid')
node.createObject('BulletCylinderModel',template='Rigid',name='capsule_model',radii=str(radius),heights=str(height))
return 0
def createFlexCapsule(parentNode,name,x,y,z,*args):
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
node = parentNode.createChild(name)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x + x_rand)+' '+str(y + y_rand)+' '+str(z + z_rand + capsule_height)+' '+str(x - x_rand)+' '+str(y - y_rand)+' '+str(z - z_rand),velocity='0 0 '+str(falling_speed))
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return 0
def createBulletFlexCapsule(parentNode,name,x,y,z,*args):
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
node = parentNode.createChild(name)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x + x_rand)+' '+str(y + y_rand)+' '+str(z + z_rand + capsule_height)+' '+str(x - x_rand)+' '+str(y - y_rand)+' '+str(z - z_rand),velocity='0 0 '+str(falling_speed))
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
node.createObject('BulletCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return 0
def createCapsuleChain(parentNode,name,length,x,y,z):
node = parentNode.createChild(name)
#radius=random.uniform(1,3)
radius=0.5
height=5
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
ray = 3.0
t = 0.0
delta_t = 0.7
topo_edges=''
particles=''
velocities = ''
springs=''
for i in range(0,length):
particles += str(x + (ray * cos(t)).real)+' '+str(y + (ray * sin(t)).real)+' '+str(z + i*capsule_chain_height)+' '
t += delta_t
if i < length -1:
topo_edges += str(i)+' '+str(i + 1)+' '
springs += str(i)+' '+str(i + 1)+' 10 1 '+str(capsule_chain_height)+' '
velocities+='0 0 '+str(falling_speed)+' '
topo_edges += str(length - 2)+' '+str(length -1)
springs += str(length - 2)+' '+str(length -1)+' 10 1 '+str(capsule_chain_height)
node.createObject('MechanicalObject',template='Vec3d',name='falling_particles',position=particles,velocity=velocities)
node.createObject('StiffSpringForceField',template='Vec3d',name='springforcefield',stiffness='100',damping='1',spring=springs)
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges=topo_edges,drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return 0
def createOBB(parentNode,name,x,y,z,*args):
a=0
b=0
c=0
if len(args)==0:
a=random.uniform(0.5,1.5)
b=random.uniform(0.5,1.5)
c=random.uniform(0.5,1.5)
else:
a=args[0]
b=args[1]
c=args[2]
node = parentNode.createChild(name)
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
node.createObject('TOBBModel',template='Rigid',name='OBB_model',extents=str(a)+' '+str(b)+' '+str(c))
return 0
def createBulletOBB(parentNode,name,x,y,z,*args):
a=0
b=0
c=0
if len(args)==0:
a=random.uniform(0.5,1.5)
b=random.uniform(0.5,1.5)
c=random.uniform(0.5,1.5)
else:
a=args[0]
b=args[1]
c=args[2]
node = parentNode.createChild(name)
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1,template='Rigid')
node.createObject('BulletOBBModel',template='Rigid',name='OBB_model',extents=str(a)+' '+str(b)+' '+str(c))
return 0
def createCapsule(parentNode,name,x,y,z):
if random.randint(0,1) == 0:
createRigidCapsule(parentNode,name,x,y,z)
else:
createFlexCapsule(parentNode,name,x,y,z)
return 0
def createSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z),velocity='0 0 '+str(falling_speed))
node.createObject('TSphereModel',template='Vec3d',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return 0
def createBulletSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z),velocity='0 0 '+str(falling_speed))
node.createObject('BulletSphereModel',template='Vec3d',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return 0
def createRigidSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Rigid',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
node.createObject('TSphereModel',template='Rigid',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return 0
|
FabienPean/sofa
|
applications/plugins/BulletCollisionDetection/examples/PrimitiveCreation.py
|
Python
|
lgpl-2.1
| 9,475 | 0.054142 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import re
import fdb
import time
import os
from skarphedcore.configuration import Configuration
from common.errors import DatabaseException
class Database(object):
"""
The Database-Class handles the connection to a Firebird 2.5+ Database
"""
_borgmind = {}
def __init__(self):
"""
        The Database loads connection data for the database from the Core configuration
"""
self.__dict__ = Database._borgmind
if self.__dict__ == {}:
self._connection = None
self._ip = None
self._dbname = None
self._user = None
self._password = None
self._queryCache = QueryCache()
c = Configuration()
self.set_ip(c.get_entry('db.ip'))
self.set_db_name(c.get_entry('db.name'))
self.set_user(c.get_entry('db.user'))
self.set_password(c.get_entry('db.password'))
self.connect()
def connect(self):
"""
The class actually connects to the database and stores the
connection in _connection
"""
if None in (self._user, self._ip, self._dbname, self._password):
raise DatabaseException(DatabaseException.get_msg(1))
#TODO: Globally Definable DB-Path
try:
self._connection = fdb.connect(
host=self._ip,
database='/var/lib/firebird/2.5/data/'+self._dbname,
user=self._user,
password=self._password,
charset="UTF8")
except fdb.fbcore.DatabaseError, e:
raise DatabaseException(e.args[0])
return
def set_ip(self, ip):
"""
trivial
"""
self._ip = str(ip)
def set_db_name(self, dbname):
"""
trivial
"""
self._dbname = str(dbname)
def set_user(self, user):
"""
trivial
"""
self._user = str(user)
def set_password(self, password):
"""
trivial
"""
self._password = str(password)
def get_connection(self):
"""
trivial
"""
return self._connection
def commit(self):
"""
commits a pending transaction to the database
"""
self._connection.commit()
def query(self, statement, args=(), module=None, forceNoCache=False, commit=False):
"""
        Executes a query on the database. Be sure to pass the module;
        it is needed to resolve module-specific table names.
"""
try:
mutex = Configuration().get_entry("core.webpath")+"/db.mutex"
if commit: #only if writing stuff
while os.path.exists(mutex):
time.sleep(0.000001)
os.mkdir(mutex)
if self._connection is None:
raise DatabaseException(DatabaseException.get_msg(2))
if module is not None:
statement = self._replace_module_tables(module,statement)
cur = self._connection.cursor()
try:
prepared, cur = self._queryCache(cur, statement)
cur.execute(prepared,args)
except fdb.fbcore.DatabaseError,e:
raise DatabaseException(str(e))
if commit:
self.commit()
return cur
finally:
if commit:
os.rmdir(mutex)
def _replace_module_tables(self, module, query):
"""
replaces module-based tablenames like
'de.grinhold.skarphed.news.news'
with an actual SQL-table like
'TAB_000004'
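        e.g. (hypothetical table id):
            "SELECT * FROM ${de.grinhold.skarphed.news.news} ;"
        becomes
            "SELECT * FROM TAB_000004 ;"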
"""
tagpattern = re.compile('\$\{[A-Za-z0-9.]+\}')
matches = tagpattern.findall(query)
matches = list(set(matches)) # making matches unique
matches = map(lambda s: s[2:-1], matches)
matchesRaw = list(matches)
modules = [module.get_name()]
for match in matches:
splitted = match.split(".")
if len(splitted) > 1:
matches.append(splitted[-1])
matches.remove(match)
splitted.remove(splitted[-1])
modules.append(".".join(splitted))
tableQuery = """SELECT MDT_ID, MDT_NAME, MOD_NAME
FROM MODULETABLES
INNER JOIN MODULES ON (MDT_MOD_ID = MOD_ID )
WHERE MOD_NAME IN (%s)
AND MDT_NAME IN (%s) ;"""%("'"+"','".join(modules)+"'","'"+"','".join(matches)+"'")
cur = self._connection.cursor()
cur.execute(tableQuery)
replacementsDone = []
for res in cur.fetchallmap():
pattern = "${"+res["MOD_NAME"]+"."+res["MDT_NAME"]+"}"
tableId = str(res["MDT_ID"])
tableId = "TAB_"+"0"*(6-len(tableId))+tableId
query = query.replace(pattern, tableId)
replacementsDone.append(res["MOD_NAME"]+"."+res["MDT_NAME"])
if res["MOD_NAME"] == module.get_name():
query = query.replace("${"+res["MDT_NAME"]+"}", tableId)
if len(matchesRaw) != len(replacementsDone):
for replacement in replacementsDone:
matchesRaw.remove(replacement)
raise DatabaseException(DatabaseException.get_msg(3,str(matchesRaw)))
return query
def get_seq_next(self,sequenceId):
"""
Yields the next value of a given sequence (e.g. 'MOD_GEN')
and increments it
        If the sequence identifier starts with "${", the module table reference is resolved to its sequence first.
"""
cur = self._connection.cursor()
if sequenceId.startswith("${"):
statement = "SELECT MDT_ID FROM MODULETABLES INNER JOIN MODULES ON MOD_ID = MDT_MOD_ID WHERE MOD_NAME = ? AND MDT_NAME = ? ;"
args = tuple(sequenceId[2:-1].split("."))
cur.execute(statement, args)
res = cur.fetchone()
seqnum = str(res[0])
sequenceId = "SEQ_"+"0"*(6-len(seqnum))+seqnum
statement = "SELECT GEN_ID ( %s , 1) FROM RDB$DATABASE ;"%str(sequenceId)
cur.execute(statement)
res = cur.fetchone()
return res[0]
def get_seq_current(self,sequenceId):
"""
Yields the current value of a given sequence (e.g. 'MOD_GEN')
without incrementing it
"""
cur = self._connection.cursor()
if sequenceId.startswith("${"):
statement = "SELECT MDT_ID FROM MODULETABLES INNER JOIN MODULES ON MOD_ID = MDT_MOD_ID WHERE MOD_NAME = ? AND MDT_NAME = ? ;"
args = tuple(sequenceId[2:-1].split("."))
cur.execute(statement, args)
res = cur.fetchone()
seqnum = str(res[0])
sequenceId = "SEQ_"+"0"*(6-len(seqnum))+seqnum
statement = "SELECT GEN_ID ( %s , 0) FROM RDB$DATABASE ;"%str(sequenceId)
cur.execute(statement)
res = cur.fetchone()
return res[0]
def set_seq_to(self,sequenceId, value):
"""
        Sets the given sequence (e.g. 'MOD_GEN') to the specified value
"""
cur = self._connection.cursor()
statement = "SET GENERATOR %s TO %d ;"%(str(sequenceId), int(value))
cur.execute(statement)
self.commit()
def remove_tables_for_module(self, module):
"""
remove tables as part of module uninstallation
"""
stmnt = "SELECT MDT_ID \
FROM MODULETABLES \
WHERE MDT_MOD_ID = ? ;"
cur = self.query(stmnt,(module.get_id(),))
rows = cur.fetchallmap()
for row in rows:
tab_id = "0"*(6-len(str(row["MDT_ID"])))+str(row["MDT_ID"])
stmnt = "DROP TABLE TAB_%s ;"%tab_id
self.query(stmnt,commit=True)
stmnt = "DROP GENERATOR SEQ_%s ;"%tab_id
self.query(stmnt,commit=True)
stmnt = "DELETE FROM MODULETABLES WHERE MDT_ID = ? ;"
self.query(stmnt,(row["MDT_ID"],),commit=True)
def update_tables_for_module(self, module):
"""
update tables as part of module update
"""
tables = module.get_tables()
stmnt = "SELECT MDT_NAME FROM MODULETABLES WHERE MDT_MOD_ID = ? ;"
cur = self.query(stmnt,(module.get_id(),))
rows = cur.fetchallmap()
for row in rows:
if row["MDT_NAME"] not in [tbl["name"] for tbl in tables]:
self._remove_table_for_module(module,row["MDT_NAME"])
for table in tables:
if table["name"] not in [tbl["MDT_NAME"] for tbl in rows]:
self._create_table_for_module(module,table)
else:
self._update_columns_for_table(module, table)
def _update_columns_for_table(self, module, table):
"""
        Deletes columns that exist but are not defined in the module's manifest.
        Adds columns that do not exist but are defined in the module's manifest.
"""
stmnt = "SELECT RDB$FIELD_NAME FROM RDB$RELATION_FIELDS WHERE RDB$RELATION_NAME = ? ;"
stmnt_drop = "ALTER TABLE %s DROP %s ;"
stmnt_add = "ALTER TABLE %s ADD %s %s ;"
db_tablename = self._replace_module_tables(module,"${"+table["name"]+"}")
cur = self.query( stmnt, (db_tablename,))
rows = cur.fetchallmap()
for row in rows:
field_name = row["RDB$FIELD_NAME"].strip()
if field_name == "MOD_INSTANCE_ID":
continue
if field_name not in [col["name"] for col in table["columns"]]:
self.query( stmnt_drop%(db_tablename, field_name),commit=True)
for col in table["columns"]:
if col["name"] not in [r["RDB$FIELD_NAME"].strip() for r in rows]:
self.query( stmnt_add%(db_tablename, col["name"], col["type"]),commit=True)
def create_tables_for_module(self, module):
"""
create tables as part of module installation
"""
tables = module.get_tables()
for table in tables:
self._create_table_for_module(module,table)
def _remove_table_for_module(self, module, tablename):
"""
remove tables as part of module uninstallation
"""
stmnt = "SELECT MDT_ID \
FROM MODULETABLES \
WHERE MDT_MOD_ID = ? AND MDT_NAME = ? ;"
cur = self.query(stmnt,(module.get_id(),tablename))
rows = cur.fetchallmap()
row = rows[0]
tab_string = "0"*(6-len(str(row["MDT_ID"])))+str(row["MDT_ID"])
stmnt = "DROP TABLE TAB_%s"%tab_string
self.query(stmnt,commit=True)
stmnt = "DROP GENERATOR SEQ_%s"%tab_string
self.query(stmnt,commit=True)
stmnt = "DELETE FROM MODULETABLES WHERE MDT_ID = ? ;"
self.query(stmnt,(row["MDT_ID"],),commit=True)
def _create_table_for_module(self, module, table):
"""
creates a database table for a module
"""
new_table_id = self.get_seq_next('MDT_GEN')
new_table_string = "0"*(6-len(str(new_table_id)))+str(new_table_id)
stmnt = "CREATE TABLE TAB_%s ( MOD_INSTANCE_ID INT "%new_table_string
autoincrement = None
for col in table["columns"]:
stmnt += ", %s %s "%(col["name"],col["type"])
if col.has_key("primary"):
if type(col["primary"]) == bool and col["primary"]:
stmnt+="primary key "
elif col.has_key("unique") and type(col["unique"])==bool and col["unique"]:
stmnt+="unique "
else:
if col.has_key("unique") and type(col["unique"])==bool and col["unique"]:
stmnt+="unique "
if col.has_key("notnull") and type(col["notnull"])==bool and col["notnull"]:
stmnt+="not null "
if col.has_key("autoincrement") and type(col["autoincrement"])==bool and col["autoincrement"]:
if autoincrement is None:
autoincrement = col["name"]
stmnt+=") ;"
mst_stmnt = "INSERT INTO MODULETABLES (MDT_ID, MDT_NAME, MDT_MOD_ID ) VALUES ( ?, ?, ?) ;"
self.query( mst_stmnt, (new_table_id, table["name"], module.get_id()),commit=True)
self.query( stmnt,commit=True)
stmnt = "CREATE GENERATOR SEQ_%s ;"%new_table_string
self.query( stmnt,commit=True)
stmnt = "SET GENERATOR SEQ_%s TO 1 ;"%new_table_string
self.query( stmnt,commit=True)
#if autoincrement is not None:
#
# stmnt = """SET TERM ^ ;
# CREATE TRIGGER TRG_AUTO_%(nts)s FOR TAB_%(nts)s
# ACTIVE BEFORE INSERT POSITION 0
# AS
# DECLARE VARIABLE tmp DECIMAL(18,0);
# BEGIN
# IF (NEW.%(autoinc)s IS NULL) THEN
# NEW.%(autoinc)s = GEN_ID(SEQ_%(nts)s, 1);
# ELSE BEGIN
# tmp = GEN_ID(SEQ_%(nts)s, 0);
# IF (tmp < new.%(autoinc)s) THEN
# tmp = GEN_ID(SEQ_%(nts)s, new.%(autoinc)s - tmp);
# END
# END^
# SET TERM ; ^"""%{'autoinc':autoincrement,'nts':new_table_string}
# self.query(stmnt,commit=True)
class QueryCache(object):
"""
caches prepared statements and delivers them on demand
"""
RANK = 0
PREP = 1
CUR = 2
MAX_QUERIES = 20
def __init__(self):
"""
trivial
"""
self.queries = {}
def __call__(self, cur, query):
"""
        Looks up the given query in the cache and, if it is missing, prepares and caches it.
        If the number of cached queries exceeds MAX_QUERIES, the least-used query
        is evicted.
"""
if query not in self.queries:
if len(self.queries) >= self.MAX_QUERIES:
lowest = self._get_lowest_use_query()
del(self.queries[lowest])
self.queries[query] = {self.RANK:0,self.PREP:cur.prep(query), self.CUR: cur}
self.queries[query][self.RANK] += 1
return (self.queries[query][self.PREP] , self.queries[query][self.CUR])
def _get_lowest_use_query(self):
"""
returns the query that has been used the fewest times.
"""
lowest = None
lquery = None
for query, querydict in self.queries.items():
if lowest is None or querydict[self.RANK] < lowest:
lowest = querydict[self.RANK]
lquery = query
return lquery
|
skarphed/skarphed
|
core/lib/database.py
|
Python
|
agpl-3.0
| 15,700 | 0.008153 |
"""Admin functionality for services."""
from django.contrib import admin
from django.contrib.admin import site
from leaflet.admin import LeafletGeoAdmin
from .models import Service, ServiceImage
class ServiceImageInline(admin.TabularInline):
"""The inline for service images."""
model = ServiceImage
extra = 3
ordering = ("order",)
class ServiceAdmin(LeafletGeoAdmin, admin.ModelAdmin):
"""The class for the service admin."""
inlines = [ServiceImageInline]
site.register(Service, ServiceAdmin)
|
empowerhack/HealthMate
|
healthmate/services/admin.py
|
Python
|
mit
| 530 | 0 |
"""
@package mi.dataset.parser.test
@file mi/dataset/parser/test/test_nutnr_m_glider.py
@author Emily Hahn
@brief A test parser for the nutnr series m instrument through a glider
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException, ConfigurationException, DatasetParserException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.nutnr_m.glider.resource import RESOURCE_PATH
from mi.dataset.parser.glider import GliderParser
from mi.dataset.test.test_parser import ParserUnitTestCase
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
log = get_logger()
@attr('UNIT', group='mi')
class NutnrMGliderParserUnitTestCase(ParserUnitTestCase):
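    # Parser configuration shared by all tests: the module that holds the particle implementation
    # and the particle class the glider parser should emit.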
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'NutnrMDataParticle'
}
def test_simple(self):
"""
Test a simple case that we can parse a single message
"""
with open(os.path.join(RESOURCE_PATH, 'single.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(1)
self.assert_particles(particles, "single.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_many(self):
"""
Test a simple case with more messages
"""
with open(os.path.join(RESOURCE_PATH, 'many.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(12)
# requested more than are available in file, should only be 10
self.assertEquals(len(particles), 10)
self.assert_particles(particles, "many.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_full(self):
"""
Test a full file and confirm the right number of particles is returned
"""
with open(os.path.join(RESOURCE_PATH, 'unit_514-2014-351-2-0.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(40)
            # requested more than are available in file, should only be 31
self.assertEquals(len(particles), 31)
self.assertEqual(self.exception_callback_value, [])
def test_empty(self):
"""
        An empty file raises a dataset parser exception since the header cannot be read
"""
file_handle = open(os.path.join(RESOURCE_PATH, 'empty.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(1)
            # the file is empty, so no particles should be returned
self.assertEquals(len(particles), 0)
def test_bad_config(self):
"""
Test that a set of bad configurations produces the expected exceptions
"""
file_handle = open(os.path.join(RESOURCE_PATH, 'single.mrg'), 'rU')
# confirm a configuration exception occurs if no config is passed in
with self.assertRaises(ConfigurationException):
GliderParser({}, file_handle, self.exception_callback)
# confirm a config missing the particle class causes an exception
bad_config = {DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider'}
with self.assertRaises(ConfigurationException):
GliderParser(bad_config, file_handle, self.exception_callback)
# confirm a config with a non existing class causes an exception
bad_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'BadDataParticle'
}
with self.assertRaises(AttributeError):
GliderParser(bad_config, file_handle, self.exception_callback)
def test_bad_headers(self):
"""
        Test that a file with a short header raises a dataset parser exception
"""
# this file does not have enough header lines
file_handle = open(os.path.join(RESOURCE_PATH, 'short_header.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
# this file specifies a number of header lines other than 14
file_handle = open(os.path.join(RESOURCE_PATH, 'bad_num_header_lines.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
# this file specifies a number of label lines other than 3
file_handle = open(os.path.join(RESOURCE_PATH, 'bad_num_label_lines.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
def test_missing_time(self):
"""
        Test that a file which is missing the required m_present_time field for timestamps raises a dataset parser exception
"""
# this file is missing the m_present_time label
file_handle = open(os.path.join(RESOURCE_PATH, 'no_time_label.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
def test_short_data(self):
"""
        Test that if the number of columns in the header does not match the number of columns in the data, an
        exception occurs
"""
        # this file has two columns removed from the data line
file_handle = open(os.path.join(RESOURCE_PATH, 'short_data.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
def test_bad_sensors_per_cycle(self):
"""
        Test that if the number of sensors per cycle from the header does not match the data, an
        exception is reported through the callback, but processing continues
"""
with open(os.path.join(RESOURCE_PATH, 'bad_sensors_per_cycle.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(1)
self.assert_particles(particles, "single.yml", RESOURCE_PATH)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], SampleException)
def test_short_units(self):
"""
        Test that if the number of label columns does not match the number of units columns, an exception occurs
"""
        # this file has two columns removed from the units line
file_handle = open(os.path.join(RESOURCE_PATH, 'short_units.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
|
oceanobservatories/mi-instrument
|
mi/dataset/parser/test/test_nutnr_m_glider.py
|
Python
|
bsd-2-clause
| 7,448 | 0.003894 |
#!/usr/bin/python
# Import System Required Paths
import sys
sys.path.append('/usr/local/src/volatility-master')
# Import Volalatility
import volatility.conf as conf
import volatility.registry as registry
registry.PluginImporter()
config = conf.ConfObject()
import volatility.commands as commands
import volatility.addrspace as addrspace
registry.register_global_options(config, commands.Command)
registry.register_global_options(config, addrspace.BaseAddressSpace)
config.parse_options()
config.PROFILE="LinuxDebian31604x64"
config.LOCATION = "vmi://debian-hvm"
# Other imports
import time
# Retrieve check creds plugin
import volatility.plugins.linux.check_creds as fopPlugin
fopData = fopPlugin.linux_check_creds(config)
invalid_fop_start_time = time.time()
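# linux_check_creds reports task pairs that share a cred structure, which can indicate
# privilege escalation by reusing another process's credentials.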
for msg in fopData.calculate():
print "***Processes are sharing credential structures***"
print msg
dir(msg)
print("--- Check creds Time Taken: %s seconds ---" % (time.time() - invalid_fop_start_time))
|
muscatmat/vmi-event-naive-detector
|
scripts/check_creds.py
|
Python
|
mit
| 972 | 0.012346 |