import codecs
import math
filename_out = "../../Assets/cosine_table"
table_size = 512
fixed_point_precision = 512
def dumpCosine(_cosine_func, display_name, f):
f.write('const int ' + display_name + '[] =' + '\n')
f.write('{' + '\n')
# _str_out = '\t'
for angle in range(0,table_size):
_cos = int(_cosine_func(angle * math.pi / (table_size / 2.0)) * fixed_point_precision)
_str_out = str(_cos) + ','
f.write(_str_out + '\n')
# if angle%10 == 9:
# f.write(_str_out + '\n')
# _str_out = '\t'
f.write('};' + '\n')
def main():
## Creates the header
f = codecs.open(filename_out + '.h', 'w')
f.write('#define COSINE_TABLE_LEN ' + str(table_size) + '\n')
f.write('\n')
f.write('extern const int tcos[COSINE_TABLE_LEN];' + '\n')
f.write('extern const int tsin[COSINE_TABLE_LEN];' + '\n')
f.close()
## Creates the C file
f = codecs.open(filename_out + '.c', 'w')
dumpCosine(_cosine_func = math.cos, display_name = 'tcos', f = f)
f.write('\n')
dumpCosine(_cosine_func = math.sin, display_name = 'tsin', f = f)
f.close()
main()

# ---- source: voitureblanche/projet-secret :: work/Python-toolchain/3D/build_cosine_tables.py (Python, MIT) ----
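
# A minimal usage sketch for the cosine-table generator above (added here,
# not part of the original toolchain): the emitted tables hold cos/sin values
# scaled by fixed_point_precision (512), with 512 entries per full turn, so a
# table lookup plus an integer divide replaces a floating-point cos().
import math

TABLE_SIZE = 512   # entries per full turn, as in build_cosine_tables
PRECISION = 512    # fixed-point scale factor

COS_TABLE = [int(math.cos(i * math.pi / (TABLE_SIZE / 2.0)) * PRECISION)
             for i in range(TABLE_SIZE)]

def fixed_cos_mul(value, angle_index):
    # value * cos(angle) in fixed point: multiply first, then rescale.
    return (value * COS_TABLE[angle_index % TABLE_SIZE]) // PRECISION

# fixed_cos_mul(100, 0) evaluates to 100; fixed_cos_mul(100, 128) to 0,
# since index 128 corresponds to cos(pi/2).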
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import GLib
from gi.repository import Gtk
import xl.unicode
from xl import event, main, plugins, xdg
from xlgui.widgets import common, dialogs
from xl.nls import gettext as _, ngettext
import logging
logger = logging.getLogger(__name__)
name = _('Plugins')
ui = xdg.get_data_path('ui', 'preferences', 'plugin.ui')
class PluginManager(object):
"""
    GUI to manage plugins
"""
def __init__(self, preferences, builder):
"""
Initializes the manager
"""
self.preferences = preferences
builder.connect_signals(self)
self.plugins = main.exaile().plugins
self.message = dialogs.MessageBar(
parent=builder.get_object('preferences_pane'), buttons=Gtk.ButtonsType.CLOSE
)
self.message.connect('response', self.on_messagebar_response)
self.list = builder.get_object('plugin_tree')
self.enabled_cellrenderer = builder.get_object('enabled_cellrenderer')
if main.exaile().options.Debug:
reload_cellrenderer = common.ClickableCellRendererPixbuf()
reload_cellrenderer.props.icon_name = 'view-refresh'
reload_cellrenderer.props.xalign = 1
reload_cellrenderer.connect('clicked', self.on_reload_cellrenderer_clicked)
name_column = builder.get_object('name_column')
name_column.pack_start(reload_cellrenderer, True)
name_column.add_attribute(reload_cellrenderer, 'visible', 3)
self.version_label = builder.get_object('version_label')
self.author_label = builder.get_object('author_label')
self.name_label = builder.get_object('name_label')
self.description = builder.get_object('description_view')
self.model = builder.get_object('model')
self.filter_model = self.model.filter_new()
self.show_incompatible_cb = builder.get_object('show_incompatible_cb')
self.filter_model.set_visible_func(self._model_visible_func)
selection = self.list.get_selection()
selection.connect('changed', self.on_selection_changed)
self._load_plugin_list()
self._evt_rm1 = event.add_ui_callback(
self.on_plugin_event, 'plugin_enabled', None, True
)
self._evt_rm2 = event.add_ui_callback(
self.on_plugin_event, 'plugin_disabled', None, False
)
self.list.connect('destroy', self.on_destroy)
GLib.idle_add(selection.select_path, (0,))
GLib.idle_add(self.list.grab_focus)
def _load_plugin_list(self):
"""
Loads the plugin list
"""
plugins = self.plugins.list_installed_plugins()
uncategorized = _('Uncategorized')
plugins_dict = {uncategorized: []}
failed_list = []
self.plugin_to_path = {}
for plugin_name in plugins:
try:
info = self.plugins.get_plugin_info(plugin_name)
compatible = self.plugins.is_compatible(info)
broken = self.plugins.is_potentially_broken(info)
except Exception:
failed_list += [plugin_name]
continue
# determine icon to show
if not compatible:
icon = 'dialog-error'
elif broken:
icon = 'dialog-warning'
else:
icon = None
enabled = plugin_name in self.plugins.enabled_plugins
plugin_data = (
plugin_name,
info['Name'],
str(info['Version']),
enabled,
icon,
broken,
compatible,
True,
)
if 'Category' in info:
cat = plugins_dict.setdefault(info['Category'], [])
cat.append(plugin_data)
else:
plugins_dict[uncategorized].append(plugin_data)
self.list.set_model(None)
self.model.clear()
def categorykey(item):
if item[0] == uncategorized:
return '\xff' * 10
return xl.unicode.strxfrm(item[0])
plugins_dict = sorted(plugins_dict.iteritems(), key=categorykey)
for category, plugins_list in plugins_dict:
plugins_list.sort(key=lambda x: xl.unicode.strxfrm(x[1]))
it = self.model.append(
None, (None, category, '', False, '', False, True, False)
)
for plugin_data in plugins_list:
pit = self.model.append(it, plugin_data)
path = self.model.get_string_from_iter(pit)
self.plugin_to_path[plugin_data[0]] = path
self.list.set_model(self.filter_model)
# TODO: Keep track of which categories are already expanded, and only expand those
self.list.expand_all()
if failed_list:
self.message.show_error(
_('Could not load plugin info!'),
ngettext('Failed plugin: %s', 'Failed plugins: %s', len(failed_list))
% ', '.join(failed_list),
)
def on_destroy(self, widget):
self._evt_rm1()
self._evt_rm2()
def on_messagebar_response(self, widget, response):
"""
Hides the messagebar if requested
"""
if response == Gtk.ResponseType.CLOSE:
widget.hide()
def on_plugin_tree_row_activated(self, tree, path, column):
"""
Enables or disables the selected plugin
"""
self.enabled_cellrenderer.emit('toggled', path[0])
def on_reload_cellrenderer_clicked(self, cellrenderer, path):
"""
Reloads a plugin from scratch
"""
plugin_name = self.filter_model[path][0]
enabled = self.filter_model[path][3]
if enabled:
try:
self.plugins.disable_plugin(plugin_name)
except Exception as e:
self.message.show_error(_('Could not disable plugin!'), str(e))
return
logger.info('Reloading plugin %s...', plugin_name)
self.plugins.load_plugin(plugin_name, reload_plugin=True)
if enabled:
try:
self.plugins.enable_plugin(plugin_name)
except Exception as e:
self.message.show_error(_('Could not enable plugin!'), str(e))
return
def on_install_plugin_button_clicked(self, button):
"""
Shows a dialog allowing the user to choose a plugin to install
from the filesystem
"""
dialog = Gtk.FileChooserDialog(
_('Choose a Plugin'),
self.preferences.parent,
buttons=(
Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_ADD,
Gtk.ResponseType.OK,
),
)
filter = Gtk.FileFilter()
filter.set_name(_('Plugin Archives'))
filter.add_pattern("*.exz")
filter.add_pattern("*.tar.gz")
filter.add_pattern("*.tar.bz2")
dialog.add_filter(filter)
filter = Gtk.FileFilter()
filter.set_name(_('All Files'))
filter.add_pattern('*')
dialog.add_filter(filter)
result = dialog.run()
dialog.hide()
if result == Gtk.ResponseType.OK:
try:
self.plugins.install_plugin(dialog.get_filename())
except plugins.InvalidPluginError as e:
self.message.show_error(_('Plugin file installation failed!'), str(e))
return
self._load_plugin_list()
def on_selection_changed(self, selection, user_data=None):
"""
Called when a row is selected
"""
model, paths = selection.get_selected_rows()
if not paths:
return
row = model[paths[0]]
if not row[7]:
self.author_label.set_label('')
self.description.get_buffer().set_text('')
self.name_label.set_label('')
return
info = self.plugins.get_plugin_info(row[0])
self.author_label.set_label(",\n".join(info['Authors']))
self.description.get_buffer().set_text(info['Description'].replace(r'\n', "\n"))
self.name_label.set_markup(
"<b>%s</b> <small>%s</small>" % (info['Name'], info['Version'])
)
def on_enabled_cellrenderer_toggled(self, cellrenderer, path):
"""
Called when the checkbox is toggled
"""
path = Gtk.TreePath.new_from_string(path)
plugin_name = self.filter_model[path][0]
if plugin_name is None:
return
enable = not self.filter_model[path][3]
if enable:
try:
self.plugins.enable_plugin(plugin_name)
except Exception as e:
self.message.show_error(_('Could not enable plugin!'), str(e))
return
else:
try:
self.plugins.disable_plugin(plugin_name)
except Exception as e:
self.message.show_error(_('Could not disable plugin!'), str(e))
return
self.on_selection_changed(self.list.get_selection())
def on_plugin_event(self, evtname, obj, plugin_name, enabled):
if hasattr(self.plugins.loaded_plugins[plugin_name], 'get_preferences_pane'):
self.preferences._load_plugin_pages()
path = self.plugin_to_path[plugin_name]
self.model[path][3] = enabled
def on_show_incompatible_cb_toggled(self, widget):
self.filter_model.refilter()
def _model_visible_func(self, model, iter, data):
row = model[iter]
compatible = row[6]
return compatible or self.show_incompatible_cb.get_active()
def init(preferences, xml):
PluginManager(preferences, xml)
# ---- source: genodeftest/exaile :: xlgui/preferences/plugin.py (Python, GPL-2.0) ----
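
# A standalone sketch of the Gtk.TreeModelFilter pattern used by the
# PluginManager above (filter_new / set_visible_func / refilter). This is an
# added illustration, assuming PyGObject with GTK 3 is importable; a toy
# two-column store stands in for Exaile's plugin model.
from gi.repository import Gtk

def build_filtered_model(show_incompatible=False):
    store = Gtk.ListStore(str, bool)   # (plugin name, compatible)
    store.append(['good plugin', True])
    store.append(['incompatible plugin', False])
    filter_model = store.filter_new()
    # The visible func receives (model, iter, data), like _model_visible_func.
    filter_model.set_visible_func(
        lambda model, it, data: model[it][1] or show_incompatible)
    return filter_model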
"""
Generic Image analyzer.
"""
# Standard
import os
import logging
import subprocess
# Damn
from damn_at import (
mimetypes,
MetaDataType,
MetaDataValue,
FileId,
FileDescription,
AssetDescription,
AssetId
)
from damn_at.pluginmanager import IAnalyzer
from damn_at.analyzer import AnalyzerException
LOG = logging.getLogger(__name__)
class GenericImageAnalyzer(IAnalyzer):
"""Generic Image analyzer."""
handled_types = ["image/x-ms-bmp", "image/jpeg", "image/png", "image/gif",
"image/x-photoshop", "image/tiff", "application/x-xcf"]
def __init__(self):
super(GenericImageAnalyzer, self).__init__()
def activate(self):
pass
def analyze(self, an_uri):
fileid = FileId(filename=os.path.abspath(an_uri))
file_descr = FileDescription(file=fileid)
file_descr.assets = []
image_mimetype = mimetypes.guess_type(an_uri)[0]
asset_descr = AssetDescription(asset=AssetId(
subname='main layer',
mimetype=image_mimetype,
file=fileid
))
try:
pro = subprocess.Popen(
['exiftool', an_uri],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = pro.communicate()
if pro.returncode != 0:
msg = 'ImageAnalyzer failed %s with error code %d!:\n%s' % (
an_uri,
pro.returncode,
str(err)
)
LOG.error(msg)
raise AnalyzerException(msg)
except OSError as e:
msg = 'ImageAnalyzer failed %s:\n%s' % (an_uri, e)
LOG.error(msg)
raise OSError(msg)
meta = {}
flag = 0
lines = str(out).strip().split('\n')
for line in lines:
line = line.split(':', 1)
if len(line) == 1:
line = line[0].split('=')
line = [l.strip() for l in line]
if line[0] == 'MIME Type':
flag = 1
if flag == 1 and line[0] not in ['MIME Type', 'Image Size']:
meta[line[0].lower().replace(' ', '_')] = line[1]
from damn_at.analyzers.image import metadata
extractor_map = {
'image/png': metadata.MetaDataPNG,
'image/jpeg': metadata.MetaDataJPG,
'image/x-ms-bmp': metadata.MetaDataBMP,
'image/x-photoshop': metadata.MetaDataPSD,
'application/x-xcf': metadata.MetaDataXCF,
}
if image_mimetype in extractor_map:
asset_descr.metadata = extractor_map[image_mimetype].extract(meta)
else:
asset_descr.metadata = {}
for key, value in meta.items():
if key not in asset_descr.metadata:
asset_descr.metadata['exif-' + key] = MetaDataValue(
type=MetaDataType.STRING,
string_value=value
)
file_descr.assets.append(asset_descr)
return file_descr
# ---- source: peragro/peragro-at :: src/damn_at/analyzers/image/analyzerimage.py (Python, BSD-3-Clause) ----
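
# A minimal, dependency-free sketch of the exiftool parsing done in the
# analyzer above (added here): exiftool prints "Key : Value" lines, which
# analyze() folds into a dict keyed by lowercased, underscore-joined names.
# (The real analyzer additionally skips keys before "MIME Type" and the
# "Image Size" row.)
def parse_exiftool_lines(out):
    meta = {}
    for raw in out.strip().split('\n'):
        parts = [part.strip() for part in raw.split(':', 1)]
        if len(parts) == 2:
            meta[parts[0].lower().replace(' ', '_')] = parts[1]
    return meta

# parse_exiftool_lines('Image Width : 640\nBit Depth : 8')
# returns {'image_width': '640', 'bit_depth': '8'}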
limit = 10 ** 4
def isOK(a1,a2,a3,m):
    '''test if m lies on the same side of line a1-a2 as a3 does'''
    x1, y1 = float(a1[0]), float(a1[1])
    x2, y2 = float(a2[0]), float(a2[1])
    x3, y3 = float(a3[0]), float(a3[1])
    x, y = float(m[0]), float(m[1])
t = (x-x1) * (y2-y1) - (y-y1) * (x2-x1)
k = (x3-x1) * (y2-y1) - (y3-y1) * (x2-x1)
if t*k > 0:
return True
return False
def isInterior(a1,a2,a3,m) :
    '''test if m is in the triangle formed by a1, a2, a3
    '''
if isOK(a1,a2,a3,m) and isOK(a2,a3,a1,m) and isOK(a3,a1,a2,m):
return True
def test():
a1 =(-340,495)
a2= (-153,-910)
a3 = (835,-947)
X= (-175,41)
Y= (-421,-714)
Z = (574,-645)
m = (0,0)
print isInterior(a1,a2,a3,m), isInterior(X,Y,Z,m)
    # print intersection(X,m,Y,Z)  # `intersection` is never defined in this file
# test()
def main():
inp= file('triangles.txt')
count = 0
O = [0,0]
TRI= []
t = inp.readline()
while (t!=""):
l= t.strip().split(',')
x = [int(l[0]), int(l[1])]
y = [int(l[2]), int(l[3])]
z = [int(l[4]), int(l[5])]
if isInterior(x,y,z,O):
count +=1
# print x,y,z
# else: count+=1
t = inp.readline()
print 'count', count
main()
# ---- source: nguyenkims/projecteuler-python :: src/p102.py (Python, MIT) ----
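
# An added worked restatement of the sign test used in p102.py above: m is
# inside triangle (a1, a2, a3) exactly when, for every edge, m falls on the
# same side as the opposite vertex. The two triangles are the examples from
# the Project Euler 102 statement: ABC contains the origin, XYZ does not.
def same_side(p1, p2, a, b):
    # sign of the z cross product tells which side of line a-b a point is on
    cp1 = (p1[0]-a[0])*(b[1]-a[1]) - (p1[1]-a[1])*(b[0]-a[0])
    cp2 = (p2[0]-a[0])*(b[1]-a[1]) - (p2[1]-a[1])*(b[0]-a[0])
    return cp1 * cp2 > 0

def contains_origin(a, b, c):
    o = (0, 0)
    return (same_side(o, c, a, b) and same_side(o, a, b, c)
            and same_side(o, b, c, a))

# contains_origin((-340,495), (-153,-910), (835,-947))  -> True
# contains_origin((-175,41), (-421,-714), (574,-645))   -> False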
# hackerrank - Algorithms: Time Conversion
# Written by James Andreou, University of Waterloo
S = raw_input()
TYPE = S[len(S)-2]
if S[:2] == "12":
if TYPE == "A":
print "00" + S[2:-2]
else:
print S[:-2]
elif TYPE == "P":
HOUR = int(S[:2]) + 12
print str(HOUR) + S[2:-2]
else:
    print S[:-2]

# ---- source: jamesandreou/hackerrank-solutions :: warmup/hr_time_conversion.py (Python, MIT) ----
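
# An equivalent conversion for hr_time_conversion.py above (an added sketch,
# assuming input shaped like "hh:mm:ssAM"/"hh:mm:ssPM"), using the standard
# library instead of manual slicing:
from datetime import datetime

def time_conversion(s):
    return datetime.strptime(s, '%I:%M:%S%p').strftime('%H:%M:%S')

# time_conversion('07:05:45PM') returns '19:05:45'
# time_conversion('12:00:01AM') returns '00:00:01'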
__author__ = 'Stephanie'
from ODMconnection import dbconnection
from readSensors import readSensors
from updateSensors import updateSensors
from createSensors import createSensors
from deleteSensors import deleteSensors
__all__ = [
'readSensors',
'updateSensors',
'createSensors',
'deleteSensors',
]

# ---- source: Castronova/EMIT :: api_old/ODM2/Sensors/services/__init__.py (Python, GPL-2.0) ----
"""
:Author: Engelbert Gruber
:Contact: grubert@users.sourceforge.net
:Revision: $Revision: 21817 $
:Date: $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $
:Copyright: This module has been placed in the public domain.
LaTeX2e document tree Writer.
"""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # e.g. ##.
import sys
import time
import re
import string
from types import ListType
from docutils import frontend, nodes, languages, writers, utils
class Writer(writers.Writer):
supported = ('latex','latex2e')
"""Formats this writer supports."""
settings_spec = (
'LaTeX-Specific Options',
'The LaTeX "--output-encoding" default is "latin-1:strict".',
(('Specify documentclass. Default is "article".',
['--documentclass'],
{'default': 'article', }),
('Specify document options. Multiple options can be given, '
'separated by commas. Default is "10pt,a4paper".',
['--documentoptions'],
{'default': '10pt,a4paper', }),
('Use LaTeX footnotes. LaTeX supports only numbered footnotes (does it?). '
'Default: no, uses figures.',
['--use-latex-footnotes'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "superscript".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'superscript',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Use LaTeX citations. '
'Default: no, uses figures which might get mixed with images.',
['--use-latex-citations'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Specify a stylesheet file. The file will be "input" by latex in '
'the document header. Default is no stylesheet (""). '
'Overrides --stylesheet-path.',
['--stylesheet'],
{'default': '', 'metavar': '<file>',
'overrides': 'stylesheet_path'}),
('Specify a stylesheet file, relative to the current working '
'directory. Overrides --stylesheet.',
['--stylesheet-path'],
{'metavar': '<file>', 'overrides': 'stylesheet'}),
('Table of contents by docutils (default) or latex. Latex (writer) '
'supports only one ToC per document, but docutils does not write '
'pagenumbers.',
['--use-latex-toc'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Let LaTeX print author and date, do not show it in docutils '
'document info.',
['--use-latex-docinfo'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Color of any hyperlinks embedded in text '
'(default: "blue", "0" to disable).',
['--hyperlink-color'], {'default': 'blue'}),
('Enable compound enumerators for nested enumerated lists '
'(e.g. "1.2.a.ii"). Default: disabled.',
['--compound-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compound enumerators for nested enumerated lists. This is '
'the default.',
['--no-compound-enumerators'],
{'action': 'store_false', 'dest': 'compound_enumerators'}),
('Enable section ("." subsection ...) prefixes for compound '
'enumerators. This has no effect without --compound-enumerators. '
'Default: disabled.',
['--section-prefix-for-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable section prefixes for compound enumerators. '
'This is the default.',
['--no-section-prefix-for-enumerators'],
{'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
('Set the separator between section number and enumerator '
'for compound enumerated lists. Default is "-".',
['--section-enumerator-separator'],
{'default': '-', 'metavar': '<char>'}),
        ('When possible, use verbatim for literal-blocks. '
'Default is to always use the mbox environment.',
['--use-verbatim-when-possible'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table style. "standard" with horizontal and vertical lines, '
'"booktabs" (LaTeX booktabs style) only horizontal lines '
'above and below the table and below the header or "nolines". '
'Default: "standard"',
['--table-style'],
{'choices': ['standard', 'booktabs','nolines'], 'default': 'standard',
'metavar': '<format>'}),
('LaTeX graphicx package option. '
'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
'Default is no option.',
['--graphicx-option'],
{'default': ''}),
('LaTeX font encoding. '
'Possible values are "T1", "OT1", "" or some other fontenc option. '
'The font encoding influences available symbols, e.g. "<<" as one '
'character. Default is "" which leads to package "ae" (a T1 '
'emulation using CM fonts).',
['--font-encoding'],
{'default': ''}),
),)
settings_defaults = {'output_encoding': 'latin-1'}
relative_path_settings = ('stylesheet_path',)
config_section = 'latex2e writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = LaTeXTranslator
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
self.head_prefix = visitor.head_prefix
self.head = visitor.head
self.body_prefix = visitor.body_prefix
self.body = visitor.body
self.body_suffix = visitor.body_suffix
"""
Notes on LaTeX
--------------
* latex does not support multiple tocs in one document.
(might be no limitation except for docutils documentation)
* width
* linewidth - width of a line in the local environment
* textwidth - the width of text on the page
Maybe always use linewidth ?
  *Bug* inside a minipage (e.g. a Sidebar) the linewidth is
  not changed; this needs a fix in docutils so that tables
  are not too wide.
  So we add locallinewidth, set it initially and
  on entering a sidebar, and reset it on exit.
"""
class Babel:
"""Language specifics for LaTeX."""
# country code by a.schlock.
# partly manually converted from iso and babel stuff, dialects and some
_ISO639_TO_BABEL = {
        'no': 'norsk', #XXX added by hand ( forget about nynorsk?)
        'gd': 'scottish', #XXX added by hand
'sl': 'slovenian',
'af': 'afrikaans',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'fr': 'french',
# french, francais, canadien, acadian
'de': 'ngerman', #XXX rather than german
# ngerman, naustrian, german, germanb, austrian
'el': 'greek',
'en': 'english',
# english, USenglish, american, UKenglish, british, canadian
'eo': 'esperanto',
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
'fi': 'finnish',
'ga': 'irish',
'gl': 'galician',
'he': 'hebrew',
'hr': 'croatian',
'hu': 'hungarian',
'is': 'icelandic',
'it': 'italian',
'la': 'latin',
'nl': 'dutch',
'pl': 'polish',
'pt': 'portuguese',
'ro': 'romanian',
'ru': 'russian',
'sk': 'slovak',
'sr': 'serbian',
'sv': 'swedish',
'tr': 'turkish',
'uk': 'ukrainian'
}
def __init__(self,lang):
self.language = lang
# pdflatex does not produce double quotes for ngerman in tt.
        self.double_quote_replacement = None
if re.search('^de',self.language):
#self.quotes = ("\"`", "\"'")
self.quotes = ('{\\glqq}', '{\\grqq}')
self.double_quote_replacment = "{\\dq}"
else:
self.quotes = ("``", "''")
self.quote_index = 0
def next_quote(self):
q = self.quotes[self.quote_index]
self.quote_index = (self.quote_index+1)%2
return q
def quote_quotes(self,text):
t = None
for part in text.split('"'):
if t == None:
t = part
else:
t += self.next_quote() + part
return t
def double_quotes_in_tt (self,text):
        if not self.double_quote_replacement:
            return text
        return text.replace('"', self.double_quote_replacement)
def get_language(self):
if self._ISO639_TO_BABEL.has_key(self.language):
return self._ISO639_TO_BABEL[self.language]
else:
# support dialects.
l = self.language.split("_")[0]
if self._ISO639_TO_BABEL.has_key(l):
return self._ISO639_TO_BABEL[l]
return None
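
# A hand-checked sketch of Babel's quote cycling (added here, not part of the
# original writer): quote_quotes() splits on '"' and alternates opening and
# closing quotes, so German text gets \glqq ... \grqq pairs.
def _demo_babel_quotes():
    babel = Babel('de')
    # 'say "hi" now' becomes 'say {\\glqq}hi{\\grqq} now'
    return babel.quote_quotes('say "hi" now')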
latex_headings = {
'optionlist_environment' : [
'\\newcommand{\\optionlistlabel}[1]{\\bf #1 \\hfill}\n'
'\\newenvironment{optionlist}[1]\n'
'{\\begin{list}{}\n'
' {\\setlength{\\labelwidth}{#1}\n'
' \\setlength{\\rightmargin}{1cm}\n'
' \\setlength{\\leftmargin}{\\rightmargin}\n'
' \\addtolength{\\leftmargin}{\\labelwidth}\n'
' \\addtolength{\\leftmargin}{\\labelsep}\n'
' \\renewcommand{\\makelabel}{\\optionlistlabel}}\n'
'}{\\end{list}}\n',
],
'lineblock_environment' : [
'\\newlength{\\lineblockindentation}\n'
'\\setlength{\\lineblockindentation}{2.5em}\n'
'\\newenvironment{lineblock}[1]\n'
'{\\begin{list}{}\n'
' {\\setlength{\\partopsep}{\\parskip}\n'
' \\addtolength{\\partopsep}{\\baselineskip}\n'
' \\topsep0pt\\itemsep0.15\\baselineskip\\parsep0pt\n'
' \\leftmargin#1}\n'
' \\raggedright}\n'
'{\\end{list}}\n'
],
'footnote_floats' : [
'% begin: floats for footnotes tweaking.\n',
'\\setlength{\\floatsep}{0.5em}\n',
'\\setlength{\\textfloatsep}{\\fill}\n',
'\\addtolength{\\textfloatsep}{3em}\n',
'\\renewcommand{\\textfraction}{0.5}\n',
'\\renewcommand{\\topfraction}{0.5}\n',
'\\renewcommand{\\bottomfraction}{0.5}\n',
'\\setcounter{totalnumber}{50}\n',
'\\setcounter{topnumber}{50}\n',
'\\setcounter{bottomnumber}{50}\n',
'% end floats for footnotes\n',
],
'some_commands' : [
'% some commands, that could be overwritten in the style file.\n'
'\\newcommand{\\rubric}[1]'
'{\\subsection*{~\\hfill {\\it #1} \\hfill ~}}\n'
'\\newcommand{\\titlereference}[1]{\\textsl{#1}}\n'
'% end of "some commands"\n',
]
}
class DocumentClass:
"""Details of a LaTeX document class."""
    # BUG: LaTeX has no deeper sections (actually paragraph is no
    # section either).
# BUG: No support for unknown document classes. Make 'article'
# default?
_class_sections = {
'book': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
'scrbook': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
'report': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
'scrreprt': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
'article': ( 'section', 'subsection', 'subsubsection' ),
'scrartcl': ( 'section', 'subsection', 'subsubsection' ),
}
_deepest_section = 'subsubsection'
def __init__(self, document_class):
self.document_class = document_class
def section(self, level):
""" Return the section name at the given level for the specific
document class.
Level is 1,2,3..., as level 0 is the title."""
sections = self._class_sections[self.document_class]
if level <= len(sections):
return sections[level-1]
else:
return self._deepest_section
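
# A quick illustration of the level mapping above (an added sketch): levels
# deeper than the sectioning commands the class offers clamp to the deepest
# one, subsubsection.
def _demo_document_class():
    dc = DocumentClass('article')
    return [dc.section(level) for level in (1, 2, 3, 4)]
    # returns ['section', 'subsection', 'subsubsection', 'subsubsection']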
class Table:
""" Manage a table while traversing.
Maybe change to a mixin defining the visit/departs, but then
class Table internal variables are in the Translator.
"""
def __init__(self,latex_type,table_style):
self._latex_type = latex_type
self._table_style = table_style
self._open = 0
# miscellaneous attributes
self._attrs = {}
self._col_width = []
self._rowspan = []
def open(self):
self._open = 1
self._col_specs = []
self.caption = None
self._attrs = {}
        self._in_thead = 0 # maybe context with search
def close(self):
self._open = 0
self._col_specs = None
self.caption = None
self._attrs = {}
def is_open(self):
return self._open
def used_packages(self):
if self._table_style == 'booktabs':
return '\\usepackage{booktabs}\n'
return ''
def get_latex_type(self):
return self._latex_type
def set(self,attr,value):
self._attrs[attr] = value
def get(self,attr):
if self._attrs.has_key(attr):
return self._attrs[attr]
return None
def get_vertical_bar(self):
if self._table_style == 'standard':
return '|'
return ''
# horizontal lines are drawn below a row, because we.
def get_opening(self):
return '\\begin{%s}[c]' % self._latex_type
def get_closing(self):
line = ""
if self._table_style == 'booktabs':
line = '\\bottomrule\n'
elif self._table_style == 'standard':
            line = '\\hline\n'
return '%s\\end{%s}' % (line,self._latex_type)
def visit_colspec(self,node):
self._col_specs.append(node)
def get_colspecs(self):
"""
Return column specification for longtable.
Assumes reST line length being 80 characters.
Table width is hairy.
=== ===
ABC DEF
=== ===
        usually gets too narrow, therefore we add 1 (fiddle factor).
"""
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self._col_specs:
colwidth = float(node['colwidth']+1) / width
total_width += colwidth
self._col_width = []
self._rowspan = []
        # do not make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
bar = self.get_vertical_bar()
latex_table_spec = ""
for node in self._col_specs:
colwidth = factor * float(node['colwidth']+1) / width
self._col_width.append(colwidth+0.005)
self._rowspan.append(0)
latex_table_spec += "%sp{%.2f\\locallinewidth}" % (bar,colwidth+0.005)
return latex_table_spec+bar
def get_column_width(self):
""" return columnwidth for current cell (not multicell)
"""
return "%.2f\\locallinewidth" % self._col_width[self._cell_in_row-1]
def visit_thead(self):
self._in_thead = 1
if self._table_style == 'standard':
return ['\\hline\n']
elif self._table_style == 'booktabs':
return ['\\toprule\n']
return []
def depart_thead(self):
a = []
#if self._table_style == 'standard':
# a.append('\\hline\n')
if self._table_style == 'booktabs':
a.append('\\midrule\n')
a.append('\\endhead\n')
# for longtable one could add firsthead, foot and lastfoot
self._in_thead = 0
return a
def visit_row(self):
self._cell_in_row = 0
def depart_row(self):
res = [' \\\\\n']
self._cell_in_row = None # remove cell counter
for i in range(len(self._rowspan)):
if (self._rowspan[i]>0):
self._rowspan[i] -= 1
if self._table_style == 'standard':
rowspans = []
for i in range(len(self._rowspan)):
if (self._rowspan[i]<=0):
rowspans.append(i+1)
if len(rowspans)==len(self._rowspan):
res.append('\\hline\n')
else:
cline = ''
rowspans.reverse()
# TODO merge clines
while 1:
try:
c_start = rowspans.pop()
except:
break
cline += '\\cline{%d-%d}\n' % (c_start,c_start)
res.append(cline)
return res
def set_rowspan(self,cell,value):
try:
self._rowspan[cell] = value
except:
pass
def get_rowspan(self,cell):
try:
return self._rowspan[cell]
except:
return 0
def get_entry_number(self):
return self._cell_in_row
def visit_entry(self):
self._cell_in_row += 1
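
# An added sketch of how Table sizes longtable columns: plain dicts stand in
# for docutils colspec nodes, since get_colspecs() only reads 'colwidth'.
# The expected string below is hand-derived from the 0.93 fiddle factor and
# the assumed 80-character line width.
def _demo_table_colspecs():
    table = Table('longtable', 'standard')
    table.open()
    for width in (10, 30):
        table.visit_colspec({'colwidth': width})
    return table.get_colspecs()
    # returns '|p{0.13\locallinewidth}|p{0.37\locallinewidth}|'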
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
latex_head = '\\documentclass[%s]{%s}\n'
encoding = '\\usepackage[%s]{inputenc}\n'
linking = '\\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}\n'
stylesheet = '\\input{%s}\n'
    # add a comment noting generation date, machine, user and docutils version.
generator = '%% generator Docutils: http://docutils.sourceforge.net/\n'
# use latex tableofcontents or let docutils do it.
use_latex_toc = 0
# TODO: use mixins for different implementations.
# list environment for option-list. else tabularx
use_optionlist_for_option_list = 1
# list environment for docinfo. else tabularx
use_optionlist_for_docinfo = 0 # NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = 0
# If using compound enumerations, include section information.
section_prefix_for_enumerators = 0
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# default link color
hyperlink_color = "blue"
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
self.use_latex_toc = settings.use_latex_toc
self.use_latex_docinfo = settings.use_latex_docinfo
self.use_latex_footnotes = settings.use_latex_footnotes
self._use_latex_citations = settings.use_latex_citations
self.hyperlink_color = settings.hyperlink_color
self.compound_enumerators = settings.compound_enumerators
self.font_encoding = settings.font_encoding
self.section_prefix_for_enumerators = (
settings.section_prefix_for_enumerators)
self.section_enumerator_separator = (
settings.section_enumerator_separator.replace('_', '\\_'))
if self.hyperlink_color == '0':
self.hyperlink_color = 'black'
self.colorlinks = 'false'
else:
self.colorlinks = 'true'
        # language: labels, bibliographic_fields, and author_separators,
        # to allow writing labels for specific languages.
self.language = languages.get_language(settings.language_code)
self.babel = Babel(settings.language_code)
self.author_separator = self.language.author_separators[0]
self.d_options = self.settings.documentoptions
if self.babel.get_language():
self.d_options += ',%s' % \
self.babel.get_language()
self.d_class = DocumentClass(settings.documentclass)
        # object for a table while processing.
self.active_table = Table('longtable',settings.table_style)
# HACK. Should have more sophisticated typearea handling.
if settings.documentclass.find('scr') == -1:
self.typearea = '\\usepackage[DIV12]{typearea}\n'
else:
if self.d_options.find('DIV') == -1 and self.d_options.find('BCOR') == -1:
self.typearea = '\\typearea{12}\n'
else:
self.typearea = ''
if self.font_encoding == 'OT1':
fontenc_header = ''
elif self.font_encoding == '':
fontenc_header = '\\usepackage{ae}\n\\usepackage{aeguill}\n'
else:
fontenc_header = '\\usepackage[%s]{fontenc}\n' % (self.font_encoding,)
input_encoding = self.encoding % self.latex_encoding
if self.settings.graphicx_option == '':
self.graphicx_package = '\\usepackage{graphicx}\n'
elif self.settings.graphicx_option.lower() == 'auto':
self.graphicx_package = '\n'.join(
('%Check if we are compiling under latex or pdflatex',
'\\ifx\\pdftexversion\\undefined',
' \\usepackage{graphicx}',
'\\else',
' \\usepackage[pdftex]{graphicx}',
'\\fi\n'))
else:
self.graphicx_package = (
'\\usepackage[%s]{graphicx}\n' % self.settings.graphicx_option)
self.head_prefix = [
self.latex_head % (self.d_options,self.settings.documentclass),
'\\usepackage{babel}\n', # language is in documents settings.
fontenc_header,
'\\usepackage{shortvrb}\n', # allows verb in footnotes.
input_encoding,
# * tabularx: for docinfo, automatic width of columns, always on one page.
'\\usepackage{tabularx}\n',
'\\usepackage{longtable}\n',
self.active_table.used_packages(),
# possible other packages.
# * fancyhdr
# * ltxtable is a combination of tabularx and longtable (pagebreaks).
# but ??
#
# extra space between text in tables and the line above them
'\\setlength{\\extrarowheight}{2pt}\n',
            '\\usepackage{amsmath}\n', # what do we need amsmath for?
self.graphicx_package,
'\\usepackage{color}\n',
'\\usepackage{multirow}\n',
'\\usepackage{ifthen}\n', # before hyperref!
self.linking % (self.colorlinks, self.hyperlink_color, self.hyperlink_color),
self.typearea,
self.generator,
# latex lengths
'\\newlength{\\admonitionwidth}\n',
'\\setlength{\\admonitionwidth}{0.9\\textwidth}\n'
# width for docinfo tablewidth
'\\newlength{\\docinfowidth}\n',
'\\setlength{\\docinfowidth}{0.9\\textwidth}\n'
# linewidth of current environment, so tables are not wider
# than the sidebar: using locallinewidth seems to defer evaluation
# of linewidth, this is fixing it.
'\\newlength{\\locallinewidth}\n',
# will be set later.
]
self.head_prefix.extend( latex_headings['optionlist_environment'] )
self.head_prefix.extend( latex_headings['lineblock_environment'] )
self.head_prefix.extend( latex_headings['footnote_floats'] )
self.head_prefix.extend( latex_headings['some_commands'] )
## stylesheet is last: so it might be possible to overwrite defaults.
stylesheet = utils.get_stylesheet_reference(settings)
if stylesheet:
settings.record_dependencies.add(stylesheet)
self.head_prefix.append(self.stylesheet % (stylesheet))
if self.linking: # and maybe check for pdf
self.pdfinfo = [ ]
self.pdfauthor = None
# pdftitle, pdfsubject, pdfauthor, pdfkeywords, pdfcreator, pdfproducer
else:
self.pdfinfo = None
# NOTE: Latex wants a date and an author, rst puts this into
        # docinfo, so normally we do not want latex author/date handling.
# latex article has its own handling of date and author, deactivate.
# So we always emit \title{...} \author{...} \date{...}, even if the
# "..." are empty strings.
self.head = [ ]
        # separate title, so we can append a subtitle.
self.title = ''
# if use_latex_docinfo: collects lists of author/organization/contact/address lines
self.author_stack = []
self.date = ''
self.body_prefix = ['\\raggedbottom\n']
self.body = []
self.body_suffix = ['\n']
self.section_level = 0
self.context = []
self.topic_classes = []
# column specification for tables
self.table_caption = None
# Flags to encode
# ---------------
# verbatim: to tell encode not to encode.
self.verbatim = 0
        # insert_none_breaking_blanks: to tell encode to replace blanks by "~".
self.insert_none_breaking_blanks = 0
# insert_newline: to tell encode to add latex newline.
self.insert_newline = 0
# mbox_newline: to tell encode to add mbox and newline.
self.mbox_newline = 0
# enumeration is done by list environment.
self._enum_cnt = 0
# Stack of section counters so that we don't have to use_latex_toc.
# This will grow and shrink as processing occurs.
# Initialized for potential first-level sections.
self._section_number = [0]
# The current stack of enumerations so that we can expand
# them into a compound enumeration
self._enumeration_counters = []
self._bibitems = []
# docinfo.
self.docinfo = None
# inside literal block: no quote mangling.
self.literal_block = 0
self.literal_block_stack = []
self.literal = 0
# true when encoding in math mode
self.mathmode = 0
def to_latex_encoding(self,docutils_encoding):
"""
Translate docutils encoding name into latex's.
Default fallback method is remove "-" and "_" chars from docutils_encoding.
"""
tr = { "iso-8859-1": "latin1", # west european
"iso-8859-2": "latin2", # east european
"iso-8859-3": "latin3", # esperanto, maltese
"iso-8859-4": "latin4", # north european,scandinavian, baltic
"iso-8859-5": "iso88595", # cyrillic (ISO)
"iso-8859-9": "latin5", # turkish
"iso-8859-15": "latin9", # latin9, update to latin1.
"mac_cyrillic": "maccyr", # cyrillic (on Mac)
"windows-1251": "cp1251", # cyrillic (on Windows)
"koi8-r": "koi8-r", # cyrillic (Russian)
"koi8-u": "koi8-u", # cyrillic (Ukrainian)
"windows-1250": "cp1250", #
"windows-1252": "cp1252", #
"us-ascii": "ascii", # ASCII (US)
# unmatched encodings
#"": "applemac",
#"": "ansinew", # windows 3.1 ansi
#"": "ascii", # ASCII encoding for the range 32--127.
#"": "cp437", # dos latine us
#"": "cp850", # dos latin 1
#"": "cp852", # dos latin 2
#"": "decmulti",
#"": "latin10",
#"iso-8859-6": "" # arabic
#"iso-8859-7": "" # greek
#"iso-8859-8": "" # hebrew
#"iso-8859-10": "" # latin6, more complete iso-8859-4
}
if tr.has_key(docutils_encoding.lower()):
return tr[docutils_encoding.lower()]
return docutils_encoding.translate(string.maketrans("",""),"_-").lower()
def language_label(self, docutil_label):
return self.language.labels[docutil_label]
latex_equivalents = {
u'\u00A0' : '~',
u'\u2013' : '{--}',
u'\u2014' : '{---}',
u'\u2018' : '`',
u'\u2019' : '\'',
u'\u201A' : ',',
u'\u201C' : '``',
u'\u201D' : '\'\'',
u'\u201E' : ',,',
u'\u2020' : '{\\dag}',
u'\u2021' : '{\\ddag}',
u'\u2026' : '{\\dots}',
u'\u2122' : '{\\texttrademark}',
u'\u21d4' : '{$\\Leftrightarrow$}',
}
def unicode_to_latex(self,text):
# see LaTeX codec
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
        # Only some special characters are translated; for documents with many
# utf-8 chars one should use the LaTeX unicode package.
for uchar in self.latex_equivalents.keys():
text = text.replace(uchar,self.latex_equivalents[uchar])
return text
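
    # Hand-checked example for the mapping above (added note): unicode_to_latex
    # turns u'A \u2013 B\u2122' into 'A {--} B{\texttrademark}'.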
def encode(self, text):
"""
Encode special characters in `text` & return.
# $ % & ~ _ ^ \ { }
Escaping with a backslash does not help with backslashes, ~ and ^.
< > are only available in math-mode or tt font. (really ?)
$ starts math- mode.
AND quotes:
"""
if self.verbatim:
return text
# compile the regexps once. do it here so one can see them.
#
# first the braces.
if not self.__dict__.has_key('encode_re_braces'):
self.encode_re_braces = re.compile(r'([{}])')
text = self.encode_re_braces.sub(r'{\\\1}',text)
if not self.__dict__.has_key('encode_re_bslash'):
# find backslash: except in the form '{\{}' or '{\}}'.
self.encode_re_bslash = re.compile(r'(?<!{)(\\)(?![{}]})')
# then the backslash: except in the form from line above:
# either '{\{}' or '{\}}'.
text = self.encode_re_bslash.sub(r'{\\textbackslash}', text)
# then dollar
text = text.replace("$", '{\\$}')
if not ( self.literal_block or self.literal or self.mathmode ):
# the vertical bar: in mathmode |,\vert or \mid
# in textmode \textbar
text = text.replace("|", '{\\textbar}')
text = text.replace("<", '{\\textless}')
text = text.replace(">", '{\\textgreater}')
# then
text = text.replace("&", '{\\&}')
# the ^:
# * verb|^| does not work in mbox.
# * mathmode has wedge. hat{~} would also work.
# text = text.replace("^", '{\\ensuremath{^\\wedge}}')
text = text.replace("^", '{\\textasciicircum}')
text = text.replace("%", '{\\%}')
text = text.replace("#", '{\\#}')
text = text.replace("~", '{\\textasciitilde}')
# Separate compound characters, e.g. "--" to "-{}-". (The
# actual separation is done later; see below.)
separate_chars = '-'
if self.literal_block or self.literal:
# In monospace-font, we also separate ",,", "``" and "''"
# and some other characters which can't occur in
# non-literal text.
separate_chars += ',`\'"<>'
# pdflatex does not produce doublequotes for ngerman.
text = self.babel.double_quotes_in_tt(text)
if self.font_encoding == 'OT1':
# We're using OT1 font-encoding and have to replace
# underscore by underlined blank, because this has
# correct width.
text = text.replace('_', '{\\underline{ }}')
# And the tt-backslash doesn't work in OT1, so we use
# a mirrored slash.
text = text.replace('\\textbackslash', '\\reflectbox{/}')
else:
text = text.replace('_', '{\\_}')
else:
text = self.babel.quote_quotes(text)
text = text.replace("_", '{\\_}')
for char in separate_chars * 2:
# Do it twice ("* 2") becaues otherwise we would replace
# "---" by "-{}--".
text = text.replace(char + char, char + '{}' + char)
if self.insert_newline or self.literal_block:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
text = text.replace("\n", '~\\\\\n')
elif self.mbox_newline:
if self.literal_block:
closings = "}" * len(self.literal_block_stack)
openings = "".join(self.literal_block_stack)
else:
closings = ""
openings = ""
text = text.replace("\n", "%s}\\\\\n\\mbox{%s" % (closings,openings))
# lines starting with "[" give errors.
text = text.replace('[', '{[}')
if self.insert_none_breaking_blanks:
text = text.replace(' ', '~')
if self.latex_encoding != 'utf8':
text = self.unicode_to_latex(text)
return text
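
    # A hand-derived example of the escaping above (added note): in plain
    # text mode, encode('A_B & 50%') returns 'A{\_}B {\&} 50{\%}', so the
    # specials become brace-wrapped commands and spacing survives.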
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
def astext(self):
if self.pdfinfo is not None:
if self.pdfauthor:
self.pdfinfo.append('pdfauthor={%s}' % self.pdfauthor)
if self.pdfinfo:
pdfinfo = '\\hypersetup{\n' + ',\n'.join(self.pdfinfo) + '\n}\n'
else:
pdfinfo = ''
head = '\\title{%s}\n\\author{%s}\n\\date{%s}\n' % \
(self.title,
' \\and\n'.join(['~\\\\\n'.join(author_lines)
for author_lines in self.author_stack]),
self.date)
return ''.join(self.head_prefix + [head] + self.head + [pdfinfo]
+ self.body_prefix + self.body + self.body_suffix)
def visit_Text(self, node):
self.body.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
self.depart_docinfo_item(node)
def visit_admonition(self, node, name=''):
self.body.append('\\begin{center}\\begin{sffamily}\n')
self.body.append('\\fbox{\\parbox{\\admonitionwidth}{\n')
if name:
self.body.append('\\textbf{\\large '+ self.language.labels[name] + '}\n');
self.body.append('\\vspace{2mm}\n')
def depart_admonition(self, node=None):
self.body.append('}}\n') # end parbox fbox
self.body.append('\\end{sffamily}\n\\end{center}\n');
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
def depart_attention(self, node):
self.depart_admonition()
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
self.depart_docinfo_item(node)
def visit_authors(self, node):
# not used: visit_author is called anyway for each author.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
self.body.append( '\\begin{quote}\n')
def depart_block_quote(self, node):
self.body.append( '\\end{quote}\n')
def visit_bullet_list(self, node):
if 'contents' in self.topic_classes:
if not self.use_latex_toc:
self.body.append( '\\begin{list}{}{}\n' )
else:
self.body.append( '\\begin{itemize}\n' )
def depart_bullet_list(self, node):
if 'contents' in self.topic_classes:
if not self.use_latex_toc:
self.body.append( '\\end{list}\n' )
else:
self.body.append( '\\end{itemize}\n' )
# Imperfect superscript/subscript handling: mathmode italicizes
# all letters by default.
def visit_superscript(self, node):
self.body.append('$^{')
self.mathmode = 1
def depart_superscript(self, node):
self.body.append('}$')
self.mathmode = 0
def visit_subscript(self, node):
self.body.append('$_{')
self.mathmode = 1
def depart_subscript(self, node):
self.body.append('}$')
self.mathmode = 0
def visit_caption(self, node):
self.body.append( '\\caption{' )
def depart_caption(self, node):
self.body.append('}')
def visit_caution(self, node):
self.visit_admonition(node, 'caution')
def depart_caution(self, node):
self.depart_admonition()
def visit_title_reference(self, node):
self.body.append( '\\titlereference{' )
def depart_title_reference(self, node):
self.body.append( '}' )
def visit_citation(self, node):
# TODO maybe use cite bibitems
if self._use_latex_citations:
self.context.append(len(self.body))
else:
self.body.append('\\begin{figure}[b]')
for id in node['ids']:
self.body.append('\\hypertarget{%s}' % id)
def depart_citation(self, node):
if self._use_latex_citations:
size = self.context.pop()
label = self.body[size]
text = ''.join(self.body[size+1:])
del self.body[size:]
self._bibitems.append([label, text])
else:
self.body.append('\\end{figure}\n')
def visit_citation_reference(self, node):
if self._use_latex_citations:
self.body.append('\\cite{')
else:
href = ''
if node.has_key('refid'):
href = node['refid']
elif node.has_key('refname'):
href = self.document.nameids[node['refname']]
self.body.append('[\\hyperlink{%s}{' % href)
def depart_citation_reference(self, node):
if self._use_latex_citations:
self.body.append('}')
else:
self.body.append('}]')
def visit_classifier(self, node):
self.body.append( '(\\textbf{' )
def depart_classifier(self, node):
self.body.append( '})\n' )
def visit_colspec(self, node):
self.active_table.visit_colspec(node)
def depart_colspec(self, node):
pass
def visit_comment(self, node):
# Escape end of line by a new comment start in comment text.
self.body.append('%% %s \n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
def depart_contact(self, node):
self.depart_docinfo_item(node)
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item(node)
def visit_danger(self, node):
self.visit_admonition(node, 'danger')
def depart_danger(self, node):
self.depart_admonition()
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item(node)
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
self.body.append('%[visit_definition]\n')
def depart_definition(self, node):
self.body.append('\n')
self.body.append('%[depart_definition]\n')
def visit_definition_list(self, node):
self.body.append( '\\begin{description}\n' )
def depart_definition_list(self, node):
self.body.append( '\\end{description}\n' )
def visit_definition_list_item(self, node):
self.body.append('%[visit_definition_list_item]\n')
def depart_definition_list_item(self, node):
self.body.append('%[depart_definition_list_item]\n')
def visit_description(self, node):
if self.use_optionlist_for_option_list:
self.body.append( ' ' )
else:
self.body.append( ' & ' )
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self.docinfo = []
self.docinfo.append('%' + '_'*75 + '\n')
self.docinfo.append('\\begin{center}\n')
self.docinfo.append('\\begin{tabularx}{\\docinfowidth}{lX}\n')
def depart_docinfo(self, node):
self.docinfo.append('\\end{tabularx}\n')
self.docinfo.append('\\end{center}\n')
self.body = self.docinfo + self.body
# clear docinfo, so field names are no longer appended.
self.docinfo = None
def visit_docinfo_item(self, node, name):
if name == 'author':
if not self.pdfinfo == None:
if not self.pdfauthor:
self.pdfauthor = self.attval(node.astext())
else:
self.pdfauthor += self.author_separator + self.attval(node.astext())
if self.use_latex_docinfo:
if name in ('author', 'organization', 'contact', 'address'):
# We attach these to the last author. If any of them precedes
# the first author, put them in a separate "author" group (for
# no better semantics).
if name == 'author' or not self.author_stack:
self.author_stack.append([])
if name == 'address': # newlines are meaningful
self.insert_newline = 1
text = self.encode(node.astext())
self.insert_newline = 0
else:
text = self.attval(node.astext())
self.author_stack[-1].append(text)
raise nodes.SkipNode
elif name == 'date':
self.date = self.attval(node.astext())
raise nodes.SkipNode
self.docinfo.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'address':
self.insert_newline = 1
self.docinfo.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
else:
self.context.append(' \\\\\n')
self.context.append(self.docinfo)
self.context.append(len(self.body))
def depart_docinfo_item(self, node):
size = self.context.pop()
dest = self.context.pop()
tail = self.context.pop()
tail = self.body[size:] + [tail]
del self.body[size:]
dest.extend(tail)
# for address we did set insert_newline
self.insert_newline = 0
def visit_doctest_block(self, node):
self.body.append( '\\begin{verbatim}' )
self.verbatim = 1
def depart_doctest_block(self, node):
self.body.append( '\\end{verbatim}\n' )
self.verbatim = 0
def visit_document(self, node):
self.body_prefix.append('\\begin{document}\n')
# titled document?
if self.use_latex_docinfo or len(node) and isinstance(node[0], nodes.title):
self.body_prefix.append('\\maketitle\n\n')
# alternative use titlepage environment.
# \begin{titlepage}
self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n')
def depart_document(self, node):
        # TODO: the insertion point of the bibliography should not be automatic.
if self._use_latex_citations and len(self._bibitems)>0:
widest_label = ""
for bi in self._bibitems:
if len(widest_label)<len(bi[0]):
widest_label = bi[0]
self.body.append('\n\\begin{thebibliography}{%s}\n'%widest_label)
for bi in self._bibitems:
self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], bi[0], bi[1]))
self.body.append('\\end{thebibliography}\n')
self.body_suffix.append('\\end{document}\n')
def visit_emphasis(self, node):
self.body.append('\\emph{')
self.literal_block_stack.append('\\emph{')
def depart_emphasis(self, node):
self.body.append('}')
self.literal_block_stack.pop()
def visit_entry(self, node):
self.active_table.visit_entry()
# cell separation
if self.active_table.get_entry_number() == 1:
# if the firstrow is a multirow, this actually is the second row.
# this gets hairy if rowspans follow each other.
if self.active_table.get_rowspan(0):
count = 0
while self.active_table.get_rowspan(count):
count += 1
self.body.append(' & ')
self.active_table.visit_entry() # increment cell count
else:
self.body.append(' & ')
# multi{row,column}
# IN WORK BUG TODO HACK continues here
# multirow in LaTeX simply will enlarge the cell over several rows
# (the following n if n is positive, the former if negative).
if node.has_key('morerows') and node.has_key('morecols'):
raise NotImplementedError('Cells that '
'span multiple rows *and* columns are not supported, sorry.')
if node.has_key('morerows'):
count = node['morerows'] + 1
self.active_table.set_rowspan(self.active_table.get_entry_number()-1,count)
self.body.append('\\multirow{%d}{%s}{' % \
(count,self.active_table.get_column_width()))
self.context.append('}')
# BUG following rows must have empty cells.
elif node.has_key('morecols'):
# the vertical bar before column is missing if it is the first column.
# the one after always.
if self.active_table.get_entry_number() == 1:
bar1 = self.active_table.get_vertical_bar()
else:
bar1 = ''
count = node['morecols'] + 1
self.body.append('\\multicolumn{%d}{%sl%s}{' % \
(count, bar1, self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
# header / not header
if isinstance(node.parent.parent, nodes.thead):
self.body.append('\\textbf{')
self.context.append('}')
else:
self.context.append('')
def depart_entry(self, node):
self.body.append(self.context.pop()) # header / not header
self.body.append(self.context.pop()) # multirow/column
# if following row is spanned from above.
if self.active_table.get_rowspan(self.active_table.get_entry_number()):
self.body.append(' & ')
self.active_table.visit_entry() # increment cell count
def visit_row(self, node):
self.active_table.visit_row()
def depart_row(self, node):
self.body.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node):
# We create our own enumeration list environment.
# This allows to set the style and starting value
# and unlimited nesting.
self._enum_cnt += 1
enum_style = {'arabic':'arabic',
'loweralpha':'alph',
'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman' }
enum_suffix = ""
if node.has_key('suffix'):
enum_suffix = node['suffix']
enum_prefix = ""
if node.has_key('prefix'):
enum_prefix = node['prefix']
if self.compound_enumerators:
pref = ""
if self.section_prefix_for_enumerators and self.section_level:
for i in range(self.section_level):
pref += '%d.' % self._section_number[i]
pref = pref[:-1] + self.section_enumerator_separator
enum_prefix += pref
for counter in self._enumeration_counters:
enum_prefix += counter + '.'
enum_type = "arabic"
if node.has_key('enumtype'):
enum_type = node['enumtype']
if enum_style.has_key(enum_type):
enum_type = enum_style[enum_type]
counter_name = "listcnt%d" % self._enum_cnt;
self._enumeration_counters.append("\\%s{%s}" % (enum_type,counter_name))
self.body.append('\\newcounter{%s}\n' % counter_name)
self.body.append('\\begin{list}{%s\\%s{%s}%s}\n' % \
(enum_prefix,enum_type,counter_name,enum_suffix))
self.body.append('{\n')
self.body.append('\\usecounter{%s}\n' % counter_name)
# set start after usecounter, because it initializes to zero.
if node.has_key('start'):
self.body.append('\\addtocounter{%s}{%d}\n' \
% (counter_name,node['start']-1))
## set rightmargin equal to leftmargin
self.body.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
self.body.append('}\n')
def depart_enumerated_list(self, node):
self.body.append('\\end{list}\n')
self._enumeration_counters.pop()
def visit_error(self, node):
self.visit_admonition(node, 'error')
def depart_error(self, node):
self.depart_admonition()
def visit_field(self, node):
# real output is done in siblings: _argument, _body, _name
pass
def depart_field(self, node):
self.body.append('\n')
##self.body.append('%[depart_field]\n')
def visit_field_argument(self, node):
self.body.append('%[visit_field_argument]\n')
def depart_field_argument(self, node):
self.body.append('%[depart_field_argument]\n')
def visit_field_body(self, node):
        # BUG: by attaching as text we lose references.
if self.docinfo:
self.docinfo.append('%s \\\\\n' % self.encode(node.astext()))
raise nodes.SkipNode
# BUG: what happens if not docinfo
def depart_field_body(self, node):
self.body.append( '\n' )
def visit_field_list(self, node):
if not self.docinfo:
self.body.append('\\begin{quote}\n')
self.body.append('\\begin{description}\n')
def depart_field_list(self, node):
if not self.docinfo:
self.body.append('\\end{description}\n')
self.body.append('\\end{quote}\n')
def visit_field_name(self, node):
# BUG this duplicates docinfo_item
if self.docinfo:
self.docinfo.append('\\textbf{%s}: &\n\t' % self.encode(node.astext()))
raise nodes.SkipNode
else:
self.body.append('\\item [')
def depart_field_name(self, node):
if not self.docinfo:
self.body.append(':]')
def visit_figure(self, node):
self.body.append( '\\begin{figure}[htbp]\\begin{center}\n' )
def depart_figure(self, node):
self.body.append( '\\end{center}\\end{figure}\n' )
def visit_footer(self, node):
self.context.append(len(self.body))
def depart_footer(self, node):
start = self.context.pop()
footer = (['\n\\begin{center}\small\n']
+ self.body[start:] + ['\n\\end{center}\n'])
self.body_suffix[:0] = footer
del self.body[start:]
def visit_footnote(self, node):
if self.use_latex_footnotes:
num,text = node.astext().split(None,1)
num = self.encode(num.strip())
self.body.append('\\footnotetext['+num+']')
self.body.append('{')
else:
self.body.append('\\begin{figure}[b]')
for id in node['ids']:
self.body.append('\\hypertarget{%s}' % id)
def depart_footnote(self, node):
if self.use_latex_footnotes:
self.body.append('}\n')
else:
self.body.append('\\end{figure}\n')
def visit_footnote_reference(self, node):
if self.use_latex_footnotes:
self.body.append("\\footnotemark["+self.encode(node.astext())+"]")
raise nodes.SkipNode
href = ''
if node.has_key('refid'):
href = node['refid']
elif node.has_key('refname'):
href = self.document.nameids[node['refname']]
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
elif format == 'superscript':
suffix = '\\raisebox{.5em}[0em]{\\scriptsize'
self.context.append('}')
else: # shouldn't happen
raise AssertionError('Illegal footnote reference format.')
self.body.append('%s\\hyperlink{%s}{' % (suffix,href))
def depart_footnote_reference(self, node):
if self.use_latex_footnotes:
return
self.body.append('}%s' % self.context.pop())
# footnote/citation label
def label_delim(self, node, bracket, superscript):
if isinstance(node.parent, nodes.footnote):
if self.use_latex_footnotes:
raise nodes.SkipNode
if self.settings.footnote_references == 'brackets':
self.body.append(bracket)
else:
self.body.append(superscript)
else:
assert isinstance(node.parent, nodes.citation)
if not self._use_latex_citations:
self.body.append(bracket)
def visit_label(self, node):
self.label_delim(node, '[', '$^{')
def depart_label(self, node):
self.label_delim(node, ']', '}$')
# elements generated by the framework e.g. section numbers.
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.context.append(len(self.body))
def depart_header(self, node):
start = self.context.pop()
self.body_prefix.append('\n\\verb|begin_header|\n')
self.body_prefix.extend(self.body[start:])
self.body_prefix.append('\n\\verb|end_header|\n')
del self.body[start:]
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
def depart_hint(self, node):
self.depart_admonition()
def visit_image(self, node):
attrs = node.attributes
# Add image URI to dependency list, assuming that it's
# referring to a local file.
self.settings.record_dependencies.add(attrs['uri'])
pre = [] # in reverse order
post = ['\\includegraphics{%s}' % attrs['uri']]
inline = isinstance(node.parent, nodes.TextElement)
if attrs.has_key('scale'):
# Could also be done with ``scale`` option to
# ``\includegraphics``; doing it this way for consistency.
pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
post.append('}')
if attrs.has_key('align'):
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
(1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'),
(1, 'bottom'): ('\\raisebox{-\\height}{', '}'),
(0, 'center'): ('{\\hfill', '\\hfill}'),
# These 2 don't exactly do the right thing. The image should
# be floated alongside the paragraph. See
# http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
(0, 'left'): ('{', '\\hfill}'),
(0, 'right'): ('{\\hfill', '}'),}
try:
pre.append(align_prepost[inline, attrs['align']][0])
post.append(align_prepost[inline, attrs['align']][1])
except KeyError:
pass # XXX complain here?
if not inline:
pre.append('\n')
post.append('\n')
pre.reverse()
self.body.extend(pre + post)
def depart_image(self, node):
pass
def visit_important(self, node):
self.visit_admonition(node, 'important')
def depart_important(self, node):
self.depart_admonition()
def visit_interpreted(self, node):
# @@@ Incomplete, pending a proper implementation on the
# Parser/Reader end.
self.visit_literal(node)
def depart_interpreted(self, node):
self.depart_literal(node)
def visit_legend(self, node):
self.body.append('{\\small ')
def depart_legend(self, node):
self.body.append('}')
def visit_line(self, node):
        self.body.append('\\item[] ')
def depart_line(self, node):
self.body.append('\n')
def visit_line_block(self, node):
if isinstance(node.parent, nodes.line_block):
self.body.append('\\item[] \n'
'\\begin{lineblock}{\\lineblockindentation}\n')
else:
self.body.append('\n\\begin{lineblock}{0em}\n')
def depart_line_block(self, node):
self.body.append('\\end{lineblock}\n')
def visit_list_item(self, node):
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append('\\item {} ')
def depart_list_item(self, node):
self.body.append('\n')
def visit_literal(self, node):
self.literal = 1
self.body.append('\\texttt{')
def depart_literal(self, node):
self.body.append('}')
self.literal = 0
def visit_literal_block(self, node):
"""
Render a literal-block.
        Literal blocks are used for "::"-prefixed, literal-indented
        blocks of text, where inline markup is not recognized; they are
        also the product of the parsed-literal directive, where the
        markup is respected.
"""
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox.
#
        # We can distinguish between the two kinds by the number of
        # children composing this node: if it is composed of a single
        # Text element, it is either a real literal-block or a
        # parsed-literal that does not contain any markup.
#
if (self.settings.use_verbatim_when_possible and (len(node) == 1)
# in case of a parsed-literal containing just a "**bold**" word:
and isinstance(node[0], nodes.Text)):
self.verbatim = 1
self.body.append('\\begin{quote}\\begin{verbatim}\n')
else:
self.literal_block = 1
self.insert_none_breaking_blanks = 1
if self.active_table.is_open():
self.body.append('\n{\\ttfamily \\raggedright \\noindent\n')
else:
                # no quote inside tables, to avoid vertical space between
                # table border and literal block.
                # BUG: fails if normal text precedes the literal block.
self.body.append('\\begin{quote}')
self.body.append('{\\ttfamily \\raggedright \\noindent\n')
# * obey..: is from julien and never worked for me (grubert).
# self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n')
def depart_literal_block(self, node):
if self.verbatim:
self.body.append('\n\\end{verbatim}\\end{quote}\n')
self.verbatim = 0
else:
if self.active_table.is_open():
self.body.append('\n}\n')
else:
self.body.append('\n')
self.body.append('}\\end{quote}\n')
self.insert_none_breaking_blanks = 0
self.literal_block = 0
# obey end: self.body.append('}\n')
def visit_meta(self, node):
self.body.append('[visit_meta]\n')
# BUG maybe set keywords for pdf
##self.head.append(self.starttag(node, 'meta', **node.attributes))
def depart_meta(self, node):
self.body.append('[depart_meta]\n')
def visit_note(self, node):
self.visit_admonition(node, 'note')
def depart_note(self, node):
self.depart_admonition()
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node):
        # flag that the first option is done.
self.context[-1] += 1
    def visit_option_argument(self, node):
        """The delimiter between an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
if self.use_optionlist_for_option_list:
self.body.append('\\item [')
else:
if len(node.astext()) > 14:
self.body.append('\\multicolumn{2}{l}{')
self.context.append('} \\\\\n ')
else:
self.context.append('')
self.body.append('\\texttt{')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
if self.use_optionlist_for_option_list:
self.body.append('] ')
else:
self.body.append('}')
self.body.append(self.context.pop())
def visit_option_list(self, node):
self.body.append('% [option list]\n')
if self.use_optionlist_for_option_list:
self.body.append('\\begin{optionlist}{3cm}\n')
else:
self.body.append('\\begin{center}\n')
# BUG: use admwidth or make it relative to textwidth ?
self.body.append('\\begin{tabularx}{.9\\linewidth}{lX}\n')
def depart_option_list(self, node):
if self.use_optionlist_for_option_list:
self.body.append('\\end{optionlist}\n')
else:
self.body.append('\\end{tabularx}\n')
self.body.append('\\end{center}\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
if not self.use_optionlist_for_option_list:
self.body.append('\\\\\n')
def visit_option_string(self, node):
##self.body.append(self.starttag(node, 'span', '', CLASS='option'))
pass
def depart_option_string(self, node):
##self.body.append('</span>')
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
index = node.parent.index(node)
if not ('contents' in self.topic_classes or
(isinstance(node.parent, nodes.compound) and
index > 0 and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound))):
self.body.append('\n')
def depart_paragraph(self, node):
self.body.append('\n')
def visit_problematic(self, node):
self.body.append('{\\color{red}\\bfseries{}')
def depart_problematic(self, node):
self.body.append('}')
def visit_raw(self, node):
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_reference(self, node):
        # BUG: hash_char "#" is troublesome in LaTeX.
        # mbox and other environments do not like the '#'.
hash_char = '\\#'
if node.has_key('refuri'):
href = node['refuri'].replace('#',hash_char)
elif node.has_key('refid'):
href = hash_char + node['refid']
elif node.has_key('refname'):
href = hash_char + self.document.nameids[node['refname']]
else:
raise AssertionError('Unknown reference.')
self.body.append('\\href{%s}{' % href)
def depart_reference(self, node):
self.body.append('}')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
def depart_revision(self, node):
self.depart_docinfo_item(node)
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
# BUG: this is just a hack to make sidebars render something
self.body.append('\n\\setlength{\\locallinewidth}{0.9\\admonitionwidth}\n')
self.body.append('\\begin{center}\\begin{sffamily}\n')
self.body.append('\\fbox{\\colorbox[gray]{0.80}{\\parbox{\\admonitionwidth}{\n')
def depart_sidebar(self, node):
self.body.append('}}}\n') # end parbox colorbox fbox
        self.body.append('\\end{sffamily}\n\\end{center}\n')
self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n')
attribution_formats = {'dash': ('---', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.body.append('\n\\begin{flushright}\n')
self.body.append(prefix)
self.context.append(suffix)
def depart_attribution(self, node):
self.body.append(self.context.pop() + '\n')
self.body.append('\\end{flushright}\n')
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
def depart_status(self, node):
self.depart_docinfo_item(node)
def visit_strong(self, node):
self.body.append('\\textbf{')
self.literal_block_stack.append('\\textbf{')
def depart_strong(self, node):
self.body.append('}')
self.literal_block_stack.pop()
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append('~\\\\\n\\textbf{')
self.context.append('}\n\\smallskip\n')
elif isinstance(node.parent, nodes.document):
self.title = self.title + \
'\\\\\n\\large{%s}\n' % self.encode(node.astext())
raise nodes.SkipNode
elif isinstance(node.parent, nodes.section):
self.body.append('\\textbf{')
self.context.append('}\\vspace{0.2cm}\n\n\\noindent ')
def depart_subtitle(self, node):
self.body.append(self.context.pop())
def visit_system_message(self, node):
pass
def depart_system_message(self, node):
self.body.append('\n')
def visit_table(self, node):
if self.active_table.is_open():
print 'nested tables are not supported'
raise AssertionError
self.active_table.open()
self.body.append('\n' + self.active_table.get_opening())
def depart_table(self, node):
self.body.append(self.active_table.get_closing() + '\n')
self.active_table.close()
def visit_target(self, node):
# BUG: why not (refuri or refid or refname) means not footnote ?
if not (node.has_key('refuri') or node.has_key('refid')
or node.has_key('refname')):
for id in node['ids']:
self.body.append('\\hypertarget{%s}{' % id)
self.context.append('}' * len(node['ids']))
else:
self.context.append('')
def depart_target(self, node):
self.body.append(self.context.pop())
def visit_tbody(self, node):
# BUG write preamble if not yet done (colspecs not [])
# for tables without heads.
if not self.active_table.get('preamble written'):
self.visit_thead(None)
# self.depart_thead(None)
def depart_tbody(self, node):
pass
def visit_term(self, node):
self.body.append('\\item[{')
def depart_term(self, node):
# definition list term.
self.body.append('}] ')
def visit_tgroup(self, node):
#self.body.append(self.starttag(node, 'colgroup'))
#self.context.append('</colgroup>\n')
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
self.body.append('{%s}\n' % self.active_table.get_colspecs())
if self.active_table.caption:
self.body.append('\\caption{%s}\\\\\n' % self.active_table.caption)
self.active_table.set('preamble written',1)
# TODO longtable supports firsthead and lastfoot too.
self.body.extend(self.active_table.visit_thead())
def depart_thead(self, node):
# the table header written should be on every page
# => \endhead
self.body.extend(self.active_table.depart_thead())
# and the firsthead => \endfirsthead
        # BUG: we want a "continued from previous page" header on every
        # page after the first, but then we need the header twice.
#
# there is a \endfoot and \endlastfoot too.
# but we need the number of columns to
# self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns)
# self.body.append('\\hline\n\\endfoot\n')
# self.body.append('\\hline\n')
# self.body.append('\\endlastfoot\n')
def visit_tip(self, node):
self.visit_admonition(node, 'tip')
def depart_tip(self, node):
self.depart_admonition()
def bookmark(self, node):
"""Append latex href and pdfbookmarks for titles.
"""
if node.parent['ids']:
for id in node.parent['ids']:
self.body.append('\\hypertarget{%s}{}\n' % id)
if not self.use_latex_toc:
            # BUG: level depends on style. pdflatex allows levels 0 to 3.
            # The ToC would be the only one on level 0, so we choose to
            # decrement the rest; otherwise a "Table of contents" bookmark
            # would be needed to see the ToC. To avoid this we set all
            # zeroes to one.
l = self.section_level
if l>0:
l = l-1
# pdftex does not like "_" subscripts in titles
text = self.encode(node.astext())
for id in node.parent['ids']:
self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \
(l, text, id))
def visit_title(self, node):
"""Only 3 section levels are supported by LaTeX article (AFAIR)."""
if isinstance(node.parent, nodes.topic):
# section titles before the table of contents.
self.bookmark(node)
# BUG: latex chokes on center environment with "perhaps a missing item".
# so we use hfill.
self.body.append('\\subsubsection*{~\\hfill ')
# the closing brace for subsection.
self.context.append('\\hfill ~}\n')
# TODO: for admonition titles before the first section
# either specify every possible node or ... ?
elif isinstance(node.parent, nodes.sidebar) \
or isinstance(node.parent, nodes.admonition):
self.body.append('\\textbf{\\large ')
self.context.append('}\n\\smallskip\n')
elif isinstance(node.parent, nodes.table):
# caption must be written after column spec
self.active_table.caption = self.encode(node.astext())
raise nodes.SkipNode
elif self.section_level == 0:
# document title
self.title = self.encode(node.astext())
if not self.pdfinfo == None:
self.pdfinfo.append( 'pdftitle={%s}' % self.encode(node.astext()) )
raise nodes.SkipNode
else:
self.body.append('\n\n')
self.body.append('%' + '_' * 75)
self.body.append('\n\n')
self.bookmark(node)
if self.use_latex_toc:
section_star = ""
else:
section_star = "*"
section_name = self.d_class.section(self.section_level)
self.body.append('\\%s%s{' % (section_name, section_star))
self.context.append('}\n')
def depart_title(self, node):
self.body.append(self.context.pop())
def visit_topic(self, node):
self.topic_classes = node['classes']
if 'contents' in node['classes'] and self.use_latex_toc:
self.body.append('\\tableofcontents\n\n\\bigskip\n')
self.topic_classes = []
raise nodes.SkipNode
def visit_inline(self, node): # titlereference
self.body.append( '\\docutilsrole%s{' % node.get('class'))
def depart_inline(self, node):
self.body.append( '}' )
def depart_topic(self, node):
self.topic_classes = []
self.body.append('\n')
def visit_rubric(self, node):
self.body.append('\\rubric{')
self.context.append('}\n')
def depart_rubric(self, node):
self.body.append(self.context.pop())
def visit_transition(self, node):
self.body.append('\n\n')
self.body.append('%' + '_' * 75)
self.body.append('\n\\hspace*{\\fill}\\hrulefill\\hspace*{\\fill}')
self.body.append('\n\n')
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
def depart_warning(self, node):
self.depart_admonition()
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
| garinh/cs | docs/support/docutils/writers/latex2e.py | Python | lgpl-2.1 | 75,964 | 0.00387 |
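# A minimal sketch of driving the writer above through the public docutils
# API. publish_string() and the 'latex' writer name are part of the stock
# docutils distribution of this era; the reST source below is illustrative.
from docutils.core import publish_string

rst_source = """\
Title
=====

A literal block follows::

    verbatim text, no *markup* recognized here
"""

# Emits a complete LaTeX document; the indented block above comes out as the
# \begin{verbatim} environment produced by visit_literal_block().
print publish_string(source=rst_source, writer_name='latex')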
#! /usr/bin/env python
import sys
import os
import subprocess
includeos_src = os.environ.get('INCLUDEOS_SRC',
os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
sys.path.insert(0,includeos_src)
from vmrunner import vmrunner
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
def cleanup():
# Call the cleanup script - let python do the printing to get it synced
print subprocess.check_output(["./fat32_disk.sh", "clean"])
# Setup disk
subprocess.call(["./fat32_disk.sh"], shell=True)
# Clean up on exit
vm.on_exit(cleanup)
# Boot the VM
vm.cmake().boot(30).clean()
| ingve/IncludeOS | test/fs/integration/ide_write/test.py | Python | apache-2.0 | 663 | 0.007541 |
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
STATUS_COLORS = {
'default': 'blue',
'queued': 'blue',
'undetermined': 'blue',
'infected': 'red',
'uninfected': 'green',
'deposited': 'blue',
'rejected': 'red',
'accepted': 'green',
'valid': 'green',
'invalid': 'red',
'undefined': 'blue'
}
BOX_COLORS = {
'blue': 'primary',
'red': 'danger',
'green': 'success',
'grey': 'default'
}
@register.filter
@stringfilter
def status_color(status):
"""
    This method will return grey for an unknown status.
"""
return STATUS_COLORS.get(status, 'grey')
@register.filter
def box_color(status):
"""
    This method will return grey for an unknown status.
"""
return BOX_COLORS.get(STATUS_COLORS.get(status, 'grey'), 'default')
@register.filter
def status_sps(status):
"""
This method will return valid, invalid or undefined for a given result of
models.PackageMember.sps_validation_status().
status: Tuple(None, {})
status: Tuple(True, {'is_valid': True, 'sps_errors': [], 'dtd_errors': []})
status: Tuple(False, {'is_valid': True, 'sps_errors': [], 'dtd_errors': []})
"""
if status[0] is True:
return 'valid'
if status[0] is False:
return 'invalid'
return 'undefined'
@register.filter
def widget_scielops_colors_weight(xmls):
"""
This method will return a color for the SciELO PS widget. The color will
be matched according to the error level of any of the members of the package.
    xmls: Dict with XMLs returned by models.Package.xmls().
"""
if len(xmls['invalid']) > 0:
return STATUS_COLORS['invalid']
if len(xmls['undefined']) > 0:
return STATUS_COLORS['undefined']
if len(xmls['valid']) == 0:
return STATUS_COLORS['undefined']
return STATUS_COLORS['valid'] | gustavofonseca/penne-core | frontdesk/templatetags/frontdesk.py | Python | bsd-2-clause | 1,931 | 0.002071 |
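# A hedged usage sketch for the template filters above. Registered filter
# functions remain plain callables, so they can be exercised directly; the
# import path assumes the app layout shown in this file's metadata row and a
# configured Django settings module.
from frontdesk.templatetags.frontdesk import status_color, box_color, status_sps

assert status_color('infected') == 'red'
assert status_color('no-such-status') == 'grey'    # unknown statuses fall back
assert box_color('accepted') == 'success'          # green status -> success box
assert status_sps((None, {})) == 'undefined'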
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'CampUserInvitation', fields ['camp', 'user']
db.create_unique(u'events_campuserinvitation', ['camp_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'CampUserInvitation', fields ['camp', 'user']
db.delete_unique(u'events_campuserinvitation', ['camp_id', 'user_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'competitions.competition': {
'Meta': {'ordering': "['name']", 'object_name': 'Competition'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'competitions.season': {
'Meta': {'ordering': "['competition', 'year', 'number']", 'object_name': 'Season'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.camp': {
'Meta': {'ordering': "['-start_time', 'end_time']", 'object_name': 'Camp', '_ormbases': [u'events.Event']},
u'event_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['events.Event']", 'unique': 'True', 'primary_key': 'True'}),
'invitation_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'invited': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'through': u"orm['events.CampUserInvitation']", 'symmetrical': 'False'}),
'limit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'season': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Season']", 'null': 'True', 'blank': 'True'})
},
u'events.campuserinvitation': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(('user', 'camp'),)", 'object_name': 'CampUserInvitation'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'camp': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Camp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_as': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'org_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'user_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_accepted_timestamp': ('django.db.models.fields.DateTimeField', [], {})
},
u'events.event': {
'Meta': {'ordering': "['-start_time', 'end_time']", 'object_name': 'Event'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'registered_org': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'organized_event_set'", 'symmetrical': 'False', 'through': u"orm['events.EventOrgRegistration']", 'to': u"orm['auth.User']"}),
'registered_user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'through': u"orm['events.EventUserRegistration']", 'symmetrical': 'False'}),
'registration_end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {})
},
u'events.eventorgregistration': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(('event', 'organizer'),)", 'object_name': 'EventOrgRegistration'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'organizer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'events.eventuserregistration': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(('event', 'user'),)", 'object_name': 'EventUserRegistration'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['events'] | matus-stehlik/glowing-batman | events/migrations/0002_auto__add_unique_campuserinvitation_camp_user.py | Python | mit | 10,021 | 0.007983 |
"""Provides helpers for RFXtrx."""
from RFXtrx import get_device
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.typing import HomeAssistantType
@callback
def async_get_device_object(hass: HomeAssistantType, device_id):
"""Get a device for the given device registry id."""
device_registry = dr.async_get(hass)
registry_device = device_registry.async_get(device_id)
if registry_device is None:
raise ValueError(f"Device {device_id} not found")
device_tuple = list(list(registry_device.identifiers)[0])
return get_device(
int(device_tuple[1], 16), int(device_tuple[2], 16), device_tuple[3]
)
| aronsky/home-assistant | homeassistant/components/rfxtrx/helpers.py | Python | apache-2.0 | 715 | 0 |
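# Hedged illustration of the identifier tuple this helper unpacks. Judging by
# the int(..., 16) parsing above, an RFXtrx device is registered with
# identifiers of the form (DOMAIN, packet_type_hex, sub_type_hex, id_string);
# the concrete values below are assumptions, not taken from the source:
#
#   registry_device.identifiers == {("rfxtrx", "11", "0", "213c7f2:16")}
#   device_tuple == ["rfxtrx", "11", "0", "213c7f2:16"]
#   get_device(0x11, 0x0, "213c7f2:16")   # -> an RFXtrx device object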
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplicationSecurityGroupsOperations(object):
"""ApplicationSecurityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
"""Gets information about the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.ApplicationSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.ApplicationSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.ApplicationSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ApplicationSecurityGroup"]
"""Creates or updates an application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:param parameters: Parameters supplied to the create or update ApplicationSecurityGroup
operation.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.ApplicationSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.ApplicationSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationSecurityGroupListResult"]
"""Gets all application security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationSecurityGroupListResult"]
"""Gets all the application security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_application_security_groups_operations.py | Python | mit | 24,325 | 0.005015 |
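# A minimal usage sketch for the operations class above, reached through the
# generated management client. Assumes azure-mgmt-network and azure-identity
# are installed; subscription id, resource group and location are placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
poller = client.application_security_groups.begin_create_or_update(
    "my-resource-group",
    "my-asg",
    {"location": "westus"},
)
asg = poller.result()   # LROPoller: blocks until the ARM operation completes
for group in client.application_security_groups.list("my-resource-group"):
    print(group.name)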
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
        # Length of the elevation map.
        l = len(height)
        # maxheight[i] will hold the tallest bar at or to the left of index i.
        maxheight = [0 for i in range(l)]
        leftmax = 0
        rightmax = 0
        res = 0
        # First pass, left to right: record the running left maximum.
        for i in range(l):
            if height[i] > leftmax:
                leftmax = height[i]
            maxheight[i] = leftmax
        # Second pass, right to left: track the running right maximum. Water
        # trapped above bar i is bounded by the lower of the two maxima,
        # minus the height of the bar itself.
        for i in reversed(range(l)):
            if height[i] > rightmax:
                rightmax = height[i]
            if min(rightmax, maxheight[i]) - height[i] > 0:
                res += min(rightmax, maxheight[i]) - height[i]
return res | dichen001/Go4Jobs | JoeXu/42. Trapping rain water.py | Python | gpl-3.0 | 616 | 0.027597 |
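# Quick check of the two-pass solution above on the classic example: this
# elevation map traps 6 units of water.
assert Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6
assert Solution().trap([]) == 0   # no bars, no water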
"""Naive range analysis for expression"""
from miasm2.analysis.modularintervals import ModularIntervals
_op_range_handler = {
    "+": lambda x, y: x + y,
    "&": lambda x, y: x & y,
    "|": lambda x, y: x | y,
    "^": lambda x, y: x ^ y,
    "*": lambda x, y: x * y,
    ">>": lambda x, y: x >> y,
    "a>>": lambda x, y: x.arithmetic_shift_right(y),
    "<<": lambda x, y: x << y,
    ">>>": lambda x, y: x.rotation_right(y),
    "<<<": lambda x, y: x.rotation_left(y),
    }
def expr_range(expr):
"""Return a ModularIntervals containing the range of possible values of
@expr"""
max_bound = (1 << expr.size) - 1
if expr.is_int():
return ModularIntervals(expr.size, [(int(expr), int(expr))])
elif expr.is_id() or expr.is_mem():
return ModularIntervals(expr.size, [(0, max_bound)])
elif expr.is_slice():
interval_mask = ((1 << expr.start) - 1) ^ ((1 << expr.stop) - 1)
arg = expr_range(expr.arg)
# Mask for possible range, and shift range
return ((arg & interval_mask) >> expr.start).size_update(expr.size)
elif expr.is_compose():
sub_ranges = [expr_range(arg) for arg in expr.args]
args_idx = [info[0] for info in expr.iter_args()]
# No shift for the first one
ret = sub_ranges[0].size_update(expr.size)
# Doing it progressively (2 by 2)
for shift, sub_range in zip(args_idx[1:], sub_ranges[1:]):
ret |= sub_range.size_update(expr.size) << shift
return ret
elif expr.is_op():
# A few operation are handled with care
# Otherwise, overapproximate (ie. full range interval)
if expr.op in _op_range_handler:
sub_ranges = [expr_range(arg) for arg in expr.args]
return reduce(_op_range_handler[expr.op],
(sub_range for sub_range in sub_ranges[1:]),
sub_ranges[0])
elif expr.op == "-":
assert len(expr.args) == 1
return - expr_range(expr.args[0])
elif expr.op == "%":
assert len(expr.args) == 2
op, mod = [expr_range(arg) for arg in expr.args]
if mod.intervals.length == 1:
# Modulo intervals is not supported
return op % mod.intervals.hull()[0]
# Operand not handled, return the full domain
return ModularIntervals(expr.size, [(0, max_bound)])
elif expr.is_cond():
return expr_range(expr.src1).union(expr_range(expr.src2))
else:
raise TypeError("Unsupported type: %s" % expr.__class__)
| chubbymaggie/miasm | miasm2/analysis/expression_range.py | Python | gpl-2.0 | 2,613 | 0.000383 |
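# Hedged usage sketch. The ExprId/ExprInt constructor signatures are an
# assumption -- they vary slightly across miasm2 revisions.
from miasm2.expression.expression import ExprId, ExprInt

x = ExprId("x", 8)                        # unknown 8-bit value
print expr_range(x)                       # full range [0x0, 0xFF]
print expr_range(x & ExprInt(0x0F, 8))    # masking narrows it to [0x0, 0xF]
print expr_range(ExprInt(5, 8) + ExprInt(7, 8))   # constants fold to [0xC, 0xC]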
# -*- coding: utf-8 -*-
#
# Copyright © 2012-2013 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
spyderlib.py3compat
-------------------
Transitional module providing compatibility functions intended to help
migrating from Python 2 to Python 3.
This module should be fully compatible with:
* Python >=v2.6
* Python 3
"""
from __future__ import print_function
import sys
import os
PY2 = sys.version[0] == '2'
PY3 = sys.version[0] == '3'
#==============================================================================
# Data types
#==============================================================================
if PY2:
# Python 2
TEXT_TYPES = (str, unicode)
INT_TYPES = (int, long)
else:
# Python 3
TEXT_TYPES = (str,)
INT_TYPES = (int,)
NUMERIC_TYPES = tuple(list(INT_TYPES) + [float, complex])
#==============================================================================
# Renamed/Reorganized modules
#==============================================================================
if PY2:
# Python 2
import __builtin__ as builtins
import ConfigParser as configparser
try:
import _winreg as winreg
except ImportError:
pass
from sys import maxint as maxsize
try:
import CStringIO as io
except ImportError:
import StringIO as io
try:
import cPickle as pickle
except ImportError:
import pickle
from UserDict import DictMixin as MutableMapping
import thread as _thread
import repr as reprlib
else:
# Python 3
import builtins
import configparser
try:
import winreg
except ImportError:
pass
from sys import maxsize
import io
import pickle
from collections import MutableMapping
import _thread
import reprlib
#==============================================================================
# Strings
#==============================================================================
if PY2:
# Python 2
import codecs
def u(obj):
"""Make unicode object"""
return codecs.unicode_escape_decode(obj)[0]
else:
# Python 3
def u(obj):
"""Return string as it is"""
return obj
def is_text_string(obj):
"""Return True if `obj` is a text string, False if it is anything else,
like binary data (Python 3) or QString (Python 2, PyQt API #1)"""
if PY2:
# Python 2
return isinstance(obj, basestring)
else:
# Python 3
return isinstance(obj, str)
def is_binary_string(obj):
"""Return True if `obj` is a binary string, False if it is anything else"""
if PY2:
# Python 2
return isinstance(obj, str)
else:
# Python 3
return isinstance(obj, bytes)
def is_string(obj):
"""Return True if `obj` is a text or binary Python string object,
False if it is anything else, like a QString (Python 2, PyQt API #1)"""
return is_text_string(obj) or is_binary_string(obj)
def is_unicode(obj):
"""Return True if `obj` is unicode"""
if PY2:
# Python 2
return isinstance(obj, unicode)
else:
# Python 3
return isinstance(obj, str)
def to_text_string(obj, encoding=None):
"""Convert `obj` to (unicode) text string"""
if PY2:
# Python 2
if encoding is None:
return unicode(obj)
else:
return unicode(obj, encoding)
else:
# Python 3
if encoding is None:
return str(obj)
elif isinstance(obj, str):
# In case this function is not used properly, this could happen
return obj
else:
return str(obj, encoding)
def to_binary_string(obj, encoding=None):
"""Convert `obj` to binary string (bytes in Python 3, str in Python 2)"""
if PY2:
# Python 2
if encoding is None:
return str(obj)
else:
return obj.encode(encoding)
else:
# Python 3
return bytes(obj, 'utf-8' if encoding is None else encoding)
#==============================================================================
# Function attributes
#==============================================================================
def get_func_code(func):
"""Return function code object"""
if PY2:
# Python 2
return func.func_code
else:
# Python 3
return func.__code__
def get_func_name(func):
"""Return function name"""
if PY2:
# Python 2
return func.func_name
else:
# Python 3
return func.__name__
def get_func_defaults(func):
"""Return function default argument values"""
if PY2:
# Python 2
return func.func_defaults
else:
# Python 3
return func.__defaults__
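# Hedged example (not part of the original module): the accessors above give a
# version-independent view of a function's internals; `sample` is made up.
def _demo_func_attributes():
    def sample(a, b=2):
        return a + b
    assert get_func_name(sample) == 'sample'
    assert get_func_defaults(sample) == (2,)
    assert get_func_code(sample).co_argcount == 2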
#==============================================================================
# Special method attributes
#==============================================================================
def get_meth_func(obj):
"""Return method function object"""
if PY2:
# Python 2
return obj.im_func
else:
# Python 3
return obj.__func__
def get_meth_class_inst(obj):
"""Return method class instance"""
if PY2:
# Python 2
return obj.im_self
else:
# Python 3
return obj.__self__
def get_meth_class(obj):
"""Return method class"""
if PY2:
# Python 2
return obj.im_class
else:
# Python 3
return obj.__self__.__class__
#==============================================================================
# Misc.
#==============================================================================
if PY2:
# Python 2
input = raw_input
getcwd = os.getcwdu
cmp = cmp
import string
str_lower = string.lower
from itertools import izip_longest as zip_longest
else:
# Python 3
input = input
getcwd = os.getcwd
def cmp(a, b):
return (a > b) - (a < b)
str_lower = str.lower
from itertools import zip_longest
def qbytearray_to_str(qba):
"""Convert QByteArray object to str in a way compatible with Python 2/3"""
return str(bytes(qba.toHex().data()).decode())
if __name__ == '__main__':
pass
| CVML/winpython | winpython/py3compat.py | Python | mit | 6,585 | 0.003949 |
""" This provides some useful code used by other modules. This is not to be
used by the end user which is why it is hidden. """
import string, sys
class LinkError(Exception):
pass
def refine_import_err(mod_name, extension_name, exc):
""" Checks to see if the ImportError was because the library
itself was not there or because there was a link error. If there
was a link error it raises a LinkError if not it does nothing.
Keyword arguments
-----------------
- mod_name : The name of the Python module that was imported.
- extension_name : The name of the extension module that is to be
imported by the module having mod_name.
- exc : The exception raised when the module called mod_name was
imported.
To see example usage look at __init__.py.
"""
try:
del sys.modules['vtk.%s'%mod_name]
except KeyError:
pass
if string.find(str(exc), extension_name) == -1:
raise LinkError, str(exc)
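# Hedged usage sketch (not part of VTK): a hypothetical caller that separates
# "library missing" from "link failure"; the names 'io' and 'vtkIOPython' are
# illustrative only.
def _demo_refine_import_err(exc):
    try:
        refine_import_err('io', 'vtkIOPython', exc)
    except LinkError:
        return 'link error'
    return 'module missing'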
| b3c/VTK-5.8 | Wrapping/Python/vtk/__helper.py | Python | bsd-3-clause | 981 | 0.008155 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import contextlib
import ctypes
import datetime
import email.utils
import errno
import getpass
import gzip
import itertools
import io
import json
import locale
import math
import os
import pipes
import platform
import re
import ssl
import socket
import struct
import subprocess
import sys
import traceback
import xml.etree.ElementTree
import zlib
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
if len(res) == 1:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
pct_sequence = b''
string = res[0]
for item in res[1:]:
try:
if not item:
raise ValueError
pct_sequence += item[:2].decode('hex')
rest = item[2:]
if not rest:
# This segment was just a single percent-encoded character.
# May be part of a sequence of code units, so delay decoding.
# (Stored in pct_sequence).
continue
except ValueError:
rest = '%' + item
# Encountered non-percent-encoded characters. Flush the current
# pct_sequence.
string += pct_sequence.decode(encoding, errors) + rest
pct_sequence = b''
if pct_sequence:
# Flush the final pct_sequence
string += pct_sequence.decode(encoding, errors)
return string
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = _unquote(name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = _unquote(value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
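# Hedged usage sketch (not part of the original file): compat_parse_qs behaves
# like Python 3's urllib.parse.parse_qs on either interpreter; repeated keys
# accumulate into lists.
def _demo_compat_parse_qs():
    parsed = compat_parse_qs('v=abc123&t=1m30s&t=90')
    assert parsed['v'] == ['abc123']
    assert parsed['t'] == ['1m30s', '90']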
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
def compat_ord(c):
if type(c) is int: return c
else: return ord(c)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
u'TEST'.encode(pref)
except:
pref = 'UTF-8'
return pref
if sys.version_info < (3,0):
def compat_print(s):
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert type(s) == type(u'')
print(s)
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3,0):
def write_json_file(obj, fn):
with open(fn, 'wb') as f:
json.dump(obj, f)
else:
def write_json_file(obj, fn):
with open(fn, 'w', encoding='utf-8') as f:
json.dump(obj, f)
if sys.version_info >= (2,7):
def find_xpath_attr(node, xpath, key, val):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z]+$', key)
assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val)
expr = xpath + u"[@%s='%s']" % (key, val)
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val):
for f in node.findall(xpath):
if f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
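# Hedged example (not in the original): expanding a prefixed path against a
# caller-supplied namespace map, as ElementTree on Python 2.6 requires; the
# MRSS namespace here is just a familiar sample.
def _demo_xpath_with_ns():
    ns_map = {'media': 'http://search.yahoo.com/mrss/'}
    expanded = xpath_with_ns('media:group/media:title', ns_map)
    assert expanded == ('{http://search.yahoo.com/mrss/}group'
                        '/{http://search.yahoo.com/mrss/}title')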
def htmlentity_transform(matchobj):
"""Transforms an HTML entity to a character.
This function receives a match object and is intended to be used with
the re.sub() function.
"""
entity = matchobj.group(1)
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
    # hex entities need the full hex digit class; plain \d would miss &#xA9;
    mobj = re.match(u'(?u)#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith(u'x'):
base = 16
numstr = u'0%s' % numstr
else:
base = 10
return compat_chr(int(numstr, base))
# Unknown entity in name, return its literal representation
return (u'&%s;' % entity)
compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
class BaseHTMLParser(compat_html_parser.HTMLParser):
    def __init__(self):
compat_html_parser.HTMLParser.__init__(self)
self.html = None
def loads(self, html):
self.html = html
self.feed(html)
self.close()
class AttrParser(BaseHTMLParser):
"""Modified HTMLParser that isolates a tag with the specified attribute"""
def __init__(self, attribute, value):
self.attribute = attribute
self.value = value
self.result = None
self.started = False
self.depth = {}
self.watch_startpos = False
self.error_count = 0
BaseHTMLParser.__init__(self)
def error(self, message):
if self.error_count > 10 or self.started:
raise compat_html_parser.HTMLParseError(message, self.getpos())
self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
self.error_count += 1
self.goahead(1)
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if self.started:
self.find_startpos(None)
if self.attribute in attrs and attrs[self.attribute] == self.value:
self.result = [tag]
self.started = True
self.watch_startpos = True
if self.started:
if not tag in self.depth: self.depth[tag] = 0
self.depth[tag] += 1
def handle_endtag(self, tag):
if self.started:
if tag in self.depth: self.depth[tag] -= 1
if self.depth[self.result[0]] == 0:
self.started = False
self.result.append(self.getpos())
def find_startpos(self, x):
"""Needed to put the start position of the result (self.result[1])
after the opening tag with the requested id"""
if self.watch_startpos:
self.watch_startpos = False
self.result.append(self.getpos())
handle_entityref = handle_charref = handle_data = handle_comment = \
handle_decl = handle_pi = unknown_decl = find_startpos
def get_result(self):
if self.result is None:
return None
if len(self.result) != 3:
return None
lines = self.html.split('\n')
lines = lines[self.result[1][0]-1:self.result[2][0]]
lines[0] = lines[0][self.result[1][1]:]
        if len(lines) == 1:
            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
        else:
            lines[-1] = lines[-1][:self.result[2][1]]
return '\n'.join(lines).strip()
# Hack for https://github.com/rg3/youtube-dl/issues/662
if sys.version_info < (2, 7, 3):
AttrParser.parse_endtag = (lambda self, i:
i + len("</scr'+'ipt>")
if self.rawdata[i:].startswith("</scr'+'ipt>")
else compat_html_parser.HTMLParser.parse_endtag(self, i))
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute("id", id, html)
def get_element_by_attribute(attribute, value, html):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
parser = AttrParser(attribute, value)
try:
parser.loads(html)
except compat_html_parser.HTMLParseError:
pass
return parser.get_result()
class MetaParser(BaseHTMLParser):
"""
Modified HTMLParser that isolates a meta tag with the specified name
attribute.
"""
def __init__(self, name):
BaseHTMLParser.__init__(self)
self.name = name
self.content = None
self.result = None
def handle_starttag(self, tag, attrs):
if tag != 'meta':
return
attrs = dict(attrs)
if attrs.get('name') == self.name:
self.result = attrs.get('content')
def get_result(self):
return self.result
def get_meta_content(name, html):
"""
Return the content attribute from the meta tag with the given name attribute.
"""
parser = MetaParser(name)
try:
parser.loads(html)
except compat_html_parser.HTMLParseError:
pass
return parser.get_result()
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == u'-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
        # the parts must be unpacked: handing os.path.join a bare generator
        # would make it return the generator object itself
        alt_filename = os.path.join(*(
            re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part)
            for path_part in os.path.split(filename)))
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
"""
def replace_insane(char):
if char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
result = u''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if not result:
result = '_'
return result
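# Hedged examples (not in the original) of the two sanitization modes above.
def _demo_sanitize_filename():
    assert sanitize_filename(u'AC/DC: Live "1991"') == u"AC_DC - Live '1991'"
    assert sanitize_filename(u'AC/DC: Live "1991"', restricted=True) == u'AC_DC_-_Live_1991'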
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
result = re.sub(r'(?u)&(.+?);', htmlentity_transform, s)
return result
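# Hedged example (not in the original): named and numeric entities are both
# resolved through htmlentity_transform above.
def _demo_unescapeHTML():
    assert unescapeHTML(u'a &amp; b &#39; c') == u"a & b ' c"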
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# Pass u'' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess:
return s
else:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return s.encode(encoding, 'ignore')
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
def formatSeconds(secs):
if secs > 3600:
return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
elif secs > 60:
return '%d:%02d' % (secs // 60, secs % 60)
else:
return '%d' % secs
def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
if sys.version_info < (3, 2):
import httplib
class HTTPSConnectionV3(httplib.HTTPSConnection):
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
try:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3)
except ssl.SSLError:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23)
class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler):
def https_open(self, req):
return self.do_open(HTTPSConnectionV3, req)
return HTTPSHandlerV3(**kwargs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv3)
context.verify_mode = (ssl.CERT_NONE
if opts_no_check_certificate
else ssl.CERT_REQUIRED)
context.set_default_verify_paths()
try:
context.load_default_certs()
except AttributeError:
pass # Python < 3.4
return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
class ExtractorError(Exception):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if not expected:
msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
super(ExtractorError, self).__init__(msg)
self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
self.cause = cause
def format_traceback(self):
if self.traceback is None:
return None
return u''.join(traceback.format_tb(self.traceback))
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class DownloadError(Exception):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class SameFileError(Exception):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
pass
class PostProcessingError(Exception):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
def __init__(self, msg):
self.msg = msg
class MaxDownloadsReached(Exception):
""" --max-downloads limit has been reached. """
pass
class UnavailableVideoError(Exception):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
pass
class ContentTooShortError(Exception):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
# Both in bytes
downloaded = None
expected = None
def __init__(self, downloaded, expected):
self.downloaded = downloaded
self.expected = expected
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-No-Compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
@staticmethod
def deflate(data):
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
@staticmethod
def addinfourl_wrapper(stream, headers, url, code):
if hasattr(compat_urllib_request.addinfourl, 'getcode'):
return compat_urllib_request.addinfourl(stream, headers, url, code)
ret = compat_urllib_request.addinfourl(stream, headers, url)
ret.code = code
return ret
def http_request(self, req):
for h,v in std_headers.items():
if h in req.headers:
del req.headers[h]
req.add_header(h, v)
if 'Youtubedl-no-compression' in req.headers:
if 'Accept-encoding' in req.headers:
del req.headers['Accept-encoding']
del req.headers['Youtubedl-no-compression']
if 'Youtubedl-user-agent' in req.headers:
if 'User-agent' in req.headers:
del req.headers['User-agent']
req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
del req.headers['Youtubedl-user-agent']
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
                # There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
return resp
https_request = http_request
https_response = http_response
def parse_iso8601(date_str):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
m = re.search(
r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$',
date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group(0))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
dt = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S') - timezone
return calendar.timegm(dt.timetuple())
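# Hedged examples (not in the original): the timezone designator, when present,
# is folded into the returned UNIX timestamp.
def _demo_parse_iso8601():
    assert parse_iso8601(u'2014-03-10T12:00:00Z') == 1394452800
    assert parse_iso8601(u'2014-03-10T14:00:00+02:00') == 1394452800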
def unified_strdate(date_str):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
#Replace commas
date_str = date_str.replace(',', ' ')
# %z (UTC offset) is only supported in python>=3.2
date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
format_expressions = [
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%b %d %Y',
'%Y-%m-%d',
'%d.%m.%Y',
'%d/%m/%Y',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
]
for expression in format_expressions:
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
return upload_date
def determine_ext(url, default_ext=u'unknown_video'):
guess = url.partition(u'?')[0].rpartition(u'.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
else:
return default_ext
def subtitles_filename(filename, sub_lang, sub_format):
return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
def date_from_str(date_str):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today)[+-][0-9](day|week|month|year)(s)?"""
today = datetime.date.today()
    if date_str == 'now' or date_str == 'today':
return today
match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
if match is not None:
sign = match.group('sign')
time = int(match.group('time'))
if sign == '-':
time = -time
unit = match.group('unit')
        # A rough approximation: treat months as 30 days and years as 365
if unit == 'month':
unit = 'day'
time *= 30
elif unit == 'year':
unit = 'day'
time *= 365
unit += 's'
delta = datetime.timedelta(**{unit: time})
return today + delta
return datetime.datetime.strptime(date_str, "%Y%m%d").date()
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
            raise ValueError('Date range: "%s": the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day,day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % ( self.start.isoformat(), self.end.isoformat())
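# Hedged usage sketch (not in the original): DateRange accepts the same string
# formats as date_from_str, and an open end defaults to datetime.max.
def _demo_daterange():
    assert '20140115' in DateRange('20140101', '20140131')
    assert '20140201' not in DateRange(end='20140131')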
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def write_string(s, out=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if ('b' in getattr(out, 'mode', '') or
sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
s = s.encode(preferredencoding(), 'ignore')
try:
out.write(s)
except UnicodeEncodeError:
# In Windows shells, this can fail even when the codec is just charmap!?
# See https://wiki.python.org/moin/PrintFails#Issue
if sys.platform == 'win32' and hasattr(out, 'encoding'):
s = s.encode(out.encoding, 'ignore').decode(out.encoding)
out.write(s)
else:
raise
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
if isinstance(chr(0), bytes): # Python 2
return ''.join([chr(x) for x in xs])
else:
return bytes(xs)
def get_cachedir(params={}):
cache_root = os.environ.get('XDG_CACHE_HOME',
os.path.expanduser('~/.cache'))
return params.get('cachedir', os.path.join(cache_root, 'youtube-dl'))
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
import fcntl
def _lock_file(f, exclusive):
fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.lockf(f, fcntl.LOCK_UN)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
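# Hedged usage sketch (not in the original): an exclusive lock guards the
# write, a shared lock the read; both are released when the block exits.
def _demo_locked_file(path):
    with locked_file(path, 'w', encoding='utf-8') as f:
        f.write(u'archive contents\n')
    with locked_file(path, 'r', encoding='utf-8') as f:
        return f.read()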
def shell_quote(args):
quoted_args = []
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(pipes.quote(a))
return u' '.join(quoted_args)
def takewhile_inclusive(pred, seq):
""" Like itertools.takewhile, but include the latest evaluated element
(the first element so that Not pred(e)) """
for e in seq:
yield e
if not pred(e):
return
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
sdata = compat_urllib_parse.urlencode(
{u'__youtubedl_smuggle': json.dumps(data)})
return url + u'#' + sdata
def unsmuggle_url(smug_url, default=None):
if not '#__youtubedl_smuggle' in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition(u'#')
jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
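# Hedged round-trip example (not in the original): the extra data rides in the
# URL fragment as JSON; the URL and payload here are made up.
def _demo_smuggle_url():
    smugged = smuggle_url(u'http://example.com/video', {u'referer': u'http://example.com/'})
    url, data = unsmuggle_url(smugged)
    assert url == u'http://example.com/video'
    assert data == {u'referer': u'http://example.com/'}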
def format_bytes(bytes):
if bytes is None:
return u'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(bytes, 1024.0))
suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent]
converted = float(bytes) / float(1024 ** exponent)
return u'%.2f%s' % (converted, suffix)
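# Hedged examples (not in the original) of the binary-prefix formatting above.
def _demo_format_bytes():
    assert format_bytes(0) == u'0.00B'
    assert format_bytes(1536) == u'1.50KiB'
    assert format_bytes(None) == u'N/A'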
def str_to_int(int_str):
int_str = re.sub(r'[,\.]', u'', int_str)
return int(int_str)
def get_term_width():
columns = os.environ.get('COLUMNS', None)
if columns:
return int(columns)
try:
sp = subprocess.Popen(
['stty', 'size'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
return int(out.split()[1])
except:
pass
return None
def month_by_name(name):
""" Return the number of a month by (locale-independently) English name """
ENGLISH_NAMES = [
u'January', u'February', u'March', u'April', u'May', u'June',
u'July', u'August', u'September', u'October', u'November', u'December']
try:
return ENGLISH_NAMES.index(name) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
return re.sub(
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
u'&',
xml_str)
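# Hedged example (not in the original): only bare ampersands are escaped;
# existing entities pass through untouched.
def _demo_fix_xml_ampersands():
    assert fix_xml_ampersands(u'a=1&b=2 &amp; &#169;') == u'a=1&amp;b=2 &amp; &#169;'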
def setproctitle(title):
assert isinstance(title, compat_str)
try:
libc = ctypes.cdll.LoadLibrary("libc.so.6")
except OSError:
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
if s.startswith(start):
return s[len(start):]
return s
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip(u'/').split(u'/')[-1]
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return "HEAD"
def int_or_none(v, scale=1):
return v if v is None else (int(v) // scale)
def parse_duration(s):
if s is None:
return None
m = re.match(
r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?$', s)
if not m:
return None
res = int(m.group('secs'))
if m.group('mins'):
res += int(m.group('mins')) * 60
if m.group('hours'):
res += int(m.group('hours')) * 60 * 60
return res
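# Hedged examples (not in the original) of the duration formats accepted above.
def _demo_parse_duration():
    assert parse_duration('1:23:45') == 5025
    assert parse_duration('90s') == 90
    assert parse_duration(None) is None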
def prepend_extension(filename, ext):
name, real_ext = os.path.splitext(filename)
return u'{0}.{1}{2}'.format(name, ext, real_ext)
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except OSError:
return False
return exe
class PagedList(object):
def __init__(self, pagefunc, pagesize):
self._pagefunc = pagefunc
self._pagesize = pagesize
def __len__(self):
# This is only useful for tests
return len(self.getslice())
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
page_results = list(self._pagefunc(pagenum))
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
res.extend(page_results)
            # A little optimization - if the current page is not "full", i.e.
            # does not contain page_size videos, then we can assume that this
            # page is the last one - there are no more ids on further pages,
            # so there is no need to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
return res
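# Hedged usage sketch (not in the original): pagefunc maps a zero-based page
# number to that page's items; getslice stops as soon as a short page shows
# there is nothing further to fetch. The 25-item source is made up.
def _demo_pagedlist():
    def pagefunc(pagenum):
        start = pagenum * 10
        return range(start, min(start + 10, 25))
    pl = PagedList(pagefunc, 10)
    assert pl.getslice(5, 15) == list(range(5, 15))
    assert len(pl) == 25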
def uppercase_escape(s):
return re.sub(
r'\\U([0-9a-fA-F]{8})',
lambda m: compat_chr(int(m.group(1), base=16)), s)
try:
struct.pack(u'!I', 0)
except TypeError:
# In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
def struct_pack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.pack(spec, *args)
def struct_unpack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.unpack(spec, *args)
else:
struct_pack = struct.pack
struct_unpack = struct.unpack
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = u'\xef\xbb\xbf'
if url.startswith(BOM_UTF8):
url = url[len(BOM_UTF8):]
url = url.strip()
if url.startswith(('#', ';', ']')):
return False
return url
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
def parse_xml(s):
class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
def doctype(self, name, pubid, system):
pass # Ignore doctypes
parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
return xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
def strip_jsonp(code):
return re.sub(r'(?s)^[a-zA-Z_]+\s*\(\s*(.*)\);\s*?\s*$', r'\1', code)
| laborautonomo/youtube-dl | youtube_dl/utils.py | Python | unlicense | 42,818 | 0.002382 |
# -*- coding: utf-8 -*-
from click import open_file
def read_file(path):
with open_file(path, 'r', encoding='utf8') as f:
return ''.join(f.readlines())
def test_import():
from catex import LaTeX
def test_import_():
import catex
def test_latex_simple():
from catex import LaTeX
f1 = LaTeX.from_file("tests/data/latex1.tex")
f1.merge(f1)
def test_merge():
from catex.core import merge
rv = merge("tests/data/latex1.tex", "tests/data/latex2.tex")
expected_result = read_file("tests/data/merge1_2.tex")
assert rv.__repr__() == expected_result
def test_merge_packages():
from catex.core import merge_packages
pkg1 = [
['th', ['mou', 'moi', 'mumu=tutu']],
['blo', []],
['bli', ['tut']],
['bli', []],
['bleh', []],
['bla', []]]
pkg2 = [
['th', ['mou', 'moi', 'mumu=tutu']],
['blo', []],
['bli', ['tut']],
['bli', []],
['bleh', []],
['bla', []]
]
pkg_rv = [
['th', ['mumu=tutu', 'mou', 'moi']],
['blo', []],
['bli', ['tut']],
['bli', ['tut']], ['bleh', []],
['bla', []]
]
assert merge_packages(pkg1, pkg2) == pkg_rv
def test_repr():
from catex.core import LaTeX
l = LaTeX.from_file("tests/data/latex_sorted.tex")
text = read_file("tests/data/latex_sorted.tex")
assert l.__repr__() == ''.join(text)
| Alexis-benoist/CaTeX | tests/test_core.py | Python | apache-2.0 | 1,434 | 0.001395 |
# -*- coding: utf-8 -*-
# DO NOT DELETE
import StringIO
import csv
import datetime
today = datetime.date.today()
from flask import (
Blueprint,
make_response
)
from flask.ext.login import login_required
from sqlalchemy import desc
from feedback.surveys.models import Survey
blueprint = Blueprint(
'surveys',
__name__,
url_prefix='/surveys',
static_folder="../static")
@blueprint.route('/download')
@login_required
def to_csv():
csvList = []
csvList.append([
'date_submitted',
'method',
'language',
'route',
'rating',
'role',
'get_done',
'purpose',
'best',
'worst',
'improvement',
'follow_up',
'contact',
'more_comments'])
survey_models = Survey.query.order_by(desc(Survey.date_submitted)).all()
for survey_model in survey_models:
csvList.append([
survey_model.date_submitted,
survey_model.method,
survey_model.lang,
survey_model.route_en,
survey_model.rating,
survey_model.role_en,
survey_model.get_done,
survey_model.purpose_en,
survey_model.best_en,
survey_model.worst_en,
survey_model.improvement,
survey_model.follow_up,
survey_model.contact,
survey_model.more_comments])
strIO = StringIO.StringIO()
writer = csv.writer(strIO)
writer.writerows(csvList)
output = make_response(strIO.getvalue())
output.headers["Content-Disposition"] = "attachment; filename=export.csv"
output.headers["Content-type"] = "text/csv"
return output
| codeforamerica/mdc-feedback | feedback/surveys/views.py | Python | mit | 1,698 | 0.002356 |
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2005 Al Riddoch (See the file COPYING for details).
from atlas import *
from physics import *
from physics import Quaternion
from physics import Vector3D
import math
import sys
from random import *
import server
class Logging(server.Task):
""" A proof of concept task for logging."""
def cut_operation(self, op):
""" Op handler for cut op which activates this task """
# print "Logging.cut"
if len(op) < 1:
sys.stderr.write("Logging task has no target in cut op")
self.target = server.world.get_object_ref(op[0].id)
self.tool = op.to
def tick_operation(self, op):
""" Op handler for regular tick op """
# print "Logging.tick"
if self.target() is None:
# print "Target is no more"
self.irrelevant()
return
current_status = self.target().status
        # Measure the distance between the entities' horizontal edges; otherwise
        # we could not reach the target if either entity is too thick.
        distance_between_entity_edges_squared = square_horizontal_edge_distance(self.character.location, self.target().location)
        # Assume a standard human reach of 1.5 meters and use it to decide
        # whether we are close enough to perform the logging.
        standard_human_reach_squared = 1.5 * 1.5
if distance_between_entity_edges_squared > standard_human_reach_squared:
self.progress = 1 - current_status
self.rate = 0
return self.next_tick(1.75)
res=Oplist()
if current_status > 0.11:
set=Operation("set", Entity(self.target().id, status=current_status-0.1), to=self.target())
res.append(set)
# print "CHOP",current_status
normal=Vector3D(0,0,1)
# print "LOC.ori ", self.target().location.orientation
# calculate how tilted the tree is already
if self.target().location.orientation.is_valid():
normal.rotate(self.target().location.orientation)
# print "Normal ", normal, normal.dot(Vector3D(0,0,1))
# if the tree is standing, and it's already half cut down, rotate
# it to be horizontal, away from the character
if normal.dot(Vector3D(0,0,1)) > 0.8 and current_status < 0.5:
# print "Fall down"
# determine the axis of rotation by cross product of the vector
# from character to tree, and vertically upward vector
axis = distance_to(self.character.location,
self.target().location).cross(Vector3D(0,0,1))
# the axis must be a unit vector
try:
axis = axis.unit_vector()
except ZeroDivisionError:
axis = Vector3D(1,0,0)
# print "axis ", axis
# create a rotation of 90 degrees around this axis
orient = Quaternion(axis, math.pi / -2.0)
# if the tree is rotated, apply this too
if self.target().location.orientation.is_valid():
orient = self.target().location.orientation * orient
move_location = self.target().location.copy()
move_location.orientation = orient
move = Operation("move", Entity(self.target().id, mode='felled',
location=move_location),
to = self.target())
res.append(move)
else:
# print "become log"
set = Operation("set", Entity(self.target().id, status = -1),
to = self.target())
res.append(set)
create_loc = self.target().location.copy()
create_loc.orientation = self.target().location.orientation
create = Operation("create",
Entity(parents = ["lumber"],
mass = self.target().mass,
location = create_loc,
bbox = self.target().bbox),
to = self.target())
res.append(create)
self.progress = 1 - current_status
self.rate = 0.1 / 1.75
res.append(self.next_tick(1.75))
return res
| ytaben/cyphesis | rulesets/mason/world/tasks/Logging.py | Python | gpl-2.0 | 4,546 | 0.009899 |
# Copyright (C) 2014 @threatlead
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class SpynetRat(Signature):
name = "rat_spynet"
description = "Creates known SpyNet mutexes and/or registry changes."
severity = 3
categories = ["rat"]
families = ["spynet"]
authors = ["threatlead", "nex"]
references = [
"https://malwr.com/analysis/ZDQ1NjBhNWIzNTdkNDRhNjhkZTFmZTBkYTU2YjMwNzg/",
"https://malwr.com/analysis/MjkxYmE2YzczNzcwNGJiZjljNDcwMzA2ZDkyNDU2Y2M/",
"https://malwr.com/analysis/N2E3NWRiNDMyYjIwNGE0NTk3Y2E5NWMzN2UwZTVjMzI/",
"https://malwr.com/analysis/N2Q2NWY0Y2MzOTM0NDEzNmE1MTdhOThiNTQxMzhiNzk/"
]
minimum = "1.2"
def run(self):
indicators = [
".*CYBERGATEUPDATE",
".*\(\(SpyNet\)\).*",
".*Spy-Net.*",
".*X_PASSWORDLIST_X.*",
".*X_BLOCKMOUSE_X.*",
#".*PERSIST", # Causes false positive detection on XtremeRAT samples.
".*_SAIR",
]
for indicator in indicators:
if self.check_mutex(pattern=indicator, regex=True):
return True
keys = [
".*\\SpyNet\\.*",
]
for key in keys:
if self.check_write_key(pattern=key, regex=True):
return True
return False
| lixiangning888/whole_project | modules/signatures_orignal/rat_spynet.py | Python | lgpl-3.0 | 1,986 | 0.006546 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-12 02:22
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operation_finance', '0020_auto_20170711_1429'),
]
operations = [
migrations.AlterField(
model_name='invoice',
name='due_by',
field=models.DateField(default=datetime.datetime(2017, 8, 6, 2, 22, 37, 974278)),
),
]
| michealcarrerweb/LHVent_app | operation_finance/migrations/0021_auto_20170712_0222.py | Python | mit | 519 | 0.001927 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from bedrock.redirects.util import no_redirect, platform_redirector, redirect
def firefox_mobile_faq(request, *args, **kwargs):
qs = request.META.get("QUERY_STRING", "")
if "os=firefox-os" in qs:
return "https://support.mozilla.org/products/firefox-os"
return "https://support.mozilla.org/products/mobile"
def firefox_channel(*args, **kwargs):
return platform_redirector("firefox.channel.desktop", "firefox.channel.android", "firefox.channel.ios")
redirectpatterns = (
# overrides
# issue 8096
redirect(r"^firefox/beta/all/?$", "firefox.all", anchor="product-desktop-beta"),
redirect(r"^firefox/developer/all/?$", "firefox.all", anchor="product-desktop-developer"),
redirect(r"^firefox/aurora/all/?$", "firefox.all", anchor="product-desktop-developer"),
redirect(r"^firefox/nightly/all/?$", "firefox.all", anchor="product-desktop-nightly"),
redirect(r"^firefox/organizations/all/?$", "firefox.all", anchor="product-desktop-esr"),
redirect(r"^firefox/android/all/?$", "firefox.all", anchor="product-android-release"),
redirect(r"^firefox/android/beta/all/?$", "firefox.all", anchor="product-android-beta"),
redirect(r"^firefox/android/nightly/all/?$", "firefox.all", anchor="product-android-nightly"),
# bug 831810 & 1142583 & 1239960, 1329931
redirect(r"^mwc/?$", "https://support.mozilla.org/products/firefox-os", re_flags="i"),
# bug 748503
redirect(r"^projects/firefox/[^/]+a[0-9]+/firstrun(?P<p>.*)$", "/firefox/nightly/firstrun{p}"),
# bug 1275483
redirect(r"^firefox/nightly/whatsnew/?", "firefox.nightly.firstrun"),
# bug 840814
redirect(
r"^projects/firefox"
r"(?P<version>/(?:\d+\.\d+\.?(?:\d+)?\.?(?:\d+)?(?:[a|b]?)(?:\d*)(?:pre)?(?:\d)?))"
r"(?P<page>/(?:firstrun|whatsnew))"
r"(?P<rest>/.*)?$",
"/firefox{version}{page}{rest}",
),
# bug 877165
redirect(r"^firefox/connect", "mozorg.home"),
# bug 657049, 1238851
redirect(r"^firefox/accountmanager/?$", "https://developer.mozilla.org/Persona"),
# Bug 1009247, 1101220, 1299947, 1314603, 1328409
redirect(r"^(firefox/)?beta/?$", firefox_channel(), cache_timeout=0, anchor="beta"),
redirect(r"^(firefox/)?aurora/?$", firefox_channel(), cache_timeout=0, anchor="aurora"),
redirect(r"^(firefox/)?nightly/?$", firefox_channel(), cache_timeout=0, anchor="nightly"),
redirect(r"^mobile/beta/?$", "firefox.channel.android", anchor="beta"),
redirect(r"^mobile/aurora/?$", "firefox.channel.android", anchor="aurora"),
redirect(r"^mobile/nightly/?$", "firefox.channel.android", anchor="nightly"),
# bug 988044
redirect(r"^firefox/unsupported-systems\.html$", "firefox.unsupported-systems"),
# bug 736934, 860865, 1101220, 1153351
redirect(r"^mobile/notes/?$", "/firefox/android/notes/"),
redirect(r"^mobile/(?P<channel>(beta|aurora))/notes/?$", "/firefox/android/{channel}/notes/"),
redirect(r"^firefox/system-requirements(\.html)?$", "/firefox/system-requirements/"),
redirect(r"^firefox/(?P<channel>(beta|aurora|organizations))/system-requirements(\.html)?$", "/firefox/{channel}/system-requirements/"),
# bug 1155870
redirect(r"^firefox/os/(releases|notes)/?$", "https://developer.mozilla.org/Firefox_OS/Releases"),
redirect(r"^firefox/os/(?:release)?notes/(?P<v>[^/]+)/?$", "https://developer.mozilla.org/Firefox_OS/Releases/{v}"),
# bug 878871
redirect(r"^firefoxos", "/firefox/os/"),
# bug 1438302
no_redirect(r"^firefox/download/thanks/?$"),
# Bug 1006616
redirect(r"^download/?$", "firefox.new"),
# Bug 1409554
redirect(r"^(firefox|mobile)/download", "firefox.new"),
# bug 837883
redirect(r"^firefox/firefox\.exe$", "mozorg.home", re_flags="i"),
# bug 821006
redirect(r"^firefox/all(\.html)?$", "firefox.all"),
# bug 727561
redirect(r"^firefox/search(?:\.html)?$", "firefox.new"),
# bug 860865, 1101220, issue 8096
redirect(r"^firefox/all-(?:beta|rc)(?:/|\.html)?$", "firefox.all", anchor="product-desktop-beta"),
redirect(r"^firefox/all-aurora(?:/|\.html)?$", "firefox.all", anchor="product-desktop-developer"),
redirect(r"^firefox/aurora/(?P<page>all|notes|system-requirements)/?$", "/firefox/developer/{page}/"),
redirect(r"^firefox/organizations/all\.html$", "firefox.all", anchor="product-desktop-esr"),
# bug 729329
redirect(r"^mobile/sync", "firefox.sync"),
# bug 882845
redirect(r"^firefox/toolkit/download-to-your-devices", "firefox.new"),
# bug 1014823
redirect(r"^(products/)?firefox/releases/whatsnew/?$", "firefox.whatsnew"),
# bug 929775
redirect(
r"^firefox/update",
"firefox.new",
query={
"utm_source": "firefox-browser",
"utm_medium": "firefox-browser",
"utm_campaign": "firefox-update-redirect",
},
),
# Bug 868182, 986174
redirect(r"^(m|(firefox/)?mobile)/features/?$", "firefox.browsers.mobile.index"),
redirect(r"^(m|(firefox/)?mobile)/faq/?$", firefox_mobile_faq, query=False),
# bug 884933
redirect(r"^(m|(firefox/)?mobile)/platforms/?$", "https://support.mozilla.org/kb/will-firefox-work-my-mobile-device"),
redirect(r"^m/?$", "firefox.new"),
# Bug 730488 deprecate /firefox/all-older.html
redirect(r"^firefox/all-older\.html$", "firefox.new"),
# bug 1120658
redirect(r"^seamonkey-transition\.html$", "http://www-archive.mozilla.org/seamonkey-transition.html"),
# Bug 1186373
redirect(r"^firefox/hello/npssurvey/?$", "https://www.surveygizmo.com/s3/2227372/Firefox-Hello-Product-Survey", permanent=False),
# Bug 1221739
redirect(r"^firefox/hello/feedbacksurvey/?$", "https://www.surveygizmo.com/s3/2319863/d2b7dc4b5687", permanent=False),
# bug 1148127
redirect(r"^products/?$", "firefox"),
# Bug 1110927
redirect(r"^(products/)?firefox/start/central\.html$", "firefox.new"),
redirect(r"^firefox/sync/firstrun\.html$", "firefox.sync"),
# Bug 920212
redirect(r"^firefox/fx(/.*)?", "firefox"),
# Bug 979531, 1003727, 979664, 979654, 979660
redirect(r"^firefox/customize/?$", "https://support.mozilla.org/kb/customize-firefox-controls-buttons-and-toolbars"),
redirect(r"^firefox/(?:performance|happy|speed|memory)/?$", "firefox.features.fast"),
redirect(r"^firefox/security/?$", "firefox.features.independent"),
redirect(r"^firefox/technology/?$", "https://developer.mozilla.org/docs/Tools"),
# Previously Bug 979527 / Github #10004 "Getting Started" Page
redirect(r"^(products/)?firefox/central(/|\.html|-lite\.html)?$", "firefox"),
# bug 868169
redirect(r"^mobile/android-download\.html$", "https://play.google.com/store/apps/details", query={"id": "org.mozilla.firefox"}, merge_query=True),
redirect(
r"^mobile/android-download-beta\.html$",
"https://play.google.com/store/apps/details",
query={"id": "org.mozilla.firefox_beta"},
merge_query=True,
),
# bug 675031
redirect(
r"^projects/fennec(?P<page>/[\/\w\.-]+)?", "http://website-archive.mozilla.org/www.mozilla.org/fennec_releasenotes/projects/fennec{page}"
),
# bug 876581
redirect(r"^firefox/phishing-protection(/?)$", "https://support.mozilla.org/kb/how-does-phishing-and-malware-protection-work"),
# bug 1006079
redirect(r"^mobile/home/?(?:index\.html)?$", "https://blog.mozilla.org/services/2012/08/31/retiring-firefox-home/"),
# bug 949562
redirect(
r"^mobile/home/1\.0/releasenotes(?:/(?:index\.html)?)?$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/1.0/releasenotes/",
),
redirect(
r"^mobile/home/1\.0\.2/releasenotes(?:/(?:index\.html)?)?$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/1.0.2/releasenotes/",
),
redirect(r"^mobile/home/faq(?:/(?:index\.html)?)?$", "http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/faq/"),
# bug 960064
redirect(r"^firefox/(?P<num>vpat-[.1-5]+)(?:\.html)?$", "http://website-archive.mozilla.org/www.mozilla.org/firefox_vpat/firefox-{num}.html"),
redirect(r"^firefox/vpat(?:\.html)?", "http://website-archive.mozilla.org/www.mozilla.org/firefox_vpat/firefox-vpat-3.html"),
# bug 1017564
redirect(r"^mobile/.+/system-requirements/?$", "https://support.mozilla.org/kb/will-firefox-work-my-mobile-device"),
# bug 858315
redirect(r"^projects/devpreview/firstrun(?:/(?:index\.html)?)?$", "/firefox/firstrun/"),
redirect(
r"^projects/devpreview/(?P<page>[\/\w\.-]+)?$",
"http://website-archive.mozilla.org/www.mozilla.org/devpreview_releasenotes/projects/devpreview/{page}",
),
# bug 1001238, 1025056
no_redirect(r"^firefox/(24\.[5678]\.\d|28\.0)/releasenotes/?$"),
# bug 1235082
no_redirect(r"^firefox/23\.0(\.1)?/releasenotes/?$"),
# bug 947890, 1069902
redirect(
r"^firefox/releases/(?P<v>[01]\.(?:.*))$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/firefox/releases/{v}",
),
redirect(
r"^(?P<path>(?:firefox|mobile)/(?:\d)\.(?:.*)/releasenotes(?:.*))$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/{path}",
),
#
# bug 988746, 989423, 994186, 1153351
redirect(r"^mobile/(?P<v>2[38]\.0(?:\.\d)?|29\.0(?:beta|\.\d)?)/releasenotes/?$", "/firefox/android/{v}/releasenotes/"),
redirect(r"^mobile/(?P<v>[3-9]\d\.\d(?:a2|beta|\.\d)?)/(?P<p>aurora|release)notes/?$", "/firefox/android/{v}/{p}notes/"),
# bug 1041712, 1069335, 1069902
redirect(
r"^(?P<prod>firefox|mobile)/(?P<vers>([0-9]|1[0-9]|2[0-8])\.(\d+(?:beta|a2|\.\d+)?))"
r"/(?P<channel>release|aurora)notes/(?P<page>[\/\w\.-]+)?$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/{prod}/{vers}/{channel}notes/{page}",
),
    # bug 767614 superseded by bug 957711 and 1003718 and 1239960
redirect(r"^(fennec)/?$", "firefox"),
# issue 8749
redirect(r"^(mobile)/?$", "firefox.browsers.mobile.index"),
# bug 876668
redirect(r"^mobile/customize(?:/.*)?$", "firefox.browsers.mobile.index"),
# bug 1211907
redirect(r"^firefox/independent/?$", "firefox.new"),
redirect(r"^firefox/personal/?$", "firefox.new"),
# bug 845983
redirect(r"^metrofirefox(?P<path>/.*)?$", "/firefox{path}"),
# bug 1003703, 1009630
redirect(
r"^firefox(?P<vers>/.+)/firstrun/eu/?$",
"/firefox{vers}/firstrun/",
query={
"utm_source": "direct",
"utm_medium": "none",
"utm_campaign": "redirect",
"utm_content": "eu-firstrun-redirect",
},
),
# bug 960543
redirect(r"^firefox/(?P<vers>[23])\.0/eula", "/legal/eula/firefox-{vers}/"),
# bug 1150713
redirect(r"^firefox/sms(?:/.*)?$", "firefox"),
# Redirects for SeaMonkey project website, now living at seamonkey-project.org
redirect(r"^projects/seamonkey/$", "http://www.seamonkey-project.org/"),
redirect(r"^projects/seamonkey/artwork\.html$", "http://www.seamonkey-project.org/dev/artwork"),
redirect(r"^projects/seamonkey/community\.html$", "http://www.seamonkey-project.org/community"),
redirect(r"^projects/seamonkey/get-involved\.html$", "http://www.seamonkey-project.org/dev/get-involved"),
redirect(r"^projects/seamonkey/index\.html$", "http://www.seamonkey-project.org/"),
redirect(r"^projects/seamonkey/news\.html$", "http://www.seamonkey-project.org/news"),
redirect(r"^projects/seamonkey/project-areas\.html$", "http://www.seamonkey-project.org/dev/project-areas"),
redirect(r"^projects/seamonkey/releases/$", "http://www.seamonkey-project.org/releases/"),
redirect(r"^projects/seamonkey/releases/index\.html$", "http://www.seamonkey-project.org/releases/"),
redirect(r"^projects/seamonkey/review-and-flags\.html$", "http://www.seamonkey-project.org/dev/review-and-flags"),
redirect(r"^projects/seamonkey/releases/(?P<vers>1\..*)\.html$", "http://www.seamonkey-project.org/releases/{vers}"),
redirect(r"^projects/seamonkey/releases/seamonkey(?P<x>.*)/index\.html$", "http://www.seamonkey-project.org/releases/seamonkey{x}/"),
redirect(r"^projects/seamonkey/releases/seamonkey(?P<x>.*/.*)\.html$", "http://www.seamonkey-project.org/releases/seamonkey{x}"),
redirect(r"^projects/seamonkey/releases/updates/(?P<x>.*)$", "http://www.seamonkey-project.org/releases/updates/{x}"),
redirect(r"^projects/seamonkey/start/$", "http://www.seamonkey-project.org/start/"),
# Bug 638948 redirect beta privacy policy link
redirect(r"^firefox/beta/feedbackprivacypolicy/?$", "/privacy/firefox/"),
# Bug 1238248
redirect(r"^firefox/push/?$", "https://support.mozilla.org/kb/push-notifications-firefox"),
# Bug 1239960
redirect(r"^firefox/partners/?$", "https://support.mozilla.org/products/firefox-os"),
# Bug 1243060
redirect(r"^firefox/tiles/?$", "https://support.mozilla.org/kb/about-tiles-new-tab"),
# Bug 1239863, 1329931
redirect(r"^firefox/os(/.*)?$", "https://support.mozilla.org/products/firefox-os"),
# Bug 1252332
redirect(r"^sync/?$", "firefox.sync"),
# Bug 424204
redirect(r"^firefox/help/?$", "https://support.mozilla.org/"),
redirect(r"^fxandroid/?$", "firefox.browsers.mobile.android"),
# Bug 1255882
redirect(r"^firefox/personal", "firefox.new"),
redirect(r"^firefox/upgrade", "firefox.new"),
redirect(r"^firefox/ie", "firefox.new"),
# must go above the bug 1255882 stuff below
redirect(r"^projects/xul/joy-of-xul\.html$", "https://developer.mozilla.org/docs/Mozilla/Tech/XUL/The_Joy_of_XUL"),
redirect(r"^projects/xul/xre(old)?\.html$", "https://developer.mozilla.org/docs/Archive/Mozilla/XULRunner"),
redirect(
r"^projects/xslt/js-interface\.html$",
"https://developer.mozilla.org/docs/Web/XSLT/Using_the_Mozilla_JavaScript_interface_to_XSL_Transformations",
),
redirect(r"^projects/xslt/faq\.html$", "https://developer.mozilla.org/docs/Web/API/XSLTProcessor/XSL_Transformations_in_Mozilla_FAQ"),
redirect(r"^projects/xslt/standalone\.html$", "https://developer.mozilla.org/docs/Archive/Mozilla/Building_TransforMiiX_standalone"),
redirect(r"^projects/plugins/first-install-problem\.html$", "https://developer.mozilla.org/Add-ons/Plugins/The_First_Install_Problem"),
redirect(
r"^projects/plugins/install-scheme\.html$", "https://developer.mozilla.org/docs/Installing_plugins_to_Gecko_embedding_browsers_on_Windows"
),
redirect(
r"^projects/plugins/npruntime-sample-in-visual-studio\.html$",
"https://developer.mozilla.org/docs/Compiling_The_npruntime_Sample_Plugin_in_Visual_Studio",
),
redirect(r"^projects/plugins/npruntime\.html$", "https://developer.mozilla.org/docs/Plugins/Guide/Scripting_plugins"),
redirect(
r"^projects/plugins/plugin-host-control\.html$",
"https://developer.mozilla.org/docs/Archive/Mozilla/ActiveX_Control_for_Hosting_Netscape_Plug-ins_in_IE",
),
redirect(
r"^projects/plugins/xembed-plugin-extension\.html$", "https://developer.mozilla.org/Add-ons/Plugins/XEmbed_Extension_for_Mozilla_Plugins"
),
redirect(r"^projects/netlib/http/http-debugging\.html$", "https://developer.mozilla.org/docs/Mozilla/Debugging/HTTP_logging"),
redirect(r"^projects/netlib/integrated-auth\.html$", "https://developer.mozilla.org/docs/Mozilla/Integrated_authentication"),
redirect(r"^projects/netlib/Link_Prefetching_FAQ\.html$", "https://developer.mozilla.org/docs/Web/HTTP/Link_prefetching_FAQ"),
redirect(r"^projects/embedding/GRE\.html$", "https://developer.mozilla.org/docs/Archive/Mozilla/GRE"),
redirect(r"^projects/embedding/windowAPIs\.html$", "https://developer.mozilla.org/docs/Mozilla/Tech/Embedded_Dialog_API"),
redirect(r"^projects/embedding/howto/config\.html$", "https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser"),
redirect(r"^projects/embedding/howto/Initializations\.html$", "https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser"),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasicsTOC\.html$", "https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#toc"
),
redirect(r"^projects/embedding/embedoverview/EmbeddingBasics\.html$", "https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics"),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics2\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#Why_Gecko",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics3\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#What_You_Need_to_Embed",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics4\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#Getting_the_Code",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics5\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#Understanding_the_Coding_Environment",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics6\.html$", "https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XPCOM"
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics7\.html$", "https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XPIDL"
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics8\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XPConnect_and_XPT_files",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics9\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#String_classes",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics10\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XUL.2FXBL",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics11\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#Choosing_Additional_Functionalities",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics12\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#What_Gecko_Provides",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics13\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#What_You_Provide",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics14\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#Common_Embedding_Tasks",
),
redirect(
r"^projects/embedding/embedoverview/EmbeddingBasics16\.html$",
"https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#Appendix:_Data_Flow_Inside_Gecko",
),
redirect(r"^projects/embedding/examples/", "https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser"),
# Bug 1255882
redirect(r"^projects/bonecho/anti-phishing/?$", "https://support.mozilla.org/kb/how-does-phishing-and-malware-protection-work"),
redirect(r"^projects/bonecho(/.*)?$", "firefox.channel.desktop"),
redirect(r"^projects/bonsai(/.*)?$", "https://wiki.mozilla.org/Bonsai"),
redirect(r"^projects/camino(/.*)?$", "http://caminobrowser.org/"),
redirect(r"^projects/cck(/.*)?$", "https://wiki.mozilla.org/CCK"),
redirect(r"^projects/chimera(/.*)?$", "http://caminobrowser.org/"),
redirect(r"^projects/deerpark(/.*)?$", "firefox.channel.desktop"),
redirect(r"^projects/embedding/faq\.html$", "https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/FAQ/How_do_I..."),
redirect(r"^projects/embedding(/.*)?$", "https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla"),
redirect(r"^projects/granparadiso(/.*)?$", "firefox.channel.desktop"),
redirect(r"^projects/inspector/faq\.html$", "https://developer.mozilla.org/docs/Tools/Add-ons/DOM_Inspector/DOM_Inspector_FAQ"),
redirect(r"^projects/inspector(/.*)?$", "https://developer.mozilla.org/docs/Tools/Add-ons/DOM_Inspector"),
redirect(r"^projects/javaconnect(/.*)?$", "http://developer.mozilla.org/en/JavaXPCOM"),
redirect(r"^projects/minefield(/.*)?$", "firefox.channel.desktop"),
redirect(r"^projects/minimo(/.*)?$", "https://wiki.mozilla.org/Mobile"),
redirect(r"^projects/namoroka(/.*)?$", "firefox.channel.desktop"),
redirect(r"^projects/nspr(?:/.*)?$", "https://developer.mozilla.org/docs/NSPR"),
redirect(r"^projects/netlib(/.*)?$", "https://developer.mozilla.org/docs/Mozilla/Projects/Necko"),
redirect(r"^projects/plugins(/.*)?$", "https://developer.mozilla.org/Add-ons/Plugins"),
redirect(r"^projects/rt-messaging(/.*)?$", "http://chatzilla.hacksrus.com/"),
redirect(r"^projects/shiretoko(/.*)?$", "firefox.channel.desktop"),
redirect(r"^projects/string(/.*)?$", "https://developer.mozilla.org/en/XPCOM_string_guide"),
redirect(r"^projects/tech-evangelism(/.*)?$", "https://wiki.mozilla.org/Evangelism"),
redirect(r"^projects/venkman(/.*)?$", "https://developer.mozilla.org/docs/Archive/Mozilla/Venkman"),
redirect(r"^projects/webservices/examples/babelfish-wsdl(/.*)?$", "https://developer.mozilla.org/docs/SOAP_in_Gecko-based_Browsers"),
redirect(r"^projects/xbl(/.*)?$", "https://developer.mozilla.org/docs/Mozilla/Tech/XBL"),
redirect(r"^projects/xforms(/.*)?$", "https://developer.mozilla.org/docs/Archive/Web/XForms"),
redirect(r"^projects/xpcom(/.*)?$", "https://developer.mozilla.org/docs/Mozilla/Tech/XPCOM"),
redirect(r"^projects/xpinstall(/.*)?$", "https://developer.mozilla.org/docs/Archive/Mozilla/XPInstall"),
redirect(r"^projects/xslt(/.*)?$", "https://developer.mozilla.org/docs/Web/XSLT"),
redirect(r"^projects/xul(/.*)?$", "https://developer.mozilla.org/docs/Mozilla/Tech/XUL"),
redirect(r"^quality/help(/.*)?$", "http://quality.mozilla.org/get-involved"),
redirect(r"^quality(/.*)?$", "http://quality.mozilla.org/"),
# Bug 654614 /blocklist -> addons.m.o/blocked
redirect(r"^blocklist(/.*)?$", "https://addons.mozilla.org/blocked/"),
redirect(r"^products/firebird/compare/?$", "/firefox/browsers/compare/"),
redirect(r"^products/firebird/?$", "firefox"),
redirect(r"^products/firebird/download/$", "firefox.new"),
redirect(r"^products/firefox/add-engines\.html$", "https://addons.mozilla.org/search-engines.php"),
redirect(r"^products/firefox/all$", "/firefox/all/"),
redirect(r"^products/firefox/all\.html$", "/firefox/all/"),
redirect(r"^products/firefox/banners\.html$", "/contribute/friends/"),
redirect(r"^products/firefox/buttons\.html$", "/contribute/friends/"),
redirect(r"^products/firefox/download", "firefox.new"),
redirect(r"^products/firefox/get$", "firefox.new"),
redirect(r"^products/firefox/live-bookmarks", "/firefox/features/"),
redirect(r"^products/firefox/mirrors\.html$", "http://www-archive.mozilla.org/mirrors.html"),
redirect(r"^products/firefox/releases/$", "/firefox/releases/"),
redirect(
r"^products/firefox/releases/0\.9\.2\.html$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/firefox/releases/0.9.1.html",
),
redirect(
r"^products/firefox/releases/0\.10\.1\.html$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/firefox/releases/0.10.html",
),
redirect(r"^products/firefox/search", "/firefox/features/"),
redirect(r"^products/firefox/shelf\.html$", "https://blog.mozilla.org/press/awards/"),
redirect(r"^products/firefox/smart-keywords\.html$", "https://support.mozilla.org/en-US/kb/Smart+keywords"),
redirect(r"^products/firefox/support/$", "https://support.mozilla.org/"),
redirect(r"^products/firefox/switch", "firefox.new"),
redirect(r"^products/firefox/system-requirements", "/firefox/system-requirements/"),
redirect(r"^products/firefox/tabbed-browsing", "firefox"),
redirect(r"^products/firefox/text-zoom\.html$", "https://support.mozilla.org/kb/font-size-and-zoom-increase-size-of-web-pages"),
redirect(r"^products/firefox/themes$", "https://addons.mozilla.org/themes/"),
redirect(r"^products/firefox/themes\.html$", "https://addons.mozilla.org/themes/"),
redirect(r"^products/firefox/ui-customize\.html$", "https://support.mozilla.org/kb/customize-firefox-controls-buttons-and-toolbars"),
redirect(r"^products/firefox/upgrade", "firefox.new"),
redirect(r"^products/firefox/why/$", "firefox"),
# bug 857246 redirect /products/firefox/start/ to start.mozilla.org
redirect(r"^products/firefox/start/?$", "http://start.mozilla.org"),
# issue 9008
redirect(r"^products/firefox(/.*)?$", "firefox.products.index"),
# bug 1260423
redirect(r"^firefox/choose/?$", "firefox.new"),
# bug 1288552 - redirect /secondrun/ traffic from funnelcake test
redirect(r"^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/secondrun(?:/.*)?", "firefox.browsers.mobile.index", query=False),
# bug 1293539
redirect(r"^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/tour/?$", "https://support.mozilla.org/kb/get-started-firefox-overview-main-features"),
# bug 1295332
redirect(r"^hello/?$", "https://support.mozilla.org/kb/hello-status"),
redirect(r"^firefox/hello/?$", "https://support.mozilla.org/kb/hello-status"),
redirect(r"^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/hello/start/?$", "https://support.mozilla.org/kb/hello-status"),
# bug 1299947, 1326383
redirect(r"^firefox/channel/?$", firefox_channel(), cache_timeout=0),
# Bug 1277196
redirect(
r"^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/firstrun/learnmore/?$",
"firefox.features.index",
query={
"utm_source": "firefox-browser",
"utm_medium": "firefox-browser",
"utm_campaign": "redirect",
"utm_content": "learnmore-tab",
},
),
redirect(
r"^firefox/windows-10/welcome/?$",
"https://support.mozilla.org/kb/how-change-your-default-browser-windows-10",
query={
"utm_source": "firefox-browser",
"utm_medium": "firefox-browser",
"utm_campaign": "redirect",
"utm_content": "windows10-welcome-tab",
},
),
# bug 1369732
redirect(r"^Firefox/?$", "firefox"),
# bug 1386112
redirect(r"^firefox/android/faq/?", "https://support.mozilla.org/products/mobile"),
# bug 1392796
redirect(r"^firefox/desktop/fast/?", "firefox.features.fast"),
redirect(r"^firefox/desktop/trust/?", "firefox.features.independent"),
redirect(r"^firefox/desktop/tips/?", "firefox.features.index"),
redirect(r"^firefox/desktop/customize/?", "https://support.mozilla.org/kb/customize-firefox-controls-buttons-and-toolbars"),
redirect(r"^firefox/private-browsing/?", "firefox.features.private-browsing"),
# bug 1405436
redirect(r"^firefox/organic", "/firefox/"),
redirect(r"^firefox/landing/better", "/firefox/"),
redirect(r"^firefox/(new/)?addon", "https://addons.mozilla.org"),
redirect(r"^firefox/tips", "/firefox/features/"),
redirect(r"^firefox/new/.+", "/firefox/new/"),
redirect(r"^firefox/38\.0\.3/releasenotes/$", "/firefox/38.0.5/releasenotes/"),
redirect(r"^firefox/default\.htm", "/firefox/"),
redirect(r"^firefox/android/(?P<version>\d+\.\d+(?:\.\d+)?)$", "/firefox/android/{version}/releasenotes/"),
redirect(r"^firefox/stats/", "/firefox/"),
# bug 1416706
redirect(r"^firefox/desktop/?", "firefox.new"),
# bug 1418500
redirect(r"^firefox/android/?$", "firefox.browsers.mobile.android"),
redirect(r"^firefox/focus/?$", "firefox.browsers.mobile.focus"),
redirect(r"^firefox/ios/?$", "firefox.browsers.mobile.ios"),
# issue 9502
redirect(r"^firefox/quantum/?", "/firefox/browsers/quantum/"),
# bug 1421584, issue 7491
redirect(r"^firefox/organizations/faq/?$", "firefox.enterprise.index"),
# bug 1425865 - Amazon Fire TV goes to SUMO until we have a product page.
redirect(
r"^firefox/fire-tv/?$",
"https://support.mozilla.org/products/firefox-fire-tv/",
permanent=False,
),
# bug 1430894
redirect(r"^firefox/interest-dashboard/?", "https://support.mozilla.org/kb/firefox-add-technology-modernizing"),
# bug 1419244
redirect(r"^firefox/mobile-download(/.*)?", "firefox.browsers.mobile.index"),
# bug 960651, 1436973
redirect(r"(firefox|mobile)/([^/]+)/details(/|/.+\.html)?$", "firefox.unsupported-systems", locale_prefix=False),
redirect(r"^firefox/unsupported/", "firefox.unsupported-systems"),
# bug 1428783
redirect(r"^firefox/dnt/?$", "https://support.mozilla.org/kb/how-do-i-turn-do-not-track-feature"),
# issue 6209
redirect(r"^pocket/?", "/firefox/pocket/"),
# issue 6186
redirect(r"^vote/?", "/firefox/election/"),
# issue 9391
redirect(r"^/firefox/election/?$", "firefox"),
# fxa
redirect(r"^firefox/accounts/features/?", "firefox.accounts"),
# issue 9490
redirect(r"^firefox/features/sync/?", "firefox.sync"),
# bug 1577449
redirect(r"^firefox/features/send-tabs/?", "https://support.mozilla.org/kb/send-tab-firefox-desktop-other-devices"),
# issue 6512
redirect(r"^firefox/firefox\.html$", "firefox.new"),
# issue 6979
redirect(r"^firefoxfightsforyou/?", "firefox"),
# issue 7210
redirect(r"^firefox/account/?$", "firefox.accounts"),
# issue 7287
redirect(r"^accounts/?$", "firefox.accounts"),
# issue 7436
redirect(r"^firefox/feedback/?$", "https://support.mozilla.org/questions/new/desktop"),
# issue 7491
redirect(r"^firefox/organizations/?$", "firefox.enterprise.index"),
# issue 7670
redirect(r"^/firefox/fights-for-you/?", "firefox"),
# issue #7424
redirect(r"^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/content-blocking/start/?$", "https://support.mozilla.org/kb/content-blocking"),
# issue #7424
redirect(r"^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/tracking-protection/start/?$", "https://support.mozilla.org/kb/tracking-protection"),
# issue 8596
redirect(r"firefox/xr/?$", "https://support.mozilla.org/kb/webxr-permission-info-page"),
# issue 8419
redirect(r"firefox/this-browser-comes-highly-recommended/?$", "firefox.developer.index"),
# issue 8420
redirect(r"firefox/dedicated-profiles/?$", "https://support.mozilla.org/kb/dedicated-profiles-firefox-installation"),
# issue 8641
redirect(r"^/firefox/windows-64-bit/?$", "firefox.browsers.windows-64-bit"),
redirect(r"^/firefox/best-browser/?$", "firefox.browsers.best-browser"),
# Unfck campaign
redirect(r"^firefox/unfuck/?$", "firefox.campaign.unfck.index"),
redirect(r"^firefox/love/?$", "firefox.campaign.unfck.index"),
redirect(r"^firefox/liebe/?$", "firefox.campaign.unfck.index"),
redirect(r"^firefox/rendonslenetplusnet/?$", "firefox.campaign.unfck.index"),
redirect(r"^(unfu?ck|love|liebe|rendonslenetplusnet)/?$", "firefox.campaign.unfck.index"),
# issue 9148
redirect(r"^/firefox/campaign/?$", "firefox.new"),
# issue 9788
redirect(r"^/firefox/enterprise/signup(/.*)?$", "firefox.enterprise.index"),
# issue 9953
redirect(r"^/firefox/features/pip/?$", "firefox.features.picture-in-picture"),
# issue 10182
redirect(r"^/firefox/mobile/?$", "firefox.browsers.mobile.index"),
redirect(r"^/exp/firefox/mobile/?$", "firefox.browsers.mobile.index"),
# issue 10292, 10590
redirect(r"^firefox/(?P<version>[^/]+)/whatsnew/(india|africa|france|en|all|china)/?$", "/firefox/{version}/whatsnew/"),
redirect(r"^firefox/whatsnew/(india|africa|france|en|all|china)/?$", "firefox.whatsnew"),
# issue 10703
redirect(r"firefox/lockwise/?", "https://support.mozilla.org/kb/end-of-support-firefox-lockwise"),
# issue 10879
redirect(r"^/exp/?$", "mozorg.home"),
)
| alexgibson/bedrock | bedrock/firefox/redirects.py | Python | mpl-2.0 | 32,590 | 0.005247 |
# -*- coding: utf-8 -*-
#
# Plugins' module file for serverApplet.
# Copyright (C) 2015 Gergely Bódi
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
globalPluginFunctions = ['init', # initializes the plugin with the app object
'load', # loads an account with that plugin
'testLogin', # tests an account after creating it
'startCron', # does cron job functionality for the given login
'nextCron', # returns the time for the next cron job running time for the given login
'getParams'] # returns the params dict for the given login
def localPluginFunctions(modulename):
'''Returns additional plugin functions for the plugin as a dict: {"system tray menu label": "plugin function name"}'''
return __import__('plugin.{}'.format(modulename), fromlist=['actions']).actions
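# A minimal sketch of a plugin module satisfying this interface; the module
# name, action label and function bodies below are assumptions for
# illustration only, not part of this package:
#
#     # plugin/example.py
#     actions = {'Run now': 'runNow'}  # system tray menu label -> function name
#
#     def init(app):
#         '''Initializes the plugin with the app object.'''
#         global _app
#         _app = app
#
#     def runNow(login):
#         '''Local action referenced from the actions dict above.'''
#         pass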
| vendelin8/serverApplet | plugin/__init__.py | Python | gpl-2.0 | 1,584 | 0.004422 |
import OOMP
newPart = OOMP.oompItem(8826)
newPart.addTag("oompType", "CAPC")
newPart.addTag("oompSize", "0603")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "PF100")
newPart.addTag("oompIndex", "V50")
OOMP.parts.append(newPart)
| oomlout/oomlout-OOMP | old/OOMPpart_CAPC_0603_X_PF100_V50.py | Python | cc0-1.0 | 245 | 0 |
"""Support for the Philips Hue sensors as a platform."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any
from aiohue import AiohueException, Unauthorized
from aiohue.v1.sensors import TYPE_ZLL_PRESENCE
import async_timeout
from homeassistant.components.sensor import SensorStateClass
from homeassistant.core import callback
from homeassistant.helpers import debounce, entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from ..const import REQUEST_REFRESH_DELAY
from .helpers import remove_devices
from .hue_event import EVENT_CONFIG_MAP
from .sensor_device import GenericHueDevice
SENSOR_CONFIG_MAP: dict[str, Any] = {}
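# Maps a Hue sensor type to its entity configuration. Left empty here;
# presumably populated by the binary_sensor and sensor platform modules
# before async_update_items reads it (an assumption based on usage below).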
LOGGER = logging.getLogger(__name__)
def _device_id(aiohue_sensor):
# Work out the shared device ID, as described below
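    # Hypothetical example: uniqueid "00:17:88:01:02:03:04:05-02-0406"
    # yields "00:17:88:01:02:03:04:05" (the first 23 characters), shared
    # by the presence, temperature and light level sensors of one physical
    # device. The id shown is illustrative, not taken from a real bridge.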
device_id = aiohue_sensor.uniqueid
if device_id and len(device_id) > 23:
device_id = device_id[:23]
return device_id
class SensorManager:
"""Class that handles registering and updating Hue sensor entities.
Intended to be a singleton.
"""
SCAN_INTERVAL = timedelta(seconds=5)
def __init__(self, bridge):
"""Initialize the sensor manager."""
self.bridge = bridge
self._component_add_entities = {}
self.current = {}
self.current_events = {}
self._enabled_platforms = ("binary_sensor", "sensor")
self.coordinator = DataUpdateCoordinator(
bridge.hass,
LOGGER,
name="sensor",
update_method=self.async_update_data,
update_interval=self.SCAN_INTERVAL,
request_refresh_debouncer=debounce.Debouncer(
bridge.hass, LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
async def async_update_data(self):
"""Update sensor data."""
try:
async with async_timeout.timeout(4):
return await self.bridge.async_request_call(
self.bridge.api.sensors.update
)
except Unauthorized as err:
await self.bridge.handle_unauthorized_error()
raise UpdateFailed("Unauthorized") from err
except AiohueException as err:
raise UpdateFailed(f"Hue error: {err}") from err
async def async_register_component(self, platform, async_add_entities):
"""Register async_add_entities methods for components."""
self._component_add_entities[platform] = async_add_entities
if len(self._component_add_entities) < len(self._enabled_platforms):
LOGGER.debug("Aborting start with %s, waiting for the rest", platform)
return
# We have all components available, start the updating.
self.bridge.reset_jobs.append(
self.coordinator.async_add_listener(self.async_update_items)
)
await self.coordinator.async_refresh()
@callback
def async_update_items(self):
"""Update sensors from the bridge."""
api = self.bridge.api.sensors
if len(self._component_add_entities) < len(self._enabled_platforms):
return
to_add = {}
primary_sensor_devices = {}
current = self.current
# Physical Hue motion sensors present as three sensors in the API: a
# presence sensor, a temperature sensor, and a light level sensor. Of
# these, only the presence sensor is assigned the user-friendly name
# that the user has given to the device. Each of these sensors is
# linked by a common device_id, which is the first twenty-three
# characters of the unique id (then followed by a hyphen and an ID
# specific to the individual sensor).
#
# To set up neat values, and assign the sensor entities to the same
# device, we first, iterate over all the sensors and find the Hue
# presence sensors, then iterate over all the remaining sensors -
# finding the remaining ones that may or may not be related to the
# presence sensors.
for item_id in api:
if api[item_id].type != TYPE_ZLL_PRESENCE:
continue
primary_sensor_devices[_device_id(api[item_id])] = api[item_id]
# Iterate again now we have all the presence sensors, and add the
# related sensors with nice names where appropriate.
for item_id in api:
uniqueid = api[item_id].uniqueid
if current.get(uniqueid, self.current_events.get(uniqueid)) is not None:
continue
sensor_type = api[item_id].type
# Check for event generator devices
event_config = EVENT_CONFIG_MAP.get(sensor_type)
if event_config is not None:
base_name = api[item_id].name
name = event_config["name_format"].format(base_name)
new_event = event_config["class"](api[item_id], name, self.bridge)
self.bridge.hass.async_create_task(
new_event.async_update_device_registry()
)
self.current_events[uniqueid] = new_event
sensor_config = SENSOR_CONFIG_MAP.get(sensor_type)
if sensor_config is None:
continue
base_name = api[item_id].name
primary_sensor = primary_sensor_devices.get(_device_id(api[item_id]))
if primary_sensor is not None:
base_name = primary_sensor.name
name = sensor_config["name_format"].format(base_name)
current[uniqueid] = sensor_config["class"](
api[item_id], name, self.bridge, primary_sensor=primary_sensor
)
to_add.setdefault(sensor_config["platform"], []).append(current[uniqueid])
self.bridge.hass.async_create_task(
remove_devices(
self.bridge,
[value.uniqueid for value in api.values()],
current,
)
)
for platform, value in to_add.items():
self._component_add_entities[platform](value)
class GenericHueSensor(GenericHueDevice, entity.Entity):
"""Representation of a Hue sensor."""
should_poll = False
@property
def available(self):
"""Return if sensor is available."""
return self.bridge.sensor_manager.coordinator.last_update_success and (
self.allow_unreachable
# remotes like Hue Tap (ZGPSwitchSensor) have no _reachability_
or self.sensor.config.get("reachable", True)
)
@property
def state_class(self):
"""Return the state class of this entity, from STATE_CLASSES, if any."""
return SensorStateClass.MEASUREMENT
async def async_added_to_hass(self):
"""When entity is added to hass."""
await super().async_added_to_hass()
self.async_on_remove(
self.bridge.sensor_manager.coordinator.async_add_listener(
self.async_write_ha_state
)
)
async def async_update(self):
"""Update the entity.
Only used by the generic entity update service.
"""
await self.bridge.sensor_manager.coordinator.async_request_refresh()
class GenericZLLSensor(GenericHueSensor):
"""Representation of a Hue-brand, physical sensor."""
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return {"battery_level": self.sensor.battery}
| home-assistant/home-assistant | homeassistant/components/hue/v1/sensor_base.py | Python | apache-2.0 | 7,575 | 0.001056 |
from decimal import Decimal
from django.utils import timezone
from rest_framework import serializers
import rest_framework
import datetime
import django
import pytest
import uuid
# Tests for field keyword arguments and core functionality.
# ---------------------------------------------------------
class TestEmpty:
"""
Tests for `required`, `allow_null`, `allow_blank`, `default`.
"""
def test_required(self):
"""
By default a field must be included in the input.
"""
field = serializers.IntegerField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation()
assert exc_info.value.detail == ['This field is required.']
def test_not_required(self):
"""
If `required=False` then a field may be omitted from the input.
"""
field = serializers.IntegerField(required=False)
with pytest.raises(serializers.SkipField):
field.run_validation()
def test_disallow_null(self):
"""
By default `None` is not a valid input.
"""
field = serializers.IntegerField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation(None)
assert exc_info.value.detail == ['This field may not be null.']
def test_allow_null(self):
"""
If `allow_null=True` then `None` is a valid input.
"""
field = serializers.IntegerField(allow_null=True)
output = field.run_validation(None)
assert output is None
def test_disallow_blank(self):
"""
By default '' is not a valid input.
"""
field = serializers.CharField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation('')
assert exc_info.value.detail == ['This field may not be blank.']
def test_allow_blank(self):
"""
If `allow_blank=True` then '' is a valid input.
"""
field = serializers.CharField(allow_blank=True)
output = field.run_validation('')
assert output == ''
def test_default(self):
"""
If `default` is set, then omitted values get the default input.
"""
field = serializers.IntegerField(default=123)
output = field.run_validation()
        assert output == 123
class TestSource:
def test_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='other')
serializer = ExampleSerializer(data={'example_field': 'abc'})
assert serializer.is_valid()
assert serializer.validated_data == {'other': 'abc'}
def test_redundant_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_field')
with pytest.raises(AssertionError) as exc_info:
ExampleSerializer().fields
assert str(exc_info.value) == (
"It is redundant to specify `source='example_field'` on field "
"'CharField' in serializer 'ExampleSerializer', because it is the "
"same as the field name. Remove the `source` keyword argument."
)
def test_callable_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_callable')
class ExampleInstance(object):
def example_callable(self):
return 'example callable value'
serializer = ExampleSerializer(ExampleInstance())
assert serializer.data['example_field'] == 'example callable value'
def test_callable_source_raises(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_callable', read_only=True)
class ExampleInstance(object):
def example_callable(self):
raise AttributeError('method call failed')
with pytest.raises(ValueError) as exc_info:
serializer = ExampleSerializer(ExampleInstance())
serializer.data.items()
assert 'method call failed' in str(exc_info.value)
class TestReadOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
read_only = serializers.ReadOnlyField()
writable = serializers.IntegerField()
self.Serializer = TestSerializer
def test_validate_read_only(self):
"""
        Read-only fields should not be included in validation.
"""
data = {'read_only': 123, 'writable': 456}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'writable': 456}
def test_serialize_read_only(self):
"""
        Read-only fields should be serialized.
"""
instance = {'read_only': 123, 'writable': 456}
serializer = self.Serializer(instance)
assert serializer.data == {'read_only': 123, 'writable': 456}
class TestWriteOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
write_only = serializers.IntegerField(write_only=True)
readable = serializers.IntegerField()
self.Serializer = TestSerializer
def test_validate_write_only(self):
"""
        Write-only fields should be included in validation.
"""
data = {'write_only': 123, 'readable': 456}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'write_only': 123, 'readable': 456}
def test_serialize_write_only(self):
"""
        Write-only fields should not be serialized.
"""
instance = {'write_only': 123, 'readable': 456}
serializer = self.Serializer(instance)
assert serializer.data == {'readable': 456}
class TestInitial:
def setup(self):
class TestSerializer(serializers.Serializer):
initial_field = serializers.IntegerField(initial=123)
blank_field = serializers.IntegerField()
self.serializer = TestSerializer()
def test_initial(self):
"""
Initial values should be included when serializing a new representation.
"""
assert self.serializer.data == {
'initial_field': 123,
'blank_field': None
}
class TestLabel:
def setup(self):
class TestSerializer(serializers.Serializer):
labeled = serializers.IntegerField(label='My label')
self.serializer = TestSerializer()
def test_label(self):
"""
A field's label may be set with the `label` argument.
"""
fields = self.serializer.fields
assert fields['labeled'].label == 'My label'
class TestInvalidErrorKey:
def setup(self):
class ExampleField(serializers.Field):
def to_native(self, data):
self.fail('incorrect')
self.field = ExampleField()
def test_invalid_error_key(self):
"""
If a field raises a validation error, but does not have a corresponding
error message, then raise an appropriate assertion error.
"""
with pytest.raises(AssertionError) as exc_info:
self.field.to_native(123)
expected = (
'ValidationError raised by `ExampleField`, but error key '
'`incorrect` does not exist in the `error_messages` dictionary.'
)
assert str(exc_info.value) == expected
class MockHTMLDict(dict):
"""
This class mocks up a dictionary like object, that behaves
as if it was returned for multipart or urlencoded data.
"""
getlist = None
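    # DRF distinguishes HTML form input from JSON-style input by checking
    # hasattr(data, 'getlist') (as Django's QueryDict provides); defining
    # the attribute here, even as None, is enough to make this plain dict
    # take the HTML-input code path in the tests below.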
class TestBooleanHTMLInput:
def setup(self):
class TestSerializer(serializers.Serializer):
archived = serializers.BooleanField()
self.Serializer = TestSerializer
def test_empty_html_checkbox(self):
"""
HTML checkboxes do not send any value, but should be treated
as `False` by BooleanField.
"""
# This class mocks up a dictionary like object, that behaves
# as if it was returned for multipart or urlencoded data.
serializer = self.Serializer(data=MockHTMLDict())
assert serializer.is_valid()
assert serializer.validated_data == {'archived': False}
class TestHTMLInput:
def test_empty_html_charfield(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(default='happy')
serializer = TestSerializer(data=MockHTMLDict())
assert serializer.is_valid()
assert serializer.validated_data == {'message': 'happy'}
def test_empty_html_charfield_allow_null(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_null=True)
serializer = TestSerializer(data=MockHTMLDict({'message': ''}))
assert serializer.is_valid()
assert serializer.validated_data == {'message': None}
def test_empty_html_datefield_allow_null(self):
class TestSerializer(serializers.Serializer):
expiry = serializers.DateField(allow_null=True)
serializer = TestSerializer(data=MockHTMLDict({'expiry': ''}))
assert serializer.is_valid()
assert serializer.validated_data == {'expiry': None}
def test_empty_html_charfield_allow_null_allow_blank(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_null=True, allow_blank=True)
serializer = TestSerializer(data=MockHTMLDict({'message': ''}))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_charfield_required_false(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(required=False)
serializer = TestSerializer(data=MockHTMLDict())
assert serializer.is_valid()
assert serializer.validated_data == {}
class TestCreateOnlyDefault:
def setup(self):
default = serializers.CreateOnlyDefault('2001-01-01')
class TestSerializer(serializers.Serializer):
published = serializers.HiddenField(default=default)
text = serializers.CharField()
self.Serializer = TestSerializer
def test_create_only_default_is_provided(self):
serializer = self.Serializer(data={'text': 'example'})
assert serializer.is_valid()
assert serializer.validated_data == {
'text': 'example', 'published': '2001-01-01'
}
def test_create_only_default_is_not_provided_on_update(self):
instance = {
'text': 'example', 'published': '2001-01-01'
}
serializer = self.Serializer(instance, data={'text': 'example'})
assert serializer.is_valid()
assert serializer.validated_data == {
'text': 'example',
}
def test_create_only_default_callable_sets_context(self):
"""
CreateOnlyDefault instances with a callable default should set_context
on the callable if possible
"""
class TestCallableDefault:
def set_context(self, serializer_field):
self.field = serializer_field
def __call__(self):
return "success" if hasattr(self, 'field') else "failure"
class TestSerializer(serializers.Serializer):
context_set = serializers.CharField(default=serializers.CreateOnlyDefault(TestCallableDefault()))
serializer = TestSerializer(data={})
assert serializer.is_valid()
assert serializer.validated_data['context_set'] == 'success'
# Tests for field input and output values.
# ----------------------------------------
def get_items(mapping_or_list_of_two_tuples):
# Tests accept either lists of two tuples, or dictionaries.
if isinstance(mapping_or_list_of_two_tuples, dict):
# {value: expected}
return mapping_or_list_of_two_tuples.items()
# [(value, expected), ...]
return mapping_or_list_of_two_tuples
class FieldValues:
"""
Base class for testing valid and invalid input values.
"""
def test_valid_inputs(self):
"""
Ensure that valid values return the expected validated data.
"""
for input_value, expected_output in get_items(self.valid_inputs):
assert self.field.run_validation(input_value) == expected_output
def test_invalid_inputs(self):
"""
Ensure that invalid values raise the expected validation error.
"""
for input_value, expected_failure in get_items(self.invalid_inputs):
with pytest.raises(serializers.ValidationError) as exc_info:
self.field.run_validation(input_value)
assert exc_info.value.detail == expected_failure
def test_outputs(self):
for output_value, expected_output in get_items(self.outputs):
assert self.field.to_representation(output_value) == expected_output
# Boolean types...
class TestBooleanField(FieldValues):
"""
Valid and invalid values for `BooleanField`.
"""
valid_inputs = {
'true': True,
'false': False,
'1': True,
'0': False,
1: True,
0: False,
True: True,
False: False,
}
invalid_inputs = {
'foo': ['"foo" is not a valid boolean.'],
None: ['This field may not be null.']
}
outputs = {
'true': True,
'false': False,
'1': True,
'0': False,
1: True,
0: False,
True: True,
False: False,
'other': True
}
field = serializers.BooleanField()
class TestNullBooleanField(FieldValues):
"""
Valid and invalid values for `BooleanField`.
"""
valid_inputs = {
'true': True,
'false': False,
'null': None,
True: True,
False: False,
None: None
}
invalid_inputs = {
'foo': ['"foo" is not a valid boolean.'],
}
outputs = {
'true': True,
'false': False,
'null': None,
True: True,
False: False,
None: None,
'other': True
}
field = serializers.NullBooleanField()
# String types...
class TestCharField(FieldValues):
"""
Valid and invalid values for `CharField`.
"""
valid_inputs = {
1: '1',
'abc': 'abc'
}
invalid_inputs = {
'': ['This field may not be blank.']
}
outputs = {
1: '1',
'abc': 'abc'
}
field = serializers.CharField()
def test_trim_whitespace_default(self):
field = serializers.CharField()
assert field.to_internal_value(' abc ') == 'abc'
def test_trim_whitespace_disabled(self):
field = serializers.CharField(trim_whitespace=False)
assert field.to_internal_value(' abc ') == ' abc '
class TestEmailField(FieldValues):
"""
Valid and invalid values for `EmailField`.
"""
valid_inputs = {
'example@example.com': 'example@example.com',
' example@example.com ': 'example@example.com',
}
invalid_inputs = {
'examplecom': ['Enter a valid email address.']
}
outputs = {}
field = serializers.EmailField()
class TestRegexField(FieldValues):
"""
Valid and invalid values for `RegexField`.
"""
valid_inputs = {
'a9': 'a9',
}
invalid_inputs = {
'A9': ["This value does not match the required pattern."]
}
outputs = {}
field = serializers.RegexField(regex='[a-z][0-9]')
class TestSlugField(FieldValues):
"""
Valid and invalid values for `SlugField`.
"""
valid_inputs = {
'slug-99': 'slug-99',
}
invalid_inputs = {
'slug 99': ['Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.']
}
outputs = {}
field = serializers.SlugField()
class TestURLField(FieldValues):
"""
Valid and invalid values for `URLField`.
"""
valid_inputs = {
'http://example.com': 'http://example.com',
}
invalid_inputs = {
'example.com': ['Enter a valid URL.']
}
outputs = {}
field = serializers.URLField()
class TestUUIDField(FieldValues):
"""
Valid and invalid values for `UUIDField`.
"""
valid_inputs = {
'825d7aeb-05a9-45b5-a5b7-05df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
'825d7aeb05a945b5a5b705df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda')
}
invalid_inputs = {
'825d7aeb-05a9-45b5-a5b7': ['"825d7aeb-05a9-45b5-a5b7" is not a valid UUID.']
}
outputs = {
uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'): '825d7aeb-05a9-45b5-a5b7-05df87923cda'
}
field = serializers.UUIDField()
# Number types...
class TestIntegerField(FieldValues):
"""
Valid and invalid values for `IntegerField`.
"""
valid_inputs = {
'1': 1,
'0': 0,
1: 1,
0: 0,
1.0: 1,
0.0: 0,
'1.0': 1
}
invalid_inputs = {
0.5: ['A valid integer is required.'],
'abc': ['A valid integer is required.'],
'0.5': ['A valid integer is required.']
}
outputs = {
'1': 1,
'0': 0,
1: 1,
0: 0,
1.0: 1,
0.0: 0
}
field = serializers.IntegerField()
class TestMinMaxIntegerField(FieldValues):
"""
Valid and invalid values for `IntegerField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
}
invalid_inputs = {
0: ['Ensure this value is greater than or equal to 1.'],
4: ['Ensure this value is less than or equal to 3.'],
'0': ['Ensure this value is greater than or equal to 1.'],
'4': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.IntegerField(min_value=1, max_value=3)
class TestFloatField(FieldValues):
"""
Valid and invalid values for `FloatField`.
"""
valid_inputs = {
'1': 1.0,
'0': 0.0,
1: 1.0,
0: 0.0,
1.0: 1.0,
0.0: 0.0,
}
invalid_inputs = {
'abc': ["A valid number is required."]
}
outputs = {
'1': 1.0,
'0': 0.0,
1: 1.0,
0: 0.0,
1.0: 1.0,
0.0: 0.0,
}
field = serializers.FloatField()
class TestMinMaxFloatField(FieldValues):
"""
Valid and invalid values for `FloatField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
1.0: 1.0,
3.0: 3.0,
}
invalid_inputs = {
0.9: ['Ensure this value is greater than or equal to 1.'],
3.1: ['Ensure this value is less than or equal to 3.'],
'0.0': ['Ensure this value is greater than or equal to 1.'],
'3.1': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.FloatField(min_value=1, max_value=3)
class TestDecimalField(FieldValues):
"""
Valid and invalid values for `DecimalField`.
"""
valid_inputs = {
'12.3': Decimal('12.3'),
'0.1': Decimal('0.1'),
10: Decimal('10'),
0: Decimal('0'),
12.3: Decimal('12.3'),
0.1: Decimal('0.1'),
'2E+2': Decimal('200'),
}
invalid_inputs = (
('abc', ["A valid number is required."]),
(Decimal('Nan'), ["A valid number is required."]),
(Decimal('Inf'), ["A valid number is required."]),
('12.345', ["Ensure that there are no more than 3 digits in total."]),
('0.01', ["Ensure that there are no more than 1 decimal places."]),
(123, ["Ensure that there are no more than 2 digits before the decimal point."])
)
outputs = {
'1': '1.0',
'0': '0.0',
'1.09': '1.1',
'0.04': '0.0',
1: '1.0',
0: '0.0',
Decimal('1.0'): '1.0',
Decimal('0.0'): '0.0',
Decimal('1.09'): '1.1',
Decimal('0.04'): '0.0'
}
field = serializers.DecimalField(max_digits=3, decimal_places=1)
class TestMinMaxDecimalField(FieldValues):
"""
Valid and invalid values for `DecimalField` with min and max limits.
"""
valid_inputs = {
'10.0': Decimal('10.0'),
'20.0': Decimal('20.0'),
}
invalid_inputs = {
'9.9': ['Ensure this value is greater than or equal to 10.'],
'20.1': ['Ensure this value is less than or equal to 20.'],
}
outputs = {}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
min_value=10, max_value=20
)
class TestNoStringCoercionDecimalField(FieldValues):
"""
Output values for `DecimalField` with `coerce_to_string=False`.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
1.09: Decimal('1.1'),
0.04: Decimal('0.0'),
'1.09': Decimal('1.1'),
'0.04': Decimal('0.0'),
Decimal('1.09'): Decimal('1.1'),
Decimal('0.04'): Decimal('0.0'),
}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
coerce_to_string=False
)
# Date & time serializers...
class TestDateField(FieldValues):
"""
Valid and invalid values for `DateField`.
"""
valid_inputs = {
'2001-01-01': datetime.date(2001, 1, 1),
datetime.date(2001, 1, 1): datetime.date(2001, 1, 1),
}
invalid_inputs = {
'abc': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
'2001-99-99': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
datetime.datetime(2001, 1, 1, 12, 00): ['Expected a date but got a datetime.'],
}
outputs = {
datetime.date(2001, 1, 1): '2001-01-01',
'2001-01-01': '2001-01-01',
None: None,
'': None,
}
field = serializers.DateField()
class TestCustomInputFormatDateField(FieldValues):
"""
    Valid and invalid values for `DateField` with a custom input format.
"""
valid_inputs = {
'1 Jan 2001': datetime.date(2001, 1, 1),
}
invalid_inputs = {
'2001-01-01': ['Date has wrong format. Use one of these formats instead: DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateField(input_formats=['%d %b %Y'])
class TestCustomOutputFormatDateField(FieldValues):
"""
Values for `DateField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.date(2001, 1, 1): '01 Jan 2001'
}
field = serializers.DateField(format='%d %b %Y')
class TestNoOutputFormatDateField(FieldValues):
"""
Values for `DateField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.date(2001, 1, 1): datetime.date(2001, 1, 1)
}
field = serializers.DateField(format=None)
class TestDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField`.
"""
valid_inputs = {
'2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
'2001-01-01T13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
'2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
# Django 1.4 does not support timezone string parsing.
'2001-01-01T14:00+01:00' if (django.VERSION > (1, 4)) else '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC())
}
invalid_inputs = {
'abc': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
'2001-99-99T99:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
datetime.date(2001, 1, 1): ['Expected a datetime but got a date.'],
}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00',
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): '2001-01-01T13:00:00Z'
}
field = serializers.DateTimeField(default_timezone=timezone.UTC())
class TestCustomInputFormatDateTimeField(FieldValues):
"""
    Valid and invalid values for `DateTimeField` with a custom input format.
"""
valid_inputs = {
'1:35pm, 1 Jan 2001': datetime.datetime(2001, 1, 1, 13, 35, tzinfo=timezone.UTC()),
}
invalid_inputs = {
'2001-01-01T20:50': ['Datetime has wrong format. Use one of these formats instead: hh:mm[AM|PM], DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateTimeField(default_timezone=timezone.UTC(), input_formats=['%I:%M%p, %d %b %Y'])
class TestCustomOutputFormatDateTimeField(FieldValues):
"""
Values for `DateTimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): '01:00PM, 01 Jan 2001',
}
field = serializers.DateTimeField(format='%I:%M%p, %d %b %Y')
class TestNoOutputFormatDateTimeField(FieldValues):
"""
Values for `DateTimeField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00),
}
field = serializers.DateTimeField(format=None)
class TestNaiveDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField` with naive datetimes.
"""
valid_inputs = {
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00),
'2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00),
}
invalid_inputs = {}
outputs = {}
field = serializers.DateTimeField(default_timezone=None)
class TestTimeField(FieldValues):
"""
Valid and invalid values for `TimeField`.
"""
valid_inputs = {
'13:00': datetime.time(13, 00),
datetime.time(13, 00): datetime.time(13, 00),
}
invalid_inputs = {
'abc': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
'99:99': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
}
outputs = {
datetime.time(13, 00): '13:00:00'
}
field = serializers.TimeField()
class TestCustomInputFormatTimeField(FieldValues):
"""
Valid and invalid values for `TimeField` with a custom input format.
"""
valid_inputs = {
'1:00pm': datetime.time(13, 00),
}
invalid_inputs = {
'13:00': ['Time has wrong format. Use one of these formats instead: hh:mm[AM|PM].'],
}
outputs = {}
field = serializers.TimeField(input_formats=['%I:%M%p'])
class TestCustomOutputFormatTimeField(FieldValues):
"""
Values for `TimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.time(13, 00): '01:00PM'
}
field = serializers.TimeField(format='%I:%M%p')
class TestNoOutputFormatTimeField(FieldValues):
"""
    Values for `TimeField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.time(13, 00): datetime.time(13, 00)
}
field = serializers.TimeField(format=None)
@pytest.mark.skipif(django.VERSION < (1, 8),
reason='DurationField is only available for django1.8+')
class TestDurationField(FieldValues):
"""
Valid and invalid values for `DurationField`.
"""
valid_inputs = {
'13': datetime.timedelta(seconds=13),
'3 08:32:01.000123': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
'08:01': datetime.timedelta(minutes=8, seconds=1),
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
}
invalid_inputs = {
'abc': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
'3 08:32 01.123': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
}
outputs = {
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): '3 08:32:01.000123',
}
if django.VERSION >= (1, 8):
field = serializers.DurationField()
# Choice types...
class TestChoiceField(FieldValues):
"""
Valid and invalid values for `ChoiceField`.
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'amazing': ['"amazing" is not a valid choice.']
}
outputs = {
'good': 'good',
'': '',
'amazing': 'amazing',
}
field = serializers.ChoiceField(
choices=[
('poor', 'Poor quality'),
('medium', 'Medium quality'),
('good', 'Good quality'),
]
)
def test_allow_blank(self):
"""
If `allow_blank=True` then '' is a valid input.
"""
field = serializers.ChoiceField(
allow_blank=True,
choices=[
('poor', 'Poor quality'),
('medium', 'Medium quality'),
('good', 'Good quality'),
]
)
output = field.run_validation('')
assert output == ''
class TestChoiceFieldWithType(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses an integer type,
instead of a char type.
"""
valid_inputs = {
'1': 1,
3: 3,
}
invalid_inputs = {
5: ['"5" is not a valid choice.'],
'abc': ['"abc" is not a valid choice.']
}
outputs = {
'1': 1,
1: 1
}
field = serializers.ChoiceField(
choices=[
(1, 'Poor quality'),
(2, 'Medium quality'),
(3, 'Good quality'),
]
)
class TestChoiceFieldWithListChoices(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses a flat list for the
choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(choices=('poor', 'medium', 'good'))
class TestMultipleChoiceField(FieldValues):
"""
Valid and invalid values for `MultipleChoiceField`.
"""
valid_inputs = {
(): set(),
('aircon',): set(['aircon']),
('aircon', 'manual'): set(['aircon', 'manual']),
}
invalid_inputs = {
'abc': ['Expected a list of items but got type "str".'],
('aircon', 'incorrect'): ['"incorrect" is not a valid choice.']
}
outputs = [
(['aircon', 'manual', 'incorrect'], set(['aircon', 'manual', 'incorrect']))
]
field = serializers.MultipleChoiceField(
choices=[
('aircon', 'AirCon'),
('manual', 'Manual drive'),
('diesel', 'Diesel'),
]
)
def test_against_partial_and_full_updates(self):
from django.http import QueryDict
field = serializers.MultipleChoiceField(choices=(('a', 'a'), ('b', 'b')))
field.partial = False
assert field.get_value(QueryDict({})) == []
field.partial = True
assert field.get_value(QueryDict({})) == rest_framework.fields.empty
# File serializers...
class MockFile:
def __init__(self, name='', size=0, url=''):
self.name = name
self.size = size
self.url = url
def __eq__(self, other):
return (
isinstance(other, MockFile) and
self.name == other.name and
self.size == other.size and
self.url == other.url
)
class TestFileField(FieldValues):
"""
Values for `FileField`.
"""
valid_inputs = [
(MockFile(name='example', size=10), MockFile(name='example', size=10))
]
invalid_inputs = [
('invalid', ['The submitted data was not a file. Check the encoding type on the form.']),
(MockFile(name='example.txt', size=0), ['The submitted file is empty.']),
(MockFile(name='', size=10), ['No filename could be determined.']),
(MockFile(name='x' * 100, size=10), ['Ensure this filename has at most 10 characters (it has 100).'])
]
outputs = [
(MockFile(name='example.txt', url='/example.txt'), '/example.txt'),
('', None)
]
field = serializers.FileField(max_length=10)
class TestFileFieldWithName(FieldValues):
"""
Values for `FileField` with a filename output instead of URLs.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = [
(MockFile(name='example.txt', url='/example.txt'), 'example.txt')
]
field = serializers.FileField(use_url=False)
# Stub out mock Django `forms.ImageField` class so we don't *actually*
# call into its regular validation, or require PIL for testing.
class FailImageValidation(object):
def to_python(self, value):
raise serializers.ValidationError(self.error_messages['invalid_image'])
class PassImageValidation(object):
def to_python(self, value):
return value
class TestInvalidImageField(FieldValues):
"""
Values for an invalid `ImageField`.
"""
valid_inputs = {}
invalid_inputs = [
(MockFile(name='example.txt', size=10), ['Upload a valid image. The file you uploaded was either not an image or a corrupted image.'])
]
outputs = {}
field = serializers.ImageField(_DjangoImageField=FailImageValidation)
class TestValidImageField(FieldValues):
"""
    Values for a valid `ImageField`.
"""
valid_inputs = [
(MockFile(name='example.txt', size=10), MockFile(name='example.txt', size=10))
]
invalid_inputs = {}
outputs = {}
field = serializers.ImageField(_DjangoImageField=PassImageValidation)
# Composite serializers...
class TestListField(FieldValues):
"""
Values for `ListField` with IntegerField as child.
"""
valid_inputs = [
([1, 2, 3], [1, 2, 3]),
(['1', '2', '3'], [1, 2, 3])
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
([1, 2, 'error'], ['A valid integer is required.'])
]
outputs = [
([1, 2, 3], [1, 2, 3]),
(['1', '2', '3'], [1, 2, 3])
]
field = serializers.ListField(child=serializers.IntegerField())
class TestUnvalidatedListField(FieldValues):
"""
Values for `ListField` with no `child` argument.
"""
valid_inputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
]
outputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
field = serializers.ListField()
class TestDictField(FieldValues):
"""
    Values for `DictField` with CharField as child.
"""
valid_inputs = [
({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
]
invalid_inputs = [
({'a': 1, 'b': None}, ['This field may not be null.']),
('not a dict', ['Expected a dictionary of items but got type "str".']),
]
outputs = [
({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
]
field = serializers.DictField(child=serializers.CharField())
class TestUnvalidatedDictField(FieldValues):
"""
    Values for `DictField` with no `child` argument.
"""
valid_inputs = [
({'a': 1, 'b': [4, 5, 6], 1: 123}, {'a': 1, 'b': [4, 5, 6], '1': 123}),
]
invalid_inputs = [
('not a dict', ['Expected a dictionary of items but got type "str".']),
]
outputs = [
({'a': 1, 'b': [4, 5, 6]}, {'a': 1, 'b': [4, 5, 6]}),
]
field = serializers.DictField()
# Tests for FileField.
# ---------------------
class MockRequest:
def build_absolute_uri(self, value):
return 'http://example.com' + value
class TestFileFieldContext:
def test_fully_qualified_when_request_in_context(self):
field = serializers.FileField(max_length=10)
field._context = {'request': MockRequest()}
obj = MockFile(name='example.txt', url='/example.txt')
value = field.to_representation(obj)
assert value == 'http://example.com/example.txt'
# Tests for SerializerMethodField.
# --------------------------------
class TestSerializerMethodField:
def test_serializer_method_field(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.SerializerMethodField()
def get_example_field(self, obj):
return 'ran get_example_field(%d)' % obj['example_field']
serializer = ExampleSerializer({'example_field': 123})
assert serializer.data == {
'example_field': 'ran get_example_field(123)'
}
def test_redundant_method_name(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.SerializerMethodField('get_example_field')
with pytest.raises(AssertionError) as exc_info:
ExampleSerializer().fields
assert str(exc_info.value) == (
"It is redundant to specify `get_example_field` on "
"SerializerMethodField 'example_field' in serializer "
"'ExampleSerializer', because it is the same as the default "
"method name. Remove the `method_name` argument."
)
| ticosax/django-rest-framework | tests/test_fields.py | Python | bsd-2-clause | 38,463 | 0.00117 |
from pdflib_py import *
p = PDF_new()
PDF_open_file(p, "gradients.pdf")
PDF_set_parameter(p, "usercoordinates", "true")
PDF_set_value(p, "compress", 0)
PDF_set_info(p, "Author", "pdflib")
PDF_set_info(p, "Creator", "pdflib_py")
PDF_set_info(p, "Title", "gradients")
width = 1024
height = 800
PDF_begin_page(p, width, height)
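# Five gradient demos follow: a radial and a diagonal axial fill on a curved
# four-lobed blob, a radial and a vertical axial fill on tall rectangles, and
# a horizontal axial fill on a wide bar. Each shape is filled, then stroked.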
type,x,params = "radial",0,"r0=0 r1=320"
y = 0
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.0, 1.0)
shading = PDF_shading(p, type, 160+x,160+y, 160+x, 160+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x,y)
PDF_curveto(p, x+80, y+80, x+80, y+240, x, y+320)
PDF_curveto(p, x+80, y+240, x+240, y+240, x+320, y+320)
PDF_curveto(p, x+240, y+240, x+240, y+80, x+320, y)
PDF_curveto(p, x+240, y+80, x+80, y+80, x, y)
PDF_fill(p)
PDF_moveto(p, x,y)
PDF_curveto(p, x+80, y+80, x+80, y+240, x, y+320)
PDF_curveto(p, x+80, y+240, x+240, y+240, x+320, y+320)
PDF_curveto(p, x+240, y+240, x+240, y+80, x+320, y)
PDF_curveto(p, x+240, y+80, x+80, y+80, x, y)
PDF_stroke(p)
type,x,params = "axial",200,""
y = 0
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.4, 1.0)
shading = PDF_shading(p, type, 0+x,0+y, 320+x,320+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x,y)
PDF_curveto(p, x+80, y+80, x+80, y+240, x, y+320)
PDF_curveto(p, x+80, y+240, x+240, y+240, x+320, y+320)
PDF_curveto(p, x+240, y+240, x+240, y+80, x+320, y)
PDF_curveto(p, x+240, y+80, x+80, y+80, x, y)
PDF_fill(p)
PDF_moveto(p, x,y)
PDF_curveto(p, x+80, y+80, x+80, y+240, x, y+320)
PDF_curveto(p, x+80, y+240, x+240, y+240, x+320, y+320)
PDF_curveto(p, x+240, y+240, x+240, y+80, x+320, y)
PDF_curveto(p, x+240, y+80, x+80, y+80, x, y)
PDF_stroke(p)
type,x,params = "radial",500,"r0=0 r1=220"
y = 0
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.4, 1.0)
shading = PDF_shading(p, type, 120+x, 340+y, 120+x, 340+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+640)
PDF_lineto(p, x+160, y+640)
PDF_lineto(p, x+160, y+80)
PDF_lineto(p, x+80, y+80)
PDF_fill(p)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+640)
PDF_lineto(p, x+160, y+640)
PDF_lineto(p, x+160, y+80)
PDF_lineto(p, x+80, y+80)
PDF_stroke(p)
type,x,params = "axial",600,""
y = 0
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.4, 1.0)
shading = PDF_shading(p, type, 80+x, 80+y, 80+x, 640+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+640)
PDF_lineto(p, x+160, y+640)
PDF_lineto(p, x+160, y+80)
PDF_lineto(p, x+80, y+80)
PDF_fill(p)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+640)
PDF_lineto(p, x+160, y+640)
PDF_lineto(p, x+160, y+80)
PDF_lineto(p, x+80, y+80)
PDF_stroke(p)
type,x,params = "axial",50,""
y = 300
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.4, 1.0)
shading = PDF_shading(p, type, 80+x, 80+y, 400+x, 80+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+160)
PDF_lineto(p, x+400, y+160)
PDF_lineto(p, x+400, y+80)
PDF_lineto(p, x+80, y+80)
PDF_fill(p)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+160)
PDF_lineto(p, x+400, y+160)
PDF_lineto(p, x+400, y+80)
PDF_lineto(p, x+80, y+80)
PDF_stroke(p)
PDF_end_page(p)
PDF_close(p)
PDF_delete(p)
| brad/swftools | spec/gradients.py | Python | gpl-2.0 | 3,650 | 0.019452 |
# Copyright (C) 2009-2017 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import socket
import sys
import time
import tracing
import cliapp
import larch
import paramiko
import ttystatus
import obnamlib
class ObnamIOError(obnamlib.ObnamError):
msg = 'I/O error: {filename}: {errno}: {strerror}'
class ObnamSSHError(obnamlib.ObnamError):
msg = 'SSH error: {msg}'
class ObnamSystemError(obnamlib.ObnamError):
msg = 'System error: {filename}: {errno}: {strerror}'
class App(cliapp.Application):
'''Main program for backup program.'''
def add_settings(self):
# General settings.
self.settings.string(
['repository', 'r'],
'name of backup repository (can be pathname or supported URL)',
metavar='URL')
self.settings.string(
['client-name'],
'name of client (defaults to hostname)',
default=self.deduce_client_name())
self.settings.boolean(
['quiet', 'silent'],
'be silent: show only error messages, no progress updates')
self.settings.boolean(
['verbose'],
'be verbose: tell the user more of what is going on and '
'generally make sure the user is aware of what is happening '
'or at least that something is happening and '
'also make sure their screen is getting updates frequently '
'and that there is changes happening all the time so they '
'do not get bored and that they in fact get frustrated by '
'getting distracted by so many updates that they will move '
'into the Gobi desert to live under a rock')
self.settings.boolean(
['pretend', 'dry-run', 'no-act'],
'do not actually change anything (works with '
'backup, forget and restore only, and may only '
'simulate approximately real behavior)')
self.settings.integer(
['lock-timeout'],
'when locking in the backup repository, '
'wait TIMEOUT seconds for an existing lock '
'to go away before giving up',
metavar='TIMEOUT',
default=60)
# Repository format selection.
self.settings.choice(
['repository-format'],
['6', obnamlib.GREEN_ALBATROSS_VERSION],
'use FORMAT for new repositories; '
'one of "6", "{}"'.format(obnamlib.GREEN_ALBATROSS_VERSION),
metavar='FORMAT')
algos = list(obnamlib.checksum_algorithms)
        algos.remove('sha512') # Re-added at the front below so it becomes the default.
algos.remove('md5') # We're NOT letting the user choose MD5.
algos = ['sha512'] + algos
self.settings.choice(
['checksum-algorithm'],
algos,
'use CHECKSUM for checksum algorithm '
'(not for repository format 6); '
'one of: ' +
', '.join(algos),
metavar='CHECKSUM')
# Performance related settings.
perf_group = obnamlib.option_group['perf']
self.settings.bytesize(
['node-size'],
'size of B-tree nodes on disk; only affects new '
'B-trees so you may need to delete a client '
'or repository to change this for existing '
'repositories',
default=obnamlib.DEFAULT_NODE_SIZE,
group=perf_group)
self.settings.bytesize(
['chunk-size'],
'size of chunks of file data backed up',
default=obnamlib.DEFAULT_CHUNK_SIZE,
group=perf_group)
self.settings.bytesize(
['upload-queue-size'],
'length of upload queue for B-tree nodes',
default=obnamlib.DEFAULT_UPLOAD_QUEUE_SIZE,
group=perf_group)
self.settings.bytesize(
['lru-size'],
'size of LRU cache for B-tree nodes',
default=obnamlib.DEFAULT_LRU_SIZE,
group=perf_group)
self.settings.integer(
['idpath-depth'],
'depth of chunk id mapping',
default=obnamlib.IDPATH_DEPTH,
group=perf_group)
self.settings.integer(
['idpath-bits'],
'chunk id level size',
default=obnamlib.IDPATH_BITS,
group=perf_group)
self.settings.integer(
['idpath-skip'],
'chunk id mapping lowest bits skip',
default=obnamlib.IDPATH_SKIP,
group=perf_group)
# Settings to help developers and development of Obnam.
devel_group = obnamlib.option_group['devel']
self.settings.string_list(
['trace'],
            'add to the filename patterns for which trace '
            'debug logging happens',
group=devel_group)
self.settings.string(
['pretend-time'],
'pretend it is TIMESTAMP (YYYY-MM-DD HH:MM:SS); '
'this is only useful for testing purposes',
metavar='TIMESTAMP',
group=devel_group)
self.settings.integer(
['crash-limit'],
'artificially crash the program after COUNTER '
'files written to the repository; this is '
'useful for crash testing the application, '
'and should not be enabled for real use; '
'set to 0 to disable (disabled by default)',
metavar='COUNTER',
group=devel_group)
# The following needs to be done here, because it needs
# to be done before option processing. This is a bit ugly,
# but the best we can do with the current cliapp structure.
# Possibly cliapp will provide a better hook for us to use
# later on, but this is reality now.
self.setup_ttystatus()
self.fsf = obnamlib.VfsFactory()
self.repo_factory = obnamlib.RepositoryFactory()
self.setup_hooks()
self.settings['log-level'] = 'info'
def deduce_client_name(self):
return socket.gethostname()
def setup_hooks(self):
self.hooks = obnamlib.HookManager()
self.hooks.new('config-loaded')
self.hooks.new('shutdown')
# The repository factory creates all repository related hooks.
self.repo_factory.setup_hooks(self.hooks)
def setup(self):
self.pluginmgr.plugin_arguments = (self,)
def process_args(self, args):
try:
try:
if self.settings['quiet']:
self.ts.disable()
for pattern in self.settings['trace']:
tracing.trace_add_pattern(pattern)
self.hooks.call('config-loaded')
cliapp.Application.process_args(self, args)
self.hooks.call('shutdown')
except paramiko.SSHException as e:
logging.critical(
                    'Caught SSHException: %s', str(e), exc_info=True)
raise ObnamSSHError(msg=str(e))
except IOError as e:
logging.critical('Caught IOError: %s', str(e), exc_info=True)
raise ObnamIOError(
errno=e.errno, strerror=e.strerror, filename=e.filename)
except OSError as e:
logging.critical('Caught OSError: %s', str(e), exc_info=True)
raise ObnamSystemError(
errno=e.errno, strerror=e.strerror, filename=e.filename)
except larch.Error as e:
logging.critical(str(e), exc_info=True)
sys.stderr.write('ERROR: %s\n' % str(e))
sys.exit(1)
except obnamlib.StructuredError as e:
logging.critical(
self._indent_multiline(e.formatted()),
exc_info=True)
sys.stderr.write('ERROR: %s\n' % e.formatted())
sys.exit(1)
def _indent_multiline(self, s):
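        # Keep the first line flush and indent the continuation lines so
        # multi-line structured errors stay readable in the log output.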
lines = s.splitlines()
return ''.join(
[lines[0] + '\n'] +
[' {}\n'.format(line) for line in lines[1:]])
def setup_ttystatus(self):
self.ts = ttystatus.TerminalStatus(period=0.1)
if self.settings['quiet']:
self.ts.disable()
def get_repository_object(self, create=False, repofs=None):
'''Return an implementation of obnamlib.RepositoryInterface.'''
logging.info('Opening repository: %s', self.settings['repository'])
tracing.trace('create=%s', create)
tracing.trace('repofs=%s', repofs)
repopath = self.settings['repository']
if repofs is None:
repofs = self.fsf.new(repopath, create=create)
if self.settings['crash-limit'] > 0:
repofs.crash_limit = self.settings['crash-limit']
repofs.connect()
else:
repofs.reinit(repopath)
kwargs = {
'lock_timeout': self.settings['lock-timeout'],
'node_size': self.settings['node-size'],
'upload_queue_size': self.settings['upload-queue-size'],
'lru_size': self.settings['lru-size'],
'idpath_depth': self.settings['idpath-depth'],
'idpath_bits': self.settings['idpath-bits'],
'idpath_skip': self.settings['idpath-skip'],
'hooks': self.hooks,
'current_time': self.time,
'chunk_size': self.settings['chunk-size'],
'chunk_cache_size': self.settings['chunk-cache-size'],
'chunk_bag_size': self.settings['chunk-bag-size'],
'dir_cache_size': self.settings['dir-cache-size'],
'dir_bag_size': self.settings['dir-bag-size'],
'checksum_algorithm': self.settings['checksum-algorithm'],
}
if create:
return self.repo_factory.create_repo(
repofs, self.get_default_repository_class(), **kwargs)
else:
return self.repo_factory.open_existing_repo(repofs, **kwargs)
def get_default_repository_class(self):
classes = {
'6': obnamlib.RepositoryFormat6,
obnamlib.GREEN_ALBATROSS_VERSION: obnamlib.RepositoryFormatGA,
}
return classes[self.settings['repository-format']]
def time(self):
'''Return current time in seconds since epoch.
This is a wrapper around time.time() so that it can be overridden
with the --pretend-time setting.
'''
s = self.settings['pretend-time']
if s:
t = time.strptime(s, '%Y-%m-%d %H:%M:%S')
return time.mktime(t)
else:
return time.time()
| obnam-mirror/obnam | obnamlib/app.py | Python | gpl-3.0 | 11,277 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-10 22:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dns', '0002_auto_20151228_0134'),
]
operations = [
migrations.CreateModel(
name='Redirection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('abbr', models.CharField(max_length=100, unique=True)),
('target', models.URLField()),
],
),
]
| garncarz/dns-server | dns/migrations/0003_redirection.py | Python | gpl-2.0 | 640 | 0.001563 |
#!/usr/bin/python
#Copyright (c) 2016, Justin R. Klesmith
#All rights reserved.
from __future__ import division
from math import log, sqrt, pow
import argparse, os, random
#Set the author information
__author__ = "Justin R. Klesmith"
__copyright__ = "Copyright 2016, Justin R. Klesmith"
__credits__ = ["Justin R. Klesmith", "Timothy A. Whitehead"]
__license__ = "BSD-3"
__version__ = "X.X, Build: 2016XXXX"
__maintainer__ = "Justin R. Klesmith"
__email__ = ["klesmit3@msu.edu", "justinklesmith@gmail.com", "justinklesmith@evodyn.com"]
#Get commandline arguments
parser = argparse.ArgumentParser(description='Process the MSA to get into a format for PSI-Blast')
parser.add_argument('-m', dest='msa', action='store', required=True, help='MSA file path')
parser.add_argument('-l', dest='length', action='store', required=True, help='Length of protein')
#parser.add_argument('-d', dest='dssp', action='store', required=True, help='Path to processed DSSP output')
args = parser.parse_args()
#Populate array
Mutations = {}
for j in xrange(1,int(args.length)):
#Mutations[j] = False
Mutations[j] = None
#Import DSSP Information from CSV
#if os.path.isfile(args.dssp):
# with open(args.dssp, 'r') as infile: #Open the file with the wild-type protein sequence
# for line in infile:
# split = line.split(",")
# if split[0] != "ID": #Skip the CSV header
# location = int(split[0])
# ss = str(split[1]).rstrip("\n\r")
#
# if len(ss) == 0:
# Mutations[location] = "L"
# else:
# Mutations[location] = ss
#If loop then set true
#if len(ss) == 0 or ss == "S" or ss == "T":
#Mutations[location] = True
#else:
# print "Cannot open the processed DSSP"
# quit()
#Import msa alignment
Alignment = ""
outfile = open('msatemp.csv', 'w')
if os.path.isfile(args.msa):
    with open(args.msa, 'r') as infile: #Open the MSA file
Output = ""
for line in infile:
#Check to see if we have a header
if line[0] == ">":
#print Output #Print the current alignment
Alignment = Alignment + Output + "\n"
Output = "" #Empty the current alignment
Output = Output + line.rstrip('\n') + "," #Assemble the line
else:
Output = Output + line.rstrip('\n') #Assemble the line
else:
print "Cannot open the processed NCBI CSV"
quit()
outfile.write(Alignment)
outfile.close()
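#Temp-file pipeline: msatemp.csv holds "header,sequence" rows, msatemp2.csv the
#aligned sequences only, msatemp3.csv marks columns that are gaps in the wild
#type with 'X', and msatemp4.csv drops those columns so every sequence follows
#wild-type numbering.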
#Import MSA into a lookup table
MSATable = {}
outfile = open('msatemp2.csv', 'w')
with open('msatemp.csv', 'r') as infile: #Open the header,sequence temp file
for line in infile:
split = line.split(",")
if len(line) > 10:
MSATable.update({split[0] : split[1].rstrip("\n")})
outfile.write(split[1])
outfile.close()
#Make a DSSP lookup string
Wildtype = MSATable[">ENTER YOUR WILD-TYPE SEQUENCE HEADER NAME HERE found in the MSA or CDHIT Cluster"]
MSAWTLen = len(Wildtype)
#CorrectedDSSP = ""
#DSSPCount = 1
#print Wildtype
#DSSP = ""
#for j in xrange(1,int(args.length)):
#Mutations[j] = False
#DSSP = DSSP + Mutations[j].rstrip("\n\r")
#print DSSP
#for j in xrange(0,MSAWTLen):
# if Wildtype[j] == "-":
# CorrectedDSSP = CorrectedDSSP + "-"
# else:
# CorrectedDSSP = CorrectedDSSP + Mutations[DSSPCount]
# DSSPCount = DSSPCount + 1
#Add the lookup string to the 2nd temp table
#with open('msatemp2.csv', 'r+') as f:
# content = f.read()
# f.seek(0, 0)
# f.write(CorrectedDSSP + '\n' + Wildtype + '\n\n' + content)
#Time to mark the insertions
XedOut = ""
outfile2 = open('msatemp3.csv', 'w')
Wildtype = Wildtype + "\n"
MSAWTLen = len(Wildtype)
with open('msatemp2.csv', 'r') as f:
for line in f:
for i in xrange(0,MSAWTLen):
if Wildtype[i] == "-":
XedOut = XedOut + "X"
else:
XedOut = XedOut + line[i]
outfile2.write(XedOut)
outfile2.close()
#Now let's delete the insertions
output = ""
with open('msatemp3.csv', 'r') as f1:
for line in f1:
Len = len(line)
for i in xrange(0, Len):
if line[i] != "X":
output = output + line[i]
f1o = open('msatemp4.csv', 'w')
f1o.write(output)
f1o.close()
#Make the sixth file from msatemp5.csv (sequences that lacked the length and lgk3): wrap each line in a FASTA record with a random header
output = ""
with open('msatemp5.csv', 'r') as f2:
for line in f2:
output = output + ">" + str(random.random()) + "\n" + line + "\n"
f1o = open('msatemp6.csv', 'w')
f1o.write(output)
f1o.close() | JKlesmith/Bioinformatics | ProcessMSA.py | Python | bsd-3-clause | 4,782 | 0.015056 |
#
# My first attempt at python
# calibrate accelerometer
#
import re
import scipy
from scipy import optimize
from scipy import linalg
from pylab import *
#
# parse the log
#
def read_log(ac_id, filename, sensor):
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while 1:
line = f.readline().strip()
if line == '':
break
m=re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return scipy.array(list_meas)
#
# select only non-noisy data
#
def filter_meas(meas, window_size, noise_threshold):
filtered_meas = []
filtered_idx = []
for i in range(window_size,len(meas)-window_size):
noise = meas[i-window_size:i+window_size,:].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i,:])
filtered_idx.append(i)
return scipy.array(filtered_meas), filtered_idx
#
# initial boundary based calibration
#
def get_min_max_guess(meas, scale):
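    # The neutral point is the midpoint of the per-axis extremes; the scale
    # factor maps the measured half-range onto the reference magnitude
    # (e.g. 9.81 m/s^2 for the accelerometer).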
max_meas = meas[:,:].max(axis=0)
min_meas = meas[:,:].min(axis=0)
n = (max_meas + min_meas) / 2
sf = 2*scale/(max_meas - min_meas)
return scipy.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
#
# scale the set of measurements
#
def scale_measurements(meas, p):
l_comp = [];
l_norm = [];
for m in meas[:,]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return scipy.array(l_comp), scipy.array(l_norm)
#
# print xml for airframe file
#
def print_xml(p, sensor, res):
print ""
print "<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>"
print "<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>"
print "<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>"
print "<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>"
print "<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>"
print "<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>"
filename = 'log_accel_booz2_a2'
ac_id = "151"
if 1:
sensor = "ACCEL"
sensor_ref = 9.81
sensor_res = 10
    noise_window = 20
    noise_threshold = 40
else:
    sensor = "MAG"
    sensor_ref = 1.
    sensor_res = 11
    noise_window = 10
    noise_threshold = 1000
print "reading file "+filename+" for aircraft "+ac_id+" and sensor "+sensor
measurements = read_log(ac_id, filename, sensor)
print "found "+str(len(measurements))+" records"
flt_meas, flt_idx = filter_meas(measurements, noise_window, noise_threshold)
print "remaining "+str(len(flt_meas))+" after low pass"
p0 = get_min_max_guess(flt_meas, sensor_ref)
cp0, np0 = scale_measurements(flt_meas, p0)
print "initial guess : "+str(np0.mean())+" "+str(np0.std())
print p0
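# Residual for the least-squares fit: the norm of every scaled measurement
# should equal the sensor reference, so the error is y - |scale(m, p)|.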
def err_func(p,meas,y):
cp, np = scale_measurements(meas, p)
err = y*scipy.ones(len(meas)) - np
return err
p1, success = optimize.leastsq(err_func, p0[:], args=(flt_meas, sensor_ref))
cp1, np1 = scale_measurements(flt_meas, p1)
print "optimized guess : "+str(np1.mean())+" "+str(np1.std())
print p1
print_xml(p1, sensor, sensor_res)
subplot(3,1,1)
plot(measurements[:,0])
plot(measurements[:,1])
plot(measurements[:,2])
plot(flt_idx, flt_meas[:,0], 'ro')
plot(flt_idx, flt_meas[:,1], 'ro')
plot(flt_idx, flt_meas[:,2], 'ro')
subplot(3,2,3)
plot(cp0[:,0])
plot(cp0[:,1])
plot(cp0[:,2])
plot(-sensor_ref*scipy.ones(len(flt_meas)))
plot(sensor_ref*scipy.ones(len(flt_meas)))
subplot(3,2,4)
plot(np0)
plot(sensor_ref*scipy.ones(len(flt_meas)))
subplot(3,2,5)
plot(cp1[:,0])
plot(cp1[:,1])
plot(cp1[:,2])
plot(-sensor_ref*scipy.ones(len(flt_meas)))
plot(sensor_ref*scipy.ones(len(flt_meas)))
subplot(3,2,6)
plot(np1)
plot(sensor_ref*scipy.ones(len(flt_meas)))
show()
| pchickey/paparazzi-linux-release | sw/tools/calibration/calib.py | Python | gpl-2.0 | 3,989 | 0.018802 |
from network import WLAN
###############################################################################
# Settings for WLAN STA mode
###############################################################################
WLAN_MODE = 'off'
#WLAN_SSID = ''
#WLAN_AUTH = (WLAN.WPA2,'')
###############################################################################
# LoRaWAN Configuration
###############################################################################
# May be either 'otaa', 'abp', or 'off'
LORA_MODE = 'otaa'
# Settings for mode 'otaa'
LORA_OTAA_EUI = '70B3D57EF0001ED4'
LORA_OTAA_KEY = None # See README.md for instructions!
# Settings for mode 'abp'
#LORA_ABP_DEVADDR = ''
#LORA_ABP_NETKEY = ''
#LORA_ABP_APPKEY = ''
# Interval between measures transmitted to TTN.
# Measured airtime of transmission is 56.6 ms, fair use policy limits us to
# 30 seconds per day (= roughly 500 messages). We default to a 180 second
# interval (=480 messages / day).
LORA_SEND_RATE = 180
###############################################################################
# GNSS Configuration
###############################################################################
GNSS_UART_PORT = 1
GNSS_UART_BAUD = 9600
GNSS_ENABLE_PIN = 'P8'
| ttn-be/ttnmapper | config.py | Python | mit | 1,297 | 0.010023 |
#!/usr/bin/env python3
#
# PLASMA : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
from argparse import ArgumentParser
import plasma
import plasma.lib.utils
import plasma.lib.colors
from plasma.lib.database import Database
from plasma.lib.disassembler import Disassembler, NB_LINES_TO_DISASM
from plasma.lib.utils import die, error, debug__
from plasma.lib.generate_ast import generate_ast
from plasma.lib.exceptions import ExcArch, ExcFileFormat, ExcIfelse, ExcPEFail
#
# The global context variable is always named as gctx
#
class GlobalContext():
def __init__(self):
# TODO : let globally ?
plasma.lib.utils.gctx = self
plasma.lib.colors.gctx = self
self.comments = True # always True, will be removed
# For info() messages
self.quiet = False
self.is_interactive = False
# Command line options
self.print_andif = True
self.color = True
self.max_data_size = 30
self.filename = None
self.syms = False
self.calls_in_section = None
self.entry = None # string : symbol | EP | 0xNNNN
self.do_dump = False
self.vim = False
self.nb_lines = 30
        self.graph = False # --graph option: dump a graph.dot file (not ctx.gph, the graph object)
self.interactive_mode = False
self.debug = False
self.raw_base = 0
self.raw_big_endian = False
self.list_sections = False
self.print_bytes = False
self.raw_type = None
self.print_data = False
self.capstone_string = 0 # See lib.ui.visual.main_cmd_inst_output
self.show_mangling = True
self.autoanalyzer = True
self.debugsp = False
# Built objects
self.dis = None # Disassembler
self.libarch = None # module lib.arch.<BIN_ARCH>
self.db = None # Database
self.api = None # Api
def parse_args(self):
parser = ArgumentParser(description=
'Reverse engineering for x86/ARM/MIPS binaries. Generation of pseudo-C. '
'Supported formats : ELF, PE. More commands available in the interactive'
' mode. https://github.com/joelpx/plasma')
parser.add_argument('filename', nargs='?', metavar='FILENAME')
parser.add_argument('-nc', '--nocolor', action='store_true')
parser.add_argument('-g', '--graph', action='store_true',
help='Generate a file graph.dot.')
parser.add_argument('--noandif', action='store_true',
help="Print normal 'if' instead of 'andif'")
parser.add_argument('--datasize', type=int, default=30, metavar='N',
help='default 30, maximum of chars to display for strings or bytes array.')
parser.add_argument('-x', '--entry', metavar='SYMBOLNAME|0xXXXXX|EP',
help='Pseudo-decompilation, default is main. EP stands for entry point.')
parser.add_argument('--vim', action='store_true',
help='Generate syntax colors for vim')
parser.add_argument('-s', '--symbols', action='store_true',
help='Print all symbols')
parser.add_argument('--sections', action='store_true',
help='Print all sections')
parser.add_argument('--dump', action='store_true',
help='Dump asm without decompilation')
parser.add_argument('-l', '--lines', type=int, default=30, metavar='N',
help='Max lines used with --dump')
parser.add_argument('--nbytes', type=int, default=0, metavar='N',
help='Print n bytes.')
parser.add_argument('-i', '--interactive', action='store_true',
help='Interactive mode')
parser.add_argument('-d', '--opt_debug', action='store_true')
parser.add_argument('--raw', metavar='x86|x64|arm|mips|mips64',
help='Consider the input file as a raw binary')
parser.add_argument('--rawbase', metavar='0xXXXXX',
help='Set base address of a raw file (default=0)')
parser.add_argument('--rawbe', action='store_true',
help='If not set it\'s in little endian')
parser.add_argument('-na', '--noautoanalyzer', action='store_true',
            help='Disable analysis on the entry point / symbols and don\'t scan memory. You can force it with the command push_analyze_symbols.')
parser.add_argument('--debugsp', action='store_true',
help="Print the stack offset on each instructions. Warning: these values will not be saved in the database.")
args = parser.parse_args()
self.debug = args.opt_debug
self.print_andif = not args.noandif
self.color = not args.nocolor
self.max_data_size = args.datasize
self.filename = args.filename
self.raw_type = args.raw
self.raw_base = args.rawbase
self.syms = args.symbols
self.entry = args.entry
self.do_dump = args.dump
self.vim = args.vim
self.interactive_mode = args.interactive
self.nb_lines = args.lines
self.graph = args.graph
self.raw_big_endian = args.rawbe
self.list_sections = args.sections
self.autoanalyzer = not args.noautoanalyzer
self.debugsp = args.debugsp
if args.nbytes == 0:
self.nbytes = 4
self.print_bytes = False
else:
self.nbytes = int(args.nbytes)
self.print_bytes = True
if self.raw_base is not None:
try:
self.raw_base = int(self.raw_base, 16)
except:
error("--rawbase must be in hex format")
die()
else:
self.raw_base = 0
def load_file(self, filename=None):
if filename is None:
filename = self.filename
if not os.path.exists(filename):
error("file {self.filename} doesn't exist".format(self=self))
if self.interactive_mode:
return False
die()
if not os.path.isfile(filename):
error("this is not a file".format(self=self))
if self.interactive_mode:
return False
die()
self.db = Database()
self.db.load(filename)
if self.raw_base != 0:
self.db.raw_base = self.raw_base
if self.raw_type is not None:
self.db.raw_type = self.raw_type
if self.raw_big_endian is not None:
self.db.raw_is_big_endian = self.raw_big_endian
if self.db.loaded:
self.raw_base = self.db.raw_base
self.raw_type = self.db.raw_type
self.raw_big_endian = self.db.raw_is_big_endian
try:
dis = Disassembler(filename, self.raw_type,
self.raw_base, self.raw_big_endian,
self.db)
except ExcArch as e:
error("arch %s is not supported" % e.arch)
if self.interactive_mode:
return False
die()
except ExcFileFormat:
error("the file is not PE or ELF binary")
if self.interactive_mode:
return False
die()
except ExcPEFail as e:
error(str(e.e))
error("it seems that there is a random bug in pefile, you shoul retry.")
error("please report here https://github.com/joelpx/plasma/issues/16")
if self.interactive_mode:
return False
die()
self.dis = dis
self.libarch = dis.load_arch_module()
return True
def get_addr_context(self, ad, quiet=False):
adctx = AddrContext(self)
if isinstance(ad, int):
adctx.entry = self.db.mem.get_head_addr(ad)
return adctx
ret = adctx.init_address(ad, quiet=quiet) # here ad is a string
if not ret:
return None
adctx.entry = self.db.mem.get_head_addr(adctx.entry)
return adctx
#
# This is a context for a disassembling at a specific address, it contains
# the graph, the output... It's always named as "ctx"
#
class AddrContext():
def __init__(self, gctx):
# TODO : let globally ?
plasma.lib.colors.ctx = self
self.gctx = gctx # Global context
self.entry = 0
self.addr_color = {}
self.color_counter = 112
self.seen = set()
# If an address of an instruction cmp is here, it means that we
# have fused with an if, so don't print this instruction.
self.all_fused_inst = set()
self.is_dump = False
self.gph = None
self.ast = None
def init_address(self, entry, quiet=False):
if isinstance(entry, int):
self.entry = entry
return True
if entry == "EP":
self.entry = self.gctx.dis.binary.get_entry_point()
return True
if entry is None:
if self.gctx.raw_type is not None:
self.entry = 0
return True
self.entry = self.gctx.db.symbols.get("main", None) or \
self.gctx.db.symbols.get("_main", None) or \
self.gctx.dis.binary.get_entry_point()
if self.entry is None:
error("symbol main or _main not found, try with EP")
if self.gctx.interactive_mode:
return False
die()
return True
is_hexa = entry.startswith("0x")
if not is_hexa and self.gctx.api.is_reserved_prefix(entry):
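            # A reserved prefix (e.g. "sub_" or "loc_") wraps a hex address:
            # drop everything up to the underscore and parse the rest as hex.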
entry = entry[entry.index("_") + 1:]
is_hexa = True
if is_hexa:
try:
self.entry = int(entry, 16)
except:
if not quiet:
error("bad hexa string %s" % entry)
if self.gctx.interactive_mode:
return False
die()
return True
self.entry = self.gctx.db.demangled.get(entry, None) or \
self.gctx.db.symbols.get(entry, None) or \
self.gctx.dis.binary.section_names.get(entry, None)
if self.entry is None:
if not quiet:
error("symbol %s not found" % entry)
if self.gctx.interactive_mode:
return False
die()
return True
def decompile(self):
self.is_dump = False
self.gph, pe_nb_new_syms = self.gctx.dis.get_graph(self.entry)
if self.gph is None:
error("capstone can't disassemble here")
return None
self.gph.simplify()
if self.gctx.db.loaded and pe_nb_new_syms:
self.gctx.db.modified = True
try:
self.gph.loop_detection(self.entry)
ast, correctly_ended = generate_ast(self)
if not correctly_ended:
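                # The AST generation could not close every construct; redo the
                # loop detection in its alternate mode (second argument) and
                # generate the AST once more.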
debug__("Second try...")
self.gph.loop_detection(self.entry, True)
ast, _ = generate_ast(self)
self.ast = ast
except ExcIfelse as e:
error("can't have a ifelse here %x" % e.addr)
if self.gctx.interactive_mode:
return None
die()
o = self.gctx.libarch.output.Output(self)
o._ast(self.entry, ast)
self.output = o
return o
def dump_asm(self, lines=NB_LINES_TO_DISASM, until=-1):
self.is_dump = True
o = self.gctx.dis.dump_asm(self, lines=lines, until=until)
self.output = o
return o
def dump_xrefs(self):
self.is_dump = True
o = self.gctx.dis.dump_xrefs(self, self.entry)
self.output = o
return o
| chubbymaggie/reverse | plasma/lib/__init__.py | Python | gpl-3.0 | 12,594 | 0.005717 |
from office365.entity import Entity
from office365.outlook.calendar.email_address import EmailAddress
class CalendarPermission(Entity):
"""
The permissions of a user with whom the calendar has been shared or delegated in an Outlook client.
Get, update, and delete of calendar permissions is supported on behalf of only the calendar owner.
Getting the calendar permissions of a calendar on behalf of a sharee or delegate returns
an empty calendar permissions collection.
Once a sharee or delegate has been set up for a calendar, you can update only the role property to change
the permissions of a sharee or delegate. You cannot update the allowedRoles, emailAddress, isInsideOrganization,
or isRemovable property. To change these properties, you should delete the corresponding calendarPermission
object and create another sharee or delegate in an Outlook client.
"""
@property
def email_address(self):
"""
Represents a sharee or delegate who has access to the calendar.
For the "My Organization" sharee, the address property is null. Read-only.
"""
return self.properties.get("emailAddress", EmailAddress())
| vgrem/Office365-REST-Python-Client | office365/outlook/calendar/calendar_permission.py | Python | mit | 1,204 | 0.005814 |
"""Word cloud is ungraded xblock used by students to
generate and view word cloud.
On the client side we show:
If student does not yet answered - `num_inputs` numbers of text inputs.
If student have answered - words he entered and cloud.
"""
import json
import logging
from pkg_resources import resource_string
from xblock.fields import Boolean, Dict, Integer, List, Scope, String
from xblock.fragment import Fragment
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.x_module import XModule
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
def pretty_bool(value):
"""Check value for possible `True` value.
Using this function we can manage different type of Boolean value
in xml files.
"""
bool_dict = [True, "True", "true", "T", "t", "1"]
return value in bool_dict
class WordCloudFields(object):
"""XFields for word cloud."""
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
scope=Scope.settings,
default="Word cloud"
)
instructions = String(
display_name=_("Instructions"),
help=_("Add instructions to help learners understand how to use the word cloud. Clear instructions are important, especially for learners who have accessibility requirements."), # nopep8 pylint: disable=C0301
scope=Scope.settings,
)
num_inputs = Integer(
display_name=_("Inputs"),
help=_("The number of text boxes available for learners to add words and sentences."),
scope=Scope.settings,
default=5,
values={"min": 1}
)
num_top_words = Integer(
display_name=_("Maximum Words"),
help=_("The maximum number of words displayed in the generated word cloud."),
scope=Scope.settings,
default=250,
values={"min": 1}
)
display_student_percents = Boolean(
display_name=_("Show Percents"),
help=_("Statistics are shown for entered words near that word."),
scope=Scope.settings,
default=True
)
# Fields for descriptor.
submitted = Boolean(
help=_("Whether this learner has posted words to the cloud."),
scope=Scope.user_state,
default=False
)
student_words = List(
help=_("Student answer."),
scope=Scope.user_state,
default=[]
)
all_words = Dict(
help=_("All possible words from all learners."),
scope=Scope.user_state_summary
)
top_words = Dict(
help=_("Top num_top_words words for word cloud."),
scope=Scope.user_state_summary
)
class WordCloudModule(WordCloudFields, XModule):
"""WordCloud Xmodule"""
js = {
'js': [
resource_string(__name__, 'js/src/javascript_loader.js'),
],
}
css = {'scss': [resource_string(__name__, 'css/word_cloud/display.scss')]}
js_module_name = "WordCloud"
def get_state(self):
"""Return success json answer for client."""
if self.submitted:
total_count = sum(self.all_words.itervalues())
return json.dumps({
'status': 'success',
'submitted': True,
'display_student_percents': pretty_bool(
self.display_student_percents
),
'student_words': {
word: self.all_words[word] for word in self.student_words
},
'total_count': total_count,
'top_words': self.prepare_words(self.top_words, total_count)
})
else:
return json.dumps({
'status': 'success',
'submitted': False,
'display_student_percents': False,
'student_words': {},
'total_count': 0,
'top_words': {}
})
def good_word(self, word):
"""Convert raw word to suitable word."""
return word.strip().lower()
def prepare_words(self, top_words, total_count):
"""Convert words dictionary for client API.
:param top_words: Top words dictionary
:type top_words: dict
:param total_count: Total number of words
:type total_count: int
        :rtype: list of dicts. Every dict has 3 keys: text - the word itself,
        size - the word's count, percent - its percent within the top_words
        dataset.
        Calculates corrected percents for every top word: every word except
        the last gets a rounded percent, and the last gets 100 minus the sum
        of the others, so the percents always total exactly 100.
"""
list_to_return = []
percents = 0
for num, word_tuple in enumerate(top_words.iteritems()):
if num == len(top_words) - 1:
percent = 100 - percents
else:
percent = round(100.0 * word_tuple[1] / total_count)
percents += percent
list_to_return.append(
{
'text': word_tuple[0],
'size': word_tuple[1],
'percent': percent
}
)
return list_to_return
def top_dict(self, dict_obj, amount):
"""Return top words from all words, filtered by number of
occurences
:param dict_obj: all words
:type dict_obj: dict
:param amount: number of words to be in top dict
:type amount: int
:rtype: dict
"""
return dict(
sorted(
dict_obj.items(),
key=lambda x: x[1],
reverse=True
)[:amount]
)
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request get parameters
Returns:
json string
"""
if dispatch == 'submit':
if self.submitted:
return json.dumps({
'status': 'fail',
'error': 'You have already posted your data.'
})
# Student words from client.
# FIXME: we must use raw JSON, not a post data (multipart/form-data)
raw_student_words = data.getall('student_words[]')
student_words = filter(None, map(self.good_word, raw_student_words))
self.student_words = student_words
# FIXME: fix this, when xblock will support mutable types.
# Now we use this hack.
# speed issues
temp_all_words = self.all_words
self.submitted = True
# Save in all_words.
for word in self.student_words:
temp_all_words[word] = temp_all_words.get(word, 0) + 1
# Update top_words.
self.top_words = self.top_dict(
temp_all_words,
self.num_top_words
)
# Save all_words in database.
self.all_words = temp_all_words
return self.get_state()
elif dispatch == 'get_state':
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
def student_view(self, context):
"""
Renders the output that a student will see.
"""
fragment = Fragment()
fragment.add_content(self.system.render_template('word_cloud.html', {
'ajax_url': self.system.ajax_url,
'display_name': self.display_name,
'instructions': self.instructions,
'element_class': self.location.category,
'element_id': self.location.html_id(),
'num_inputs': self.num_inputs,
'submitted': self.submitted,
}))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/d3.min.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/d3.layout.cloud.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/word_cloud.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/word_cloud_main.js'))
return fragment
def author_view(self, context):
"""
Renders the output that an author will see.
"""
return self.student_view(context)
class WordCloudDescriptor(WordCloudFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
"""Descriptor for WordCloud Xmodule."""
module_class = WordCloudModule
resources_dir = 'assets/word_cloud'
template_dir_name = 'word_cloud'
| pepeportela/edx-platform | common/lib/xmodule/xmodule/word_cloud_module.py | Python | agpl-3.0 | 8,926 | 0.001232 |
# -*- coding: utf-8 -*-
from copy import deepcopy
from cfme.utils import conf
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
class FromConfigMixin(object):
@staticmethod
def rename_properties(creds):
"""
helper function to make properties have same names in credential objects.
Args:
creds: dict
Returns: updated dict
"""
creds = deepcopy(creds)
to_rename = [('password', 'secret'), ('username', 'principal')]
for key1, key2 in to_rename:
if key1 in creds:
creds[key2] = creds[key1]
del creds[key1]
return creds
@classmethod
def from_config(cls, key):
"""
helper function which allows to construct credential object from credentials.eyaml
Args:
key: credential key
Returns: credential object
"""
creds = cls.rename_properties(conf.credentials[key])
return cls(**creds)
@classmethod
def from_plaintext(cls, creds):
"""
helper function which allows to construct credential class from plaintext dict
Args:
creds: dict
Returns: credential object
"""
creds = cls.rename_properties(creds)
return cls(**creds)
class Credential(Pretty, Updateable, FromConfigMixin):
"""
A class to fill in credentials
Args:
principal: user name
secret: password
verify_secret: password
domain: concatenated with principal if defined
"""
pretty_attrs = ['principal', 'secret']
def __init__(self, principal, secret, verify_secret=None, domain=None,
tenant_id=None, subscription_id=None, **ignore):
self.principal = principal
self.secret = secret
self.verify_secret = verify_secret
self.domain = domain
self.tenant_id = tenant_id
self.subscription_id = subscription_id
def __getattribute__(self, attr):
if attr == 'verify_secret':
if object.__getattribute__(self, 'verify_secret') is None:
return object.__getattribute__(self, 'secret')
else:
return object.__getattribute__(self, 'verify_secret')
elif attr == 'principal':
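            # Return a domain-qualified principal (DOMAIN\user) whenever a
            # domain was supplied, e.g. for Active Directory style logins.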
domain = object.__getattribute__(self, 'domain')
principal = object.__getattribute__(self, 'principal')
return r'{}\{}'.format(domain, principal) if domain else principal
else:
return super(Credential, self).__getattribute__(attr)
@property
def view_value_mapping(self):
"""
used for filling forms like add/edit provider form
Returns: dict
"""
return {
'username': self.principal,
'password': self.secret,
'confirm_password': None
}
def __eq__(self, other):
if other is None:
return False
return self.principal == other.principal and self.secret == other.secret and \
self.verify_secret == other.verify_secret
def __ne__(self, other):
return not self.__eq__(other)
class EventsCredential(Credential):
pass
class CANDUCredential(Credential):
pass
class AzureCredential(Credential):
pass
class SSHCredential(Credential):
@property
def view_value_mapping(self):
"""
used for filling forms like add/edit provider form
Returns: dict
"""
return {
'username': self.principal,
'private_key': self.secret,
}
class TokenCredential(Pretty, Updateable, FromConfigMixin):
"""
A class to fill in credentials
Args:
token: identification token
verify_token: token once more
"""
pretty_attrs = ['token']
def __init__(self, token, verify_token=None, **kwargs):
self.token = token
self.verify_token = verify_token
for name, value in kwargs.items():
setattr(self, name, value)
def __getattribute__(self, attr):
if attr == 'verify_token':
if object.__getattribute__(self, 'verify_token') is not None:
return object.__getattribute__(self, 'verify_token')
else:
return object.__getattribute__(self, 'token')
else:
return super(TokenCredential, self).__getattribute__(attr)
def __eq__(self, other):
return self.token == other.token and self.verify_token == other.verify_token
def __ne__(self, other):
return not self.__eq__(other)
@property
def view_value_mapping(self):
"""
used for filling forms like add/edit provider form
Returns: dict
"""
return {
'token': self.token,
'verify_token': None
}
class ServiceAccountCredential(Pretty, Updateable):
"""
A class to fill in credentials
Args:
service_account: service account string
"""
pretty_attrs = ['service_account']
def __init__(self, service_account):
        super(ServiceAccountCredential, self).__init__()
self.service_account = service_account
@property
def view_value_mapping(self):
"""
used for filling forms like add/edit provider form
Returns: dict
"""
return {
'service_account': self.service_account
}
def __eq__(self, other):
return self.service_account == other.service_account
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def from_config(cls, key):
# TODO: refactor this. consider json.dumps
creds = deepcopy(conf.credentials[key])
service_data = creds['service_account']
service_account = '''
"type": "{type}",
"project_id": "{project}",
"private_key_id": "{private_key_id}",
"private_key": "{private_key}",
"client_email": "{email}",
"client_id": "{client}",
"auth_uri": "{auth}",
"token_uri": "{token}",
"auth_provider_x509_cert_url": "{auth_provider}",
"client_x509_cert_url": "{cert_url}"
'''.format(
type=service_data.get('type'),
project=service_data.get('project_id'),
private_key_id=service_data.get('private_key_id'),
private_key=service_data.get('private_key').replace('\n', '\\n'),
email=service_data.get('client_email'),
client=service_data.get('client_id'),
auth=service_data.get('auth_uri'),
token=service_data.get('token_uri'),
auth_provider=service_data.get('auth_provider_x509_cert_url'),
cert_url=service_data.get('client_x509_cert_url'))
service_account = '{' + service_account + '}'
return cls(service_account=service_account)
| anurag03/integration_tests | cfme/base/credential.py | Python | gpl-2.0 | 7,024 | 0.000712 |
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.core.exceptions import ValidationError
from make_mozilla.core import fields
class Page(models.Model):
title = models.CharField(max_length=255)
path = models.SlugField()
real_path = models.CharField(max_length=1024, unique=True, blank=True)
parent = models.ForeignKey('self', blank=True, null=True,
help_text='This will allow you to use URLs like /about/foo - parent.path + path',
related_name='children')
show_subnav = models.BooleanField(default=False,
verbose_name='Show sub-navigation menu')
subnav_title = models.CharField(max_length=100, blank=True, null=True,
verbose_name='Menu title', help_text='This can be left blank if you do not need a title')
additional_content = models.TextField(blank=True, null=True)
def has_ancestor(self, page):
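        # Walk up the parent chain; True if `page` appears anywhere above
        # this page. clean() relies on this to reject parenting cycles.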
if not self.parent:
return False
if self.parent.id == page.id:
return True
return self.parent.has_ancestor(page)
def get_section_root(self):
return self.real_path.split('/')[0]
def clean(self):
self.path = self.path.strip('/')
if self.parent:
if self.parent.has_ancestor(self):
raise ValidationError('Cannot set page parent to one of its descendants')
self.real_path = '%s/%s' % (self.parent.real_path, self.path)
else:
self.real_path = self.path
try:
if Page.objects.exclude(id__exact=self.id).get(real_path=self.real_path):
raise ValidationError('This path/parent combination already exists.')
except Page.DoesNotExist:
# We can safely ignore this, as it means we're in the clear and our path is fine
pass
def save(self, *args, **kwargs):
super(Page, self).save(*args, **kwargs)
# Now we tell our children to update their real paths
# This will happen recursively, so we don't need to worry about that logic here
for child in self.children.all():
child.real_path = '%s/%s' % (self.real_path, child.path)
child.save()
def __unicode__(self):
return self.title
@property
def indented_title(self):
indent = len(self.real_path.split('/')) - 1
if not indent:
return self.title
return '%s %s' % ('-' * indent, self.title)
def get_absolute_url(self):
return reverse('page', args=[self.real_path])
class PageSection(models.Model):
title = models.CharField(max_length=255)
subnav_title = models.CharField(max_length=255, blank=True, null=True,
verbose_name='Sub-navigation title', help_text='Will use the section title if blank')
page = models.ForeignKey('Page', related_name='sections')
poster = fields.SizedImageField(
blank=True,
null=True,
upload_to='pages',
storage=FileSystemStorage(**settings.UPLOADED_IMAGES),
sizes={
'standard': 900,
'tablet': 700,
'handheld': 500,
})
content = models.TextField()
sidebar = models.TextField(blank=True, null=True)
quotes = models.ManyToManyField('Quote', blank=True, null=True)
class Meta:
verbose_name = 'section'
ordering = ['id']
def __unicode__(self):
return mark_safe(self.title)
@property
def nav_title(self):
if self.subnav_title:
return mark_safe(self.subnav_title)
return unicode(self)
@property
def has_sidebar(self):
return self.sidebar or self.quotes.count()
class Quote(models.Model):
quote = models.CharField(max_length=1000)
source = models.ForeignKey('QuoteSource', blank=True, null=True)
url = models.URLField(blank=True, null=True, verbose_name='URL')
show_source_image = models.BooleanField(default=False, help_text='Show the source\'s image next to this quote, if available')
@property
def clean_quote(self):
return strip_tags(self.quote)
def __unicode__(self):
quote = self.clean_quote
if len(quote) > 25:
quote = quote[:25] + '...'
if not self.source:
return quote
return '%s (%s)' % (quote, self.source.name)
class QuoteSource(models.Model):
name = models.CharField(max_length=255)
strapline = models.CharField(max_length=255, blank=True, null=True, help_text='"Teacher", "CEO, MegaCorp", ...')
url = models.URLField(blank=True, null=True, verbose_name='URL')
avatar = fields.SizedImageField(
blank=True,
null=True,
verbose_name='Image',
upload_to='avatars',
storage=FileSystemStorage(**settings.UPLOADED_IMAGES),
sizes={
'adjusted': (90,90),
})
class Meta:
verbose_name = 'source'
def __unicode__(self):
if self.strapline:
return '%s - %s' % (self.name, self.strapline)
return self.name
| mozilla/make.mozilla.org | make_mozilla/pages/models.py | Python | bsd-3-clause | 5,208 | 0.003072 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DiagnosticSettingsOperations:
"""DiagnosticSettingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2017_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_uri: str,
name: str,
**kwargs: Any
) -> "_models.DiagnosticSettingsResource":
"""Gets the active diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param name: The name of the diagnostic setting.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticSettingsResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticSettingsResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-05-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiagnosticSettingsResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings/{name}'} # type: ignore
async def create_or_update(
self,
resource_uri: str,
name: str,
parameters: "_models.DiagnosticSettingsResource",
**kwargs: Any
) -> "_models.DiagnosticSettingsResource":
"""Creates or updates diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param name: The name of the diagnostic setting.
:type name: str
:param parameters: Parameters supplied to the operation.
:type parameters: ~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticSettingsResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticSettingsResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-05-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DiagnosticSettingsResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiagnosticSettingsResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings/{name}'} # type: ignore
async def delete(
self,
resource_uri: str,
name: str,
**kwargs: Any
) -> None:
"""Deletes existing diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param name: The name of the diagnostic setting.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-05-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings/{name}'} # type: ignore
async def list(
self,
resource_uri: str,
**kwargs: Any
) -> "_models.DiagnosticSettingsResourceCollection":
"""Gets the active diagnostic settings list for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticSettingsResourceCollection, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsResourceCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticSettingsResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-05-01-preview"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiagnosticSettingsResourceCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings'} # type: ignore
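# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the generated client):
# how this operation group is typically reached. It assumes the
# `azure-mgmt-monitor` and `azure-identity` packages are installed, and that
# the multi-API management client attaches this group as `diagnostic_settings`.
async def _example_list_diagnostic_settings(subscription_id: str, resource_uri: str):
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.monitor.aio import MonitorManagementClient

    async with DefaultAzureCredential() as credential:
        async with MonitorManagementClient(credential, subscription_id) as client:
            collection = await client.diagnostic_settings.list(resource_uri)
            # DiagnosticSettingsResourceCollection exposes the settings as `.value`.
            return [setting.name for setting in collection.value]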
| Azure/azure-sdk-for-python | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_05_01_preview/aio/operations/_diagnostic_settings_operations.py | Python | mit | 12,838 | 0.004674 |
import datetime
import functools
import os
import random
import shutil
import tempfile
import time
from urllib.parse import urljoin
import faker
import magic
import pendulum
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import tag
from django.urls import reverse
from django.utils import timezone
from openpyxl import load_workbook
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from attribution.tests.factories.attribution import AttributionFactory
from base.tests.factories.academic_calendar import (AcademicCalendarExamSubmissionFactory,
AcademicCalendarFactory)
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.exam_enrollment import ExamEnrollmentFactory
from base.tests.factories.learning_unit import LearningUnitFactory
from base.tests.factories.learning_unit_enrollment import \
LearningUnitEnrollmentFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFactory
from base.tests.factories.offer_enrollment import OfferEnrollmentFactory
from base.tests.factories.offer_year import OfferYearFactory
from base.tests.factories.offer_year_calendar import OfferYearCalendarFactory
from base.tests.factories.person import PersonFactory
from base.tests.factories.program_manager import ProgramManagerFactory
from base.tests.factories.session_exam_calendar import \
SessionExamCalendarFactory
from base.tests.factories.session_examen import SessionExamFactory
from base.tests.factories.student import StudentFactory
from base.tests.factories.tutor import TutorFactory
from base.tests.factories.user import SuperUserFactory, UserFactory
class BusinessMixin:
def create_user(self):
return UserFactory()
def create_super_user(self):
return SuperUserFactory()
def add_group(self, user, *group_names):
for name in group_names:
group, created = Group.objects.get_or_create(name=name)
group.user_set.add(user)
def add_permission(self, user, *permission_names):
for permission_name in permission_names:
if '.' in permission_name:
label, codename = permission_name.split('.')
permission = Permission.objects.get(codename=codename, content_type__app_label=label)
else:
permission = Permission.objects.get(codename=permission_name)
user.user_permissions.add(permission)
@tag('selenium')
class SeleniumTestCase(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.sel_settings = settings.SELENIUM_SETTINGS
print("### Virtual Display : {}".format(cls.sel_settings.get('VIRTUAL_DISPLAY')))
cls.screen_size = (cls.sel_settings.get('SCREEN_WIDTH'), cls.sel_settings.get('SCREEN_HIGH'))
cls.full_path_temp_dir = tempfile.mkdtemp('osis-selenium')
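        # Both browser profiles below are pointed at this temp dir, so tests can
        # assert on downloaded exports without a save-file prompt.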
if cls.sel_settings.get('WEB_BROWSER').upper() == 'FIREFOX':
fp = webdriver.FirefoxProfile()
fp.set_preference('browser.download.dir', cls.full_path_temp_dir)
fp.set_preference('browser.download.folderList', 2)
fp.set_preference('browser.download.manager.showWhenStarting', False)
fp.set_preference('pdfjs.disabled', True)
known_mimes = ['application/vnd.ms-excel',
'application/pdf',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet']
fp.set_preference('browser.helperApps.neverAsk.saveToDisk', ','.join(known_mimes))
options = Options()
if cls.sel_settings.get('VIRTUAL_DISPLAY'):
options.add_argument('-headless')
cls.driver = webdriver.Firefox(executable_path=cls.sel_settings.get('GECKO_DRIVER'),
firefox_profile=fp, firefox_options=options)
if cls.sel_settings.get('WEB_BROWSER').upper() == 'CHROME':
options = webdriver.ChromeOptions()
if cls.sel_settings.get('VIRTUAL_DISPLAY'):
options.add_argument('-headless')
options.add_experimental_option('prefs', {
'download.default_directory': cls.full_path_temp_dir,
'download.prompt_for_download': False,
'download.directory_upgrade': True,
'safebrowsing.enabled': True
})
cls.driver = webdriver.Chrome(chrome_options=options)
cls.driver.implicitly_wait(10)
cls.driver.set_window_size(*cls.screen_size)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.full_path_temp_dir)
cls.driver.quit()
super().tearDownClass()
def get_url_by_name(self, url_name, *args, **kwargs):
        return urljoin(self.live_server_url, reverse(url_name, args=args, kwargs=kwargs))
def goto(self, url_name, *args, **kwargs):
url = self.get_url_by_name(url_name, *args, **kwargs)
self.driver.get(url)
def fill_by_id(self, field_id, value):
field = self.driver.find_element_by_id(field_id)
field.clear()
field.send_keys(value)
def login(self, username, password='password123'):
self.goto('login')
self.fill_by_id('id_username', username)
self.fill_by_id('id_password', password)
self.click_on('post_login_btn')
def click_on(self, id_):
self.driver.find_element_by_id(id_).click()
def get_element(self, id_):
return self.driver.find_element_by_id(id_)
def get_element_text(self, id_):
return self.get_element(id_).text
def assertElementTextEqualInt(self, id_, value):
self.assertEqual(int(self.get_element_text(id_)), value)
def assertElementTextEqual(self, id_, text):
self.assertEqual(self.get_element_text(id_), text)
def assertCurrentUrl(self, url_name, *args, **kwargs):
self.assertEqual(
self.driver.current_url,
self.get_url_by_name(url_name, *args, **kwargs)
)
class FunctionalTest(SeleniumTestCase, BusinessMixin):
def test_01_scenario_modifier_periode_encoding(self):
user = self.create_super_user()
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarFactory.build(academic_year=academic_year)
academic_calendar.save()
self.login(user.username)
self.goto('academic_calendar_read', academic_calendar_id=academic_calendar.id)
self.click_on('bt_academic_calendar_edit')
new_date = academic_calendar.start_date - datetime.timedelta(days=5)
new_date_str = new_date.strftime('%m/%d/%Y')
self.fill_by_id('txt_start_date', new_date_str)
self.driver.execute_script("scroll(0, 250)")
self.click_on('bt_academic_calendar_save')
        self.assertCurrentUrl('academic_calendar_form', academic_calendar_id=academic_calendar.id)
self.assertElementTextEqual('ac_start_date', new_date_str)
def test_01_scenario_modifier_period_encoding_date_fin(self):
user = self.create_super_user()
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarFactory.build(academic_year=academic_year)
academic_calendar.save()
self.login(user.username)
self.goto('academic_calendar_read', academic_calendar_id=academic_calendar.id)
self.click_on('bt_academic_calendar_edit')
new_date = academic_calendar.end_date + datetime.timedelta(days=5)
new_date_str = new_date.strftime('%m/%d/%Y')
self.fill_by_id('txt_end_date', new_date_str)
self.driver.execute_script("scroll(0, 250)")
self.click_on('bt_academic_calendar_save')
self.assertCurrentUrl('academic_calendar_form', academic_calendar_id=academic_calendar.id)
self.assertElementTextEqual('ac_end_date', new_date_str)
def test_03(self):
user = self.create_user()
self.add_group(user, 'program_managers')
self.add_permission(user, 'assessments.can_access_scoreencoding')
start_date = timezone.now() + datetime.timedelta(days=20)
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarExamSubmissionFactory.build(
academic_year=academic_year,
start_date=start_date,
end_date=start_date + datetime.timedelta(days=10),
)
academic_calendar.save()
person = PersonFactory(user=user, language='fr-be')
offer_year = OfferYearFactory(academic_year=academic_year)
ProgramManagerFactory(offer_year=offer_year, person=person)
sec = SessionExamCalendarFactory(academic_calendar=academic_calendar)
self.login(user.username)
self.goto('scores_encoding')
warning_messages = self.driver.find_element_by_id('pnl_warning_messages')
self.assertEqual(
warning_messages.text,
"La période d'encodage des notes pour la session {} sera ouverte à partir du {}".
format(str(sec.number_session), academic_calendar.start_date.strftime('%d/%m/%Y'))
)
def test_04(self):
user = self.create_user()
self.add_group(user, 'program_managers')
self.add_permission(user, 'can_access_academic_calendar', 'assessments.can_access_scoreencoding')
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarExamSubmissionFactory.build(academic_year=academic_year)
academic_calendar.save()
person = PersonFactory(user=user, language='fr-be')
offer_year_factory = functools.partial(OfferYearFactory, academic_year=academic_year)
acronyms = ['PHYS11BA', 'ECON2M1', 'PHYS1BA', 'PHYS2M1', 'PHYS2MA']
offers = {
acronym: offer_year_factory(acronym=acronym)
for acronym in acronyms
}
program_manager_factory = functools.partial(
ProgramManagerFactory, person=person
)
for acronym, offer_year in offers.items():
program_manager_factory(offer_year=offer_year)
student1 = StudentFactory()
student2 = StudentFactory()
student3 = StudentFactory()
student10 = StudentFactory()
student11 = StudentFactory()
student12 = StudentFactory()
student13 = StudentFactory()
student14 = StudentFactory()
student15 = StudentFactory()
student16 = StudentFactory()
offer_enrollment1 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student1)
offer_enrollment2 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student2)
offer_enrollment3 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student3)
offer_enrollment10 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student10)
offer_enrollment11 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student11)
offer_enrollment12 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student12)
offer_enrollment13 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student13)
offer_enrollment14 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student14)
offer_enrollment15 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student15)
offer_enrollment16 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student16)
offer_enrollment4 = OfferEnrollmentFactory(offer_year=offers['ECON2M1'], student=student1)
offer_enrollment5 = OfferEnrollmentFactory(offer_year=offers['ECON2M1'], student=student2)
        # teaching unit ("unité d'enseignement") = learning_unit_year
learning_unit = LearningUnitFactory()
learning_unit_year_1 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit)
learning_unit2 = LearningUnitFactory()
learning_unit_year_2 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit2)
learning_unit3 = LearningUnitFactory()
learning_unit_year_3 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit3)
learning_unit_enrollment1 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment1, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment2 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment2, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment3 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment3, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment10 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment10, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment11 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment11, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment12 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment12, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment13 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment13, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment14 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment14, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment15 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment15, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment16 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment16, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment4 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment4, learning_unit_year=learning_unit_year_2)
learning_unit_enrollment5 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment5, learning_unit_year=learning_unit_year_2)
learning_unit_enrollment6 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment1, learning_unit_year=learning_unit_year_3)
learning_unit_enrollment7 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment5, learning_unit_year=learning_unit_year_3)
session_exam_calendar = SessionExamCalendarFactory(academic_calendar=academic_calendar)
session_exam_phys11ba = SessionExamFactory(learning_unit_year=learning_unit_year_1, number_session=session_exam_calendar.number_session, offer_year=offers['PHYS11BA'])
session_exam_econ2m1 = SessionExamFactory(learning_unit_year=learning_unit_year_2, number_session=session_exam_calendar.number_session, offer_year=offers['ECON2M1'])
session_exam_3 = SessionExamFactory(learning_unit_year=learning_unit_year_3, number_session=session_exam_calendar.number_session, offer_year=offers['ECON2M1'])
session_exam_4 = SessionExamFactory(learning_unit_year=learning_unit_year_3, number_session=session_exam_calendar.number_session, offer_year=offers['PHYS11BA'])
offer_year_calendar = OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offers['PHYS11BA'])
offer_year_calendar = OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offers['ECON2M1'])
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment1, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment2, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment3, session_exam=session_exam_phys11ba)
exam_enrollment_10 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment10, session_exam=session_exam_phys11ba)
exam_enrollment_11 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment11, session_exam=session_exam_phys11ba)
exam_enrollment_12 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment12, session_exam=session_exam_phys11ba)
exam_enrollment_13 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment13, session_exam=session_exam_phys11ba)
exam_enrollment_14 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment14, session_exam=session_exam_phys11ba)
exam_enrollment_15 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment15, session_exam=session_exam_phys11ba)
exam_enrollment_16 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment16, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment4, session_exam=session_exam_econ2m1)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment5, session_exam=session_exam_econ2m1)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment6, session_exam=session_exam_3)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment7, session_exam=session_exam_4)
self.login(user.username)
self.goto('scores_encoding')
select = Select(self.get_element('slt_offer_list_selection'))
all_options = set(option.text for option in select.options)
all_offers = set(offers.keys())
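        # The offer dropdown should list every managed acronym plus the French "All" option ('Tout').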
self.assertSetEqual({'Tout'}, all_options - all_offers)
self.assertElementTextEqualInt('scores_encoding_learning_units', 3)
self.assertEqual(len(all_options - {'Tout'}), 5)
self.click_on('lnk_encode_{}'.format(learning_unit_year_1.id))
# progression = self.driver.find_element_by_id('luy_progression').text
# self.assertEqual(progression, '0 / 10')
self.assertFalse(learning_unit_year_1.decimal_scores)
self.assertElementTextEqual(
'message_decimal_accepted',
"Les notes de ce cours ne peuvent PAS recevoir de valeurs décimales."
)
self.assertElementTextEqualInt('number_of_enrollments', 10)
the_first = 1
element = self.driver.find_element_by_css_selector("[tabindex='%d']" % the_first)
element_id = element.get_attribute('id')
element.clear()
element.send_keys(12)
enrollment_id = int(element_id.split('_')[-1])
self.click_on('bt_save_online_encoding_up')
self.assertElementTextEqual('luy_progression', '1 / 10')
self.assertElementTextEqualInt('enrollment_note_{}'.format(enrollment_id), 12)
element = self.driver.find_element_by_css_selector('td#enrollment_status_{} span'.format(enrollment_id))
self.assertIn('glyphicon-send', element.get_attribute('class').split())
self.click_on('lnk_encode')
note_enrollments = {}
for counter in range(2, 11):
element = self.driver.find_element_by_css_selector("[tabindex='%d']" % counter)
element_id = element.get_attribute('id')
enrollment_id = int(element_id.split('_')[-1])
self.fill_by_id(element_id, counter)
note_enrollments[enrollment_id] = counter
self.click_on('bt_save_online_encoding_up')
self.assertElementTextEqual('luy_progression', '10 / 10')
for enrollment_id, value in note_enrollments.items():
self.assertElementTextEqualInt('enrollment_note_{}'.format(enrollment_id), value)
self.click_on('lnk_encode')
note_enrollments = set()
for counter in range(1, 11):
element = self.driver.find_element_by_css_selector("[tabindex='%d']" % counter)
element_id = element.get_attribute('id')
enrollment_id = int(element_id.split('_')[-1])
element.clear()
note_enrollments.add(enrollment_id)
self.click_on('bt_save_online_encoding_up')
self.assertElementTextEqual('luy_progression', '0 / 10')
for enrollment_id in note_enrollments:
self.assertElementTextEqual('enrollment_note_{}'.format(enrollment_id), '-')
def test_05(self):
user = self.create_user()
self.add_group(user, 'program_managers', 'tutors')
self.add_permission(user, 'can_access_academic_calendar', 'assessments.can_access_scoreencoding')
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarExamSubmissionFactory.build(academic_year=academic_year)
academic_calendar.save()
person = PersonFactory(
user=user,
first_name=user.first_name,
last_name=user.last_name,
language='fr-be'
)
offer_year_factory = functools.partial(OfferYearFactory, academic_year=academic_year)
acronyms = ['PHYS11BA', 'ECON2M1', 'PHYS1BA', 'PHYS2M1', 'PHYS2MA']
offers = {
acronym: offer_year_factory(acronym=acronym)
for acronym in acronyms
}
program_manager_factory = functools.partial(
ProgramManagerFactory, person=person
)
for acronym, offer_year in offers.items():
program_manager_factory(offer_year=offer_year)
student1 = StudentFactory()
student2 = StudentFactory()
student3 = StudentFactory()
student10 = StudentFactory()
student11 = StudentFactory()
student12 = StudentFactory()
student13 = StudentFactory()
student14 = StudentFactory()
student15 = StudentFactory()
student16 = StudentFactory()
offer_enrollment1 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student1)
offer_enrollment2 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student2)
offer_enrollment3 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student3)
offer_enrollment10 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student10)
offer_enrollment11 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student11)
offer_enrollment12 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student12)
offer_enrollment13 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student13)
offer_enrollment14 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student14)
offer_enrollment15 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student15)
offer_enrollment16 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student16)
offer_enrollment4 = OfferEnrollmentFactory(offer_year=offers['ECON2M1'], student=student1)
offer_enrollment5 = OfferEnrollmentFactory(offer_year=offers['ECON2M1'], student=student2)
learning_unit = LearningUnitFactory()
learning_unit_year_1 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit)
tutor = TutorFactory(person=person)
attribution = AttributionFactory(
tutor=tutor, learning_unit_year=learning_unit_year_1,
score_responsible=True
)
learning_unit2 = LearningUnitFactory()
learning_unit_year_2 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit2)
learning_unit3 = LearningUnitFactory()
learning_unit_year_3 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit3)
learning_unit_enrollment1 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment1, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment2 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment2, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment3 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment3, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment10 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment10, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment11 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment11, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment12 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment12, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment13 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment13, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment14 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment14, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment15 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment15, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment16 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment16, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment4 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment4, learning_unit_year=learning_unit_year_2)
learning_unit_enrollment5 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment5, learning_unit_year=learning_unit_year_2)
learning_unit_enrollment6 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment1, learning_unit_year=learning_unit_year_3)
learning_unit_enrollment7 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment5, learning_unit_year=learning_unit_year_3)
session_exam_calendar = SessionExamCalendarFactory(academic_calendar=academic_calendar)
session_exam_phys11ba = SessionExamFactory(learning_unit_year=learning_unit_year_1, number_session=session_exam_calendar.number_session, offer_year=offers['PHYS11BA'])
session_exam_econ2m1 = SessionExamFactory(learning_unit_year=learning_unit_year_2, number_session=session_exam_calendar.number_session, offer_year=offers['ECON2M1'])
session_exam_3 = SessionExamFactory(learning_unit_year=learning_unit_year_3, number_session=session_exam_calendar.number_session, offer_year=offers['ECON2M1'])
session_exam_4 = SessionExamFactory(learning_unit_year=learning_unit_year_3, number_session=session_exam_calendar.number_session, offer_year=offers['PHYS11BA'])
offer_year_calendar = OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offers['PHYS11BA'])
offer_year_calendar = OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offers['ECON2M1'])
exam_enrollment_1 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment1, session_exam=session_exam_phys11ba)
exam_enrollment_2 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment2, session_exam=session_exam_phys11ba)
exam_enrollment_3 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment3, session_exam=session_exam_phys11ba)
exam_enrollment_10 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment10, session_exam=session_exam_phys11ba)
exam_enrollment_11 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment11, session_exam=session_exam_phys11ba)
exam_enrollment_12 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment12, session_exam=session_exam_phys11ba)
exam_enrollment_13 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment13, session_exam=session_exam_phys11ba)
exam_enrollment_14 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment14, session_exam=session_exam_phys11ba)
exam_enrollment_15 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment15, session_exam=session_exam_phys11ba)
exam_enrollment_16 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment16, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment4, session_exam=session_exam_econ2m1)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment5, session_exam=session_exam_econ2m1)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment6, session_exam=session_exam_3)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment7, session_exam=session_exam_4)
self.login(user.username)
self.goto('scores_encoding')
select = Select(self.driver.find_element_by_id('slt_offer_list_selection'))
select.select_by_visible_text('PHYS11BA')
self.click_on('bt_submit_offer_search')
time.sleep(1)
self.click_on('lnk_via_excel')
time.sleep(1)
self.click_on('lnk_scores_encoding_download_{}'.format(learning_unit_year_1.id))
time.sleep(1)
filename = 'session_{}_{}_{}.xlsx'.format(academic_year.year,
session_exam_calendar.number_session,
learning_unit_year_1.acronym)
full_path = os.path.join(self.full_path_temp_dir, filename)
self.assertTrue(os.path.exists(full_path))
exam_enrollments = [
exam_enrollment_1, exam_enrollment_2, exam_enrollment_3,
exam_enrollment_10, exam_enrollment_11, exam_enrollment_12,
exam_enrollment_13, exam_enrollment_14, exam_enrollment_15,
exam_enrollment_16
]
updated_values = self.update_xlsx(full_path, exam_enrollments)
self.goto('online_encoding', learning_unit_year_id=learning_unit_year_1.id)
self.driver.save_screenshot(os.path.join(self.full_path_temp_dir, 'scenario5-before_xls.png'))
self.click_on('bt_upload_score_modal')
time.sleep(1)
self.driver.execute_script("document.getElementById('fle_scores_input_file').style.display = 'block'")
self.fill_by_id('fle_scores_input_file', full_path)
time.sleep(1)
self.click_on('bt_submit_upload_score_modal')
self.assertElementTextEqual('luy_progression', '10 / 10')
self.driver.save_screenshot(os.path.join(self.full_path_temp_dir, 'scenario5-final.png'))
for enrollment_id, (key, value) in updated_values.items():
element_id = 'enrollment_{}_{}'.format(key, enrollment_id)
value = {'T': 'Tricherie', 'A': 'Absence injustifiée'}.get(value, value)
self.assertElementTextEqual(element_id, str(value))
def update_xlsx(self, filename, exam_enrollments):
fake = faker.Faker()
wb = load_workbook(filename)
enrollments = {}
sheet = wb.active
        # Score rows start below the 11-row header block of the exported sheet;
        # the original conditional left start_row undefined for shorter sheets.
        start_row = 12
nomas = {
enrollment.learning_unit_enrollment.offer_enrollment.student.registration_id: {
'enrollment': enrollment,
'position': None
}
for enrollment in exam_enrollments
}
for counter in range(len(exam_enrollments)):
noma = sheet['E{}'.format(counter + start_row)].value
nomas[noma]['position'] = counter
for noma, info in nomas.items():
left_or_right = bool(random.getrandbits(1))
selected_column = 'I' if left_or_right else 'J'
if left_or_right:
value = random.randint(0, 20)
key = 'note'
else:
value = fake.random_element(elements=('A', 'T'))
key = 'justification'
sheet['{}{}'.format(selected_column, info['position'] + start_row)] = value
enrollments[info['enrollment'].id] = key, value
wb.save(filename=filename)
return enrollments
def test_06(self):
user = self.create_user()
self.add_group(user, 'program_managers', 'tutors')
self.add_permission(user, 'can_access_academic_calendar', 'assessments.can_access_scoreencoding')
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarExamSubmissionFactory.build(academic_year=academic_year)
academic_calendar.save()
person = PersonFactory(
user=user,
first_name=user.first_name,
last_name=user.last_name,
language='fr-be'
)
offer_year_factory = functools.partial(OfferYearFactory, academic_year=academic_year)
acronyms = ['PHYS11BA', 'ECON2M1', 'PHYS1BA', 'PHYS2M1', 'PHYS2MA']
offers = {
acronym: offer_year_factory(acronym=acronym)
for acronym in acronyms
}
program_manager_factory = functools.partial(
ProgramManagerFactory, person=person
)
for acronym, offer_year in offers.items():
program_manager_factory(offer_year=offer_year)
student1 = StudentFactory()
student2 = StudentFactory()
student3 = StudentFactory()
student10 = StudentFactory()
student11 = StudentFactory()
student12 = StudentFactory()
student13 = StudentFactory()
student14 = StudentFactory()
student15 = StudentFactory()
student16 = StudentFactory()
offer_enrollment1 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student1)
offer_enrollment2 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student2)
offer_enrollment3 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student3)
offer_enrollment10 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student10)
offer_enrollment11 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student11)
offer_enrollment12 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student12)
offer_enrollment13 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student13)
offer_enrollment14 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student14)
offer_enrollment15 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student15)
offer_enrollment16 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student16)
offer_enrollment4 = OfferEnrollmentFactory(offer_year=offers['ECON2M1'], student=student1)
offer_enrollment5 = OfferEnrollmentFactory(offer_year=offers['ECON2M1'], student=student2)
        # teaching unit ("unité d'enseignement") = learning_unit_year
learning_unit = LearningUnitFactory()
learning_unit_year_1 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit)
tutor = TutorFactory(person=person)
attribution = AttributionFactory(
tutor=tutor, learning_unit_year=learning_unit_year_1,
score_responsible=True
)
learning_unit2 = LearningUnitFactory()
learning_unit_year_2 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit2)
learning_unit3 = LearningUnitFactory()
learning_unit_year_3 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit3)
learning_unit_enrollment1 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment1, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment2 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment2, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment3 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment3, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment10 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment10, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment11 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment11, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment12 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment12, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment13 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment13, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment14 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment14, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment15 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment15, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment16 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment16, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment4 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment4, learning_unit_year=learning_unit_year_2)
learning_unit_enrollment5 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment5, learning_unit_year=learning_unit_year_2)
learning_unit_enrollment6 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment1, learning_unit_year=learning_unit_year_3)
learning_unit_enrollment7 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment5, learning_unit_year=learning_unit_year_3)
session_exam_calendar = SessionExamCalendarFactory(academic_calendar=academic_calendar)
session_exam_phys11ba = SessionExamFactory(learning_unit_year=learning_unit_year_1, number_session=session_exam_calendar.number_session, offer_year=offers['PHYS11BA'])
session_exam_econ2m1 = SessionExamFactory(learning_unit_year=learning_unit_year_2, number_session=session_exam_calendar.number_session, offer_year=offers['ECON2M1'])
session_exam_3 = SessionExamFactory(learning_unit_year=learning_unit_year_3, number_session=session_exam_calendar.number_session, offer_year=offers['ECON2M1'])
session_exam_4 = SessionExamFactory(learning_unit_year=learning_unit_year_3, number_session=session_exam_calendar.number_session, offer_year=offers['PHYS11BA'])
offer_year_calendar = OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offers['PHYS11BA'])
offer_year_calendar = OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offers['ECON2M1'])
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment1, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment2, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment3, session_exam=session_exam_phys11ba)
exam_enrollment_10 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment10, session_exam=session_exam_phys11ba)
exam_enrollment_11 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment11, session_exam=session_exam_phys11ba)
exam_enrollment_12 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment12, session_exam=session_exam_phys11ba)
exam_enrollment_13 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment13, session_exam=session_exam_phys11ba)
exam_enrollment_14 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment14, session_exam=session_exam_phys11ba)
exam_enrollment_15 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment15, session_exam=session_exam_phys11ba)
exam_enrollment_16 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment16, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment4, session_exam=session_exam_econ2m1)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment5, session_exam=session_exam_econ2m1)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment6, session_exam=session_exam_3)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment7, session_exam=session_exam_4)
self.login(user.username)
self.goto('scores_encoding')
self.assertElementTextEqualInt('scores_encoding_learning_units', 3)
self.assertElementTextEqualInt('scores_encoding_programs', 5)
select = Select(self.get_element('slt_offer_list_selection'))
all_options = set(option.text for option in select.options)
all_offers = set(offers.keys())
self.assertSetEqual({'Tout'}, all_options - all_offers)
self.assertElementTextEqualInt('scores_encoding_learning_units', 3)
self.assertEqual(len(all_options - {'Tout'}), 5)
self.click_on('lnk_encode_{}'.format(learning_unit_year_1.id))
self.assertElementTextEqualInt('number_of_enrollments', 10)
note_enrollments = {}
for counter in range(1, 11):
element = self.driver.find_element_by_css_selector("[tabindex='%d']" % counter)
element_id = element.get_attribute('id')
enrollment_id = int(element_id.split('_')[-1])
self.fill_by_id(element_id, counter)
note_enrollments[enrollment_id] = counter
self.click_on('bt_save_online_encoding_up')
self.assertElementTextEqual('luy_progression', '10 / 10')
for enrollment_id, value in note_enrollments.items():
self.assertElementTextEqualInt('enrollment_note_{}'.format(enrollment_id), value)
self.click_on('lnk_online_double_encoding')
for enrollment_id, value in note_enrollments.items():
self.fill_by_id('num_double_score_{}'.format(enrollment_id), str(value + 2))
self.click_on('bt_compare_up')
self.driver.execute_script("scroll(0, document.body.scrollHeight)")
for enrollment_id in note_enrollments:
self.click_on('bt_take_reencoded_{}'.format(enrollment_id))
self.click_on('bt_submit_online_double_encoding_validation')
self.driver.execute_script("scroll(0, document.body.scrollHeight)")
for enrollment_id, value in note_enrollments.items():
self.assertElementTextEqualInt('enrollment_note_{}'.format(enrollment_id), value + 2)
def test_07(self):
user = self.create_user()
self.add_group(user, 'program_managers', 'tutors')
self.add_permission(user, 'can_access_academic_calendar', 'assessments.can_access_scoreencoding')
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarExamSubmissionFactory.build(academic_year=academic_year)
academic_calendar.save()
person = PersonFactory(
user=user,
first_name=user.first_name,
last_name=user.last_name,
language='fr-be'
)
offer_year_factory = functools.partial(OfferYearFactory, academic_year=academic_year)
acronyms = ['PHYS11BA', 'ECON2M1', 'PHYS1BA', 'PHYS2M1', 'PHYS2MA']
offers = {
acronym: offer_year_factory(acronym=acronym)
for acronym in acronyms
}
program_manager_factory = functools.partial(
ProgramManagerFactory, person=person
)
for acronym, offer_year in offers.items():
program_manager_factory(offer_year=offer_year)
student1 = StudentFactory()
student2 = StudentFactory()
student3 = StudentFactory()
student10 = StudentFactory()
student11 = StudentFactory()
student12 = StudentFactory()
student13 = StudentFactory()
student14 = StudentFactory()
student15 = StudentFactory()
student16 = StudentFactory()
offer_enrollment1 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student1)
offer_enrollment2 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student2)
offer_enrollment3 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student3)
offer_enrollment10 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student10)
offer_enrollment11 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student11)
offer_enrollment12 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student12)
offer_enrollment13 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student13)
offer_enrollment14 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student14)
offer_enrollment15 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student15)
offer_enrollment16 = OfferEnrollmentFactory(offer_year=offers['PHYS11BA'], student=student16)
offer_enrollment4 = OfferEnrollmentFactory(offer_year=offers['ECON2M1'], student=student1)
offer_enrollment5 = OfferEnrollmentFactory(offer_year=offers['ECON2M1'], student=student2)
        # teaching unit ("unité d'enseignement") = learning_unit_year
learning_unit = LearningUnitFactory()
learning_unit_year_1 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit)
tutor = TutorFactory(person=person)
attribution = AttributionFactory(
tutor=tutor, learning_unit_year=learning_unit_year_1,
score_responsible=True
)
learning_unit2 = LearningUnitFactory()
learning_unit_year_2 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit2)
learning_unit3 = LearningUnitFactory()
learning_unit_year_3 = LearningUnitYearFactory(academic_year=academic_year, learning_unit=learning_unit3)
learning_unit_enrollment1 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment1, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment2 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment2, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment3 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment3, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment10 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment10, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment11 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment11, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment12 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment12, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment13 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment13, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment14 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment14, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment15 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment15, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment16 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment16, learning_unit_year=learning_unit_year_1)
learning_unit_enrollment4 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment4, learning_unit_year=learning_unit_year_2)
learning_unit_enrollment5 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment5, learning_unit_year=learning_unit_year_2)
learning_unit_enrollment6 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment1, learning_unit_year=learning_unit_year_3)
learning_unit_enrollment7 = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment5, learning_unit_year=learning_unit_year_3)
session_exam_calendar = SessionExamCalendarFactory(academic_calendar=academic_calendar)
session_exam_phys11ba = SessionExamFactory(learning_unit_year=learning_unit_year_1, number_session=session_exam_calendar.number_session, offer_year=offers['PHYS11BA'])
session_exam_econ2m1 = SessionExamFactory(learning_unit_year=learning_unit_year_2, number_session=session_exam_calendar.number_session, offer_year=offers['ECON2M1'])
session_exam_3 = SessionExamFactory(learning_unit_year=learning_unit_year_3, number_session=session_exam_calendar.number_session, offer_year=offers['ECON2M1'])
session_exam_4 = SessionExamFactory(learning_unit_year=learning_unit_year_3, number_session=session_exam_calendar.number_session, offer_year=offers['PHYS11BA'])
offer_year_calendar = OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offers['PHYS11BA'])
offer_year_calendar = OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offers['ECON2M1'])
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment1, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment2, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment3, session_exam=session_exam_phys11ba)
exam_enrollment_10 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment10, session_exam=session_exam_phys11ba)
exam_enrollment_11 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment11, session_exam=session_exam_phys11ba)
exam_enrollment_12 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment12, session_exam=session_exam_phys11ba)
exam_enrollment_13 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment13, session_exam=session_exam_phys11ba)
exam_enrollment_14 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment14, session_exam=session_exam_phys11ba)
exam_enrollment_15 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment15, session_exam=session_exam_phys11ba)
exam_enrollment_16 = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment16, session_exam=session_exam_phys11ba)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment4, session_exam=session_exam_econ2m1)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment5, session_exam=session_exam_econ2m1)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment6, session_exam=session_exam_3)
ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment7, session_exam=session_exam_4)
self.login(user.username)
self.goto('scores_encoding')
self.click_on('lnk_encode_{}'.format(learning_unit_year_1.id))
self.assertElementTextEqualInt('number_of_enrollments', 10)
note_enrollments = {}
for counter in range(1, 11):
element = self.driver.find_element_by_css_selector("[tabindex='%d']" % counter)
element_id = element.get_attribute('id')
enrollment_id = int(element_id.split('_')[-1])
self.fill_by_id(element_id, counter)
note_enrollments[enrollment_id] = counter
self.click_on('bt_save_online_encoding_up')
self.goto('scores_encoding')
self.fill_by_id('txt_acronym', learning_unit_year_1.acronym)
self.click_on('bt_submit_offer_search')
time.sleep(1)
self.click_on('lnk_via_paper')
time.sleep(1)
self.click_on('lnk_notes_printing_{}'.format(learning_unit_year_1.id))
time.sleep(1)
filename = 'Feuille de notes.pdf'
full_path = os.path.join(self.full_path_temp_dir, filename)
self.assertTrue(os.path.exists(full_path))
mimetype = magic.from_file(full_path, mime=True)
self.assertEqual(mimetype, 'application/pdf')
class Scenario7FunctionalTest(SeleniumTestCase, BusinessMixin):
def test(self):
user, person = self.create_user_person()
academic_year, academic_calendar = self.create_academic_year_calendar()
acronyms = ['PHYS11BA', 'ECON2M1', 'PHYS1BA', 'PHYS2M1', 'PHYS2MA']
offers = self.create_offers(academic_year, acronyms, person=person)
offer_year = offers['PHYS11BA']
OfferYearCalendarFactory(academic_calendar=academic_calendar, offer_year=offer_year)
        # teaching unit ("unité d'enseignement") = learning_unit_year
learning_unit_year = LearningUnitYearFactory(academic_year=academic_year)
AttributionFactory(
tutor=TutorFactory(person=person),
learning_unit_year=learning_unit_year,
score_responsible=True
)
session_exam_calendar = SessionExamCalendarFactory(academic_calendar=academic_calendar)
session_exam = self.create_session_exam(learning_unit_year, session_exam_calendar, offer_year)
exam_enrollments = self.create_exam_enrollments(offer_year, learning_unit_year, session_exam)
self.login(user.username)
self.goto('scores_encoding')
self.click_on('lnk_encode_{}'.format(learning_unit_year.id))
self.assertElementTextEqualInt('number_of_enrollments', len(exam_enrollments))
note_enrollments = {}
for counter in range(1, 11):
element = self.driver.find_element_by_css_selector("[tabindex='%d']" % counter)
element_id = element.get_attribute('id')
enrollment_id = int(element_id.split('_')[-1])
self.fill_by_id(element_id, counter)
note_enrollments[enrollment_id] = counter
self.click_on('bt_save_online_encoding_up')
score_encoding = ScoresEncodingPage(self.driver, base_url=self.get_url_by_name('scores_encoding')).open()
time.sleep(1)
score_encoding.via_paper.click()
# self.click_on('lnk_via_paper')
time.sleep(1)
self.click_on('lnk_notes_printing_{}'.format(learning_unit_year.id))
time.sleep(1)
filename = 'Feuille de notes.pdf'
self.assertBrowserFileExists(filename, 'application/pdf')
def assertBrowserFileExists(self, filename, mimetype=None):
path = os.path.join(self.full_path_temp_dir, filename)
self.assertTrue(os.path.exists(path))
if mimetype:
self.assertEqual(mimetype, magic.from_file(path, mime=True))
def create_student(self, offer_year, learning_unit_year, session_exam):
student = StudentFactory()
offer_enrollment = OfferEnrollmentFactory(offer_year=offer_year, student=student)
learning_unit_enrollment = LearningUnitEnrollmentFactory(offer_enrollment=offer_enrollment,
learning_unit_year=learning_unit_year)
enrollment = ExamEnrollmentFactory(learning_unit_enrollment=learning_unit_enrollment, session_exam=session_exam)
return student, enrollment
def create_user_person(self):
user = self.create_user()
self.add_group(user, 'program_managers', 'tutors')
self.add_permission(user, 'can_access_academic_calendar', 'assessments.can_access_scoreencoding')
person = PersonFactory(
user=user,
first_name=user.first_name,
last_name=user.last_name,
language='fr-be'
)
return user, person
@classmethod
    def create_academic_year_calendar(cls, year=None, start_date=None, days=None):
if year is None:
year = pendulum.today().year - 1
academic_year = AcademicYearFactory(year=year)
academic_calendar = AcademicCalendarExamSubmissionFactory.build(academic_year=academic_year)
academic_calendar.save()
return academic_year, academic_calendar
@classmethod
    def create_session_exam(cls, learning_unit_year, session_exam_calendar, offer_year):
return SessionExamFactory(
learning_unit_year=learning_unit_year,
number_session=session_exam_calendar.number_session,
offer_year=offer_year
)
def create_exam_enrollments(self, offer_year, learning_unit_year, session_exam, number_of_students=10):
return [
self.create_student(offer_year, learning_unit_year, session_exam)[1]
for counter in range(number_of_students)
]
@classmethod
def create_offers(cls, academic_year, acronyms, person=None):
assert isinstance(acronyms, (list, tuple)) and len(acronyms) > 0
offers = {
acronym: OfferYearFactory(academic_year=academic_year, acronym=acronym)
for acronym in acronyms
}
if person:
for offer in offers.values():
ProgramManagerFactory(offer_year=offer, person=person)
return offers
import pypom
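# Minimal page-object helpers built on the descriptor protocol: a Field stores
# a Selenium locator; InputField.__set__/__get__ write and read an <input>
# element, and SubmitField.__get__ simply returns the located element so
# callers can .click() it.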
class Field:
def __init__(self, *locator):
self.locator = locator
class InputField(Field):
def __set__(self, obj, value):
element = obj.find_element(*self.locator)
element.clear()
if value is not None:
element.send_keys(value)
def __get__(self, obj, owner):
element = obj.find_element(*self.locator)
return element.get_attribute('value')
class SubmitField(Field):
def __get__(self, obj, owner):
return obj.find_element(*self.locator)
class ScoresEncodingPage(pypom.Page):
acronym = InputField(By.ID, 'txt_acronym')
search_button = SubmitField(By.ID, 'bt_submit_offer_search')
via_paper = SubmitField(By.ID, 'lnk_via_paper')
def search(self, acronym=None):
self.acronym = acronym
self.search_button.click()
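# Illustrative usage of the page object above (driver, base_url and the acronym
# value are hypothetical, mirroring the flow in the test class):
#   page = ScoresEncodingPage(driver, base_url=base_url).open()
#   page.search(acronym='DROI1BA')
#   page.via_paper.click()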
| uclouvain/osis_louvain | assessments/tests/functionals/test_score_encoding.py | Python | agpl-3.0 | 57,982 | 0.004933 |
"""
utilsMDS.py
author: Kevin Jamieson (kevin.g.jamieson@gmail.com)
edited: 1/18/15
This module has methods that assist with non-metric multidimensional scaling.
If you're trying to COMPUTE an embedding, you might simply call:
X,emp_loss = computeEmbedding(n,d,S)
You may also consider getLoss to check how well an embedding is performing.
"""
import math
import time
from numpy import *
from numpy.random import *
import numpy.random
from numpy.linalg import *
#eig = numpy.linalg
norm = linalg.norm
floor = math.floor
ceil = math.ceil
def main():
"""
Example of Usage
Creates some fake data and finds an embedding
"""
# generate some fake data
n = 30
d = 2
m = int(ceil(40*n*d*log(n))) # number of labels
p = 0.1; # error rate
Strain = []
Stest = []
Xtrue = randn(n,d);
for iter in range(0,m):
# get random triplet
q,score = getRandomQuery(Xtrue)
# align it so it agrees with Xtrue: "q[2] is more similar to q[0] than q[1]"
query_ordering_disagrees_with_Xtrue = score<0
if query_ordering_disagrees_with_Xtrue:
q = [ q[i] for i in [1,0,2]]
# add some noise
R = rand()
if R<p:
q = [ q[i] for i in [1,0,2]]
if iter < .9*m:
Strain.append(q)
else:
Stest.append(q)
# compute embedding
X,emp_loss_train = computeEmbedding(n,d,Strain,num_random_restarts=2,epsilon=0.01,verbose=True)
# compute loss on test set
emp_loss_test,hinge_loss_test = getLoss(X,Stest)
print
print 'Training loss = %f, Test loss = %f' %(emp_loss_train,emp_loss_test)
def getRandomQuery(X):
"""
Outputs a triplet [i,j,k] chosen uniformly at random from all possible triplets
and score = abs( ||x_i - x_k||^2 - ||x_j - x_k||^2 )
Inputs:
(numpy.ndarray) X : matrix from which n is extracted from and score is derived
Outputs:
[(int) i, (int) j, (int) k] q : where k in [n], i in [n]-k, j in [n]-k-j
(float) score : signed distance to current solution (positive if it agrees, negative otherwise)
Usage:
q,score = getRandomQuery(X)
"""
n,d = X.shape
while True:
i = randint(n)
j = randint(n)
k = randint(n)
if i != j and j != k and k != i:
break
q = [i, j, k]
score = getTripletScore(X,q)
return q,score
def getTripletScore(X,q):
"""
Given X,q=[i,j,k] returns score = ||x_j - x_k||^2 - ||x_i - x_k||^2
If score > 0 then the triplet agrees with the embedding, otherwise it does not
Usage:
score = getTripletScore(X,[3,4,5])
"""
i,j,k = q
return dot(X[j],X[j]) -2*dot(X[j],X[k]) + 2*dot(X[i],X[k]) - dot(X[i],X[i])
def getLoss(X,S):
"""
Returns loss on X with respect to list of triplets S: 1/len(S) \sum_{q in S} loss(X,q).
Intuitively, q=[i,j,k] "agrees" with X if ||x_j - x_k||^2 > ||x_i - x_k||^2.
For q=[i,j,k], let s(X,q) = ||x_j - x_k||^2 - ||x_i - x_k||^2
If loss is hinge_loss then loss(X,q) = max(0,1-s(X,q))
If loss is emp_loss then loss(X,q) = 1 if s(X,q)<0, and 0 otherwise
Usage:
emp_loss, hinge_loss = getLoss(X,S)
"""
n = X.shape[0]
d = X.shape[1]
emp_loss = 0 # 0/1 loss
hinge_loss = 0 # hinge loss
for q in S:
loss_ijk = getTripletScore(X,q)
hinge_loss = hinge_loss + max(0,1. - loss_ijk)
if loss_ijk < 0:
emp_loss = emp_loss + 1.
emp_loss = emp_loss/len(S)
hinge_loss = hinge_loss/len(S)
return emp_loss, hinge_loss
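# Worked example (illustrative): with X = array([[0,0],[3,0],[1,0]]) and the
# single triplet q = [0,1,2], s(X,q) = ||x_1-x_2||^2 - ||x_0-x_2||^2 = 4 - 1 = 3,
# so getLoss(X,[q]) returns emp_loss = 0.0 and hinge_loss = max(0, 1-3) = 0.0.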
def getGradient(X,S):
"""
Returns normalized gradient of hinge loss wrt to X and S.
Intuitively, q=[i,j,k] "agrees" with X if ||x_j - x_k||^2 > ||x_i - x_k||^2.
For q=[i,j,k], let s(X,q) = ||x_j - x_k||^2 - ||x_i - x_k||^2
If loss is hinge_loss then loss(X,q) = max(0,1-s(X,q))
Usage:
G,avg_grad_row_norm_sq,max_grad_row_norm_sq,avg_row_norm_sq = getGradient(X,S)
"""
n,d = X.shape
m = len(S)
# pattern for computing gradient
H = mat([[2.,0.,-2.],[ 0., -2., 2.],[ -2., 2., 0.]])
# compute gradient
G = zeros((n,d))
for q in S:
score = getTripletScore(X,q)
if 1.-score>0:
grad_partial = dot(H,X[q,:])/m
G[q,:] = G[q,:] + grad_partial
# compute statistics about gradient used for stopping conditions
mu = mean(X,0)
avg_row_norm_sq = 0.
avg_grad_row_norm_sq = 0.
max_grad_row_norm_sq = 0.
norm_grad_sq_0 = 0.
for i in range(n):
row_norm_sq = 0
grad_row_norm_sq = 0
for j in range(d):
row_norm_sq += (X[i,j]-mu[j])*(X[i,j]-mu[j])
grad_row_norm_sq += G[i,j]*G[i,j]
avg_row_norm_sq += row_norm_sq/n
avg_grad_row_norm_sq += grad_row_norm_sq/n
max_grad_row_norm_sq = max(max_grad_row_norm_sq,grad_row_norm_sq)
return G,avg_grad_row_norm_sq,max_grad_row_norm_sq,avg_row_norm_sq
def computeEmbedding(n,d,S,num_random_restarts=0,max_num_passes=0,max_iter_GD=0,max_norm=0,epsilon=0.01,verbose=False):
"""
    Computes an embedding of n objects in d dimensions using the triplets of S.
S is a list of triplets such that for each q in S, q = [i,j,k] means that
object k should be closer to i than j.
Inputs:
(int) n : number of objects in embedding
(int) d : desired dimension
(list [(int) i, (int) j,(int) k]) S : list of triplets, i,j,k must be in [n].
(int) num_random_restarts : number of random restarts (nonconvex
optimization, may converge to local minima). E.g., 9 random restarts
means take the best of 10 runs of the optimization routine.
(int) max_num_passes : maximum number of passes over data SGD makes before proceeding to GD (default equals 16)
(int) max_iter_GD: maximum number of GD iteration (default equals 50)
(float) max_norm : the maximum allowed norm of any one object (default equals 10*d)
(float) epsilon : parameter that controls stopping condition, smaller means more accurate (default = 0.01)
(boolean) verbose : outputs some progress (default equals False)
Outputs:
(numpy.ndarray) X : output embedding
(float) gamma : Equal to a/b where a is max row norm of the gradient matrix and b is the avg row norm of the centered embedding matrix X. This is a means to determine how close the current solution is to the "best" solution.
"""
if max_num_passes==0:
max_num_passes_SGD = 16
else:
max_num_passes_SGD = max_num_passes
if max_iter_GD ==0:
max_iter_GD = 50
X_old = None
emp_loss_old = float('inf')
num_restarts = -1
while num_restarts < num_random_restarts:
num_restarts += 1
ts = time.time()
X,acc = computeEmbeddingWithEpochSGD(n,d,S,max_num_passes=max_num_passes_SGD,max_norm=max_norm,epsilon=epsilon,verbose=verbose)
te_sgd = time.time()-ts
ts = time.time()
X_new,emp_loss_new,hinge_loss_new,acc_new = computeEmbeddingWithGD(X,S,max_iters=max_iter_GD,max_norm=max_norm,epsilon=epsilon,verbose=verbose)
te_gd = time.time()-ts
if emp_loss_new<emp_loss_old:
X_old = X_new
emp_loss_old = emp_loss_new
if verbose:
print "restart %d: emp_loss = %f, hinge_loss = %f, duration=%f+%f" %(num_restarts,emp_loss_new,hinge_loss_new,te_sgd,te_gd)
return X_old,emp_loss_old
def computeEmbeddingWithEpochSGD(n,d,S,max_num_passes=0,max_norm=0,epsilon=0.01,a0=0.1,verbose=False):
"""
Performs epochSGD where step size is constant across each epoch, epochs are
doubling in size, and step sizes are getting cut in half after each epoch.
This has the effect of having a step size decreasing like 1/T. a0 defines
the initial step size on the first epoch.
S is a list of triplets such that for each q in S, q = [i,j,k] means that
object k should be closer to i than j.
Inputs:
(int) n : number of objects in embedding
(int) d : desired dimension
(list [(int) i, (int) j,(int) k]) S : list of triplets, i,j,k must be in [n].
(int) max_num_passes : maximum number of passes over data (default equals 16)
(float) max_norm : the maximum allowed norm of any one object (default equals 10*d)
(float) epsilon : parameter that controls stopping condition (default = 0.01)
        (float) a0 : initial step size (default equals 0.1)
(boolean) verbose : output iteration progress or not (default equals False)
Outputs:
(numpy.ndarray) X : output embedding
(float) gamma : Equal to a/b where a is max row norm of the gradient matrix and b is the avg row norm of the centered embedding matrix X. This is a means to determine how close the current solution is to the "best" solution.
Usage:
X,gamma = computeEmbeddingWithEpochSGD(n,d,S)
"""
m = len(S)
# norm of each object is equal to 1 in expectation
X = randn(n,d)
if max_num_passes==0:
max_iters = 16*m
else:
max_iters = max_num_passes*m
if max_norm==0:
max_norm = 10.*d
# pattern for computing gradient
H = mat([[2.,0.,-2.],[ 0., -2., 2.],[ -2., 2., 0.]])
epoch_length = m
a = a0
t = 0
t_e = 0
# check losses
if verbose:
emp_loss,hinge_loss = getLoss(X,S)
print "iter=%d, emp_loss=%f, hinge_loss=%f, a=%f" % (0,emp_loss,hinge_loss,a)
rel_max_grad = None
while t < max_iters:
t += 1
t_e += 1
        # check epoch conditions, update step size
if t_e % epoch_length == 0:
a = a*0.5
epoch_length = 2*epoch_length
t_e = 0
if epsilon>0 or verbose:
# get losses
emp_loss,hinge_loss = getLoss(X,S)
# get gradient and check stopping-time statistics
G,avg_grad_row_norm_sq,max_grad_row_norm_sq,avg_row_norm_sq = getGradient(X,S)
rel_max_grad = sqrt( max_grad_row_norm_sq / avg_row_norm_sq )
rel_avg_grad = sqrt( avg_grad_row_norm_sq / avg_row_norm_sq )
if verbose:
print "iter=%d, emp_loss=%f, hinge_loss=%f, rel_avg_grad=%f, rel_max_grad=%f, a=%f" % (t,emp_loss,hinge_loss,rel_avg_grad,rel_max_grad,a)
if rel_max_grad < epsilon:
break
        # get random triplet uniformly at random
q = S[randint(m)]
# take gradient step
score = getTripletScore(X,q)
if 1.-score>0:
grad_partial = dot(H,X[q,:])
X[q,:] = X[q,:] - a*grad_partial
        # project back onto ball such that norm(X[i])<=max_norm
for i in q:
norm_i = norm(X[i])
if norm_i>max_norm:
X[i] = X[i] * (max_norm / norm_i)
return X,rel_max_grad
def computeEmbeddingWithGD(X,S,max_iters=0,max_norm=0,epsilon=0.01,c1=0.0001,rho=0.5,verbose=False):
"""
    Performs gradient descent with geometric Armijo line search (with parameter c1)
S is a list of triplets such that for each q in S, q = [i,j,k] means that
object k should be closer to i than j.
Implements line search algorithm 3.1 of page 37 in Nocedal and Wright (2006) Numerical Optimization
Inputs:
(numpy.ndarray) X : input embedding
(list [(int) i, (int) j,(int) k]) S : list of triplets, i,j,k must be in [n].
        (int) max_iters : maximum number of GD iterations (default equals 100)
(float) max_norm : the maximum allowed norm of any one object (default equals 10*d)
(float) epsilon : parameter that controls stopping condition, exits if gamma<epsilon (default = 0.01)
        (float) c1 : Armijo stopping condition parameter (default equals 0.0001)
(float) rho : Backtracking line search parameter (default equals 0.5)
(boolean) verbose : output iteration progress or not (default equals False)
Outputs:
(numpy.ndarray) X : output embedding
(float) emp_loss : output 0/1 error
(float) hinge_loss : output hinge loss
(float) gamma : Equal to a/b where a is max row norm of the gradient matrix and b is the avg row norm of the centered embedding matrix X. This is a means to determine how close the current solution is to the "best" solution.
Usage:
X,gamma = computeEmbeddingWithGD(X,S)
"""
m = len(S)
n,d = X.shape
if max_iters==0:
max_iters = 100
if max_norm==0:
max_norm = 10.*d
# check losses
if verbose:
emp_loss,hinge_loss = getLoss(X,S)
print "iter=%d, emp_loss=%f, hinge_loss=%f, a=%f" % (0,emp_loss,hinge_loss,float('nan'))
alpha = .5
t = 0
emp_loss_0 = float('inf')
hinge_loss_0 = float('inf')
rel_max_grad = float('inf')
while t < max_iters:
t+=1
# get gradient and stopping-time statistics
G,avg_grad_row_norm_sq,max_grad_row_norm_sq,avg_row_norm_sq = getGradient(X,S)
rel_max_grad = sqrt( max_grad_row_norm_sq / avg_row_norm_sq )
rel_avg_grad = sqrt( avg_grad_row_norm_sq / avg_row_norm_sq )
if rel_max_grad < epsilon:
break
# perform backtracking line search
alpha = 2*alpha
emp_loss_0,hinge_loss_0 = getLoss(X,S)
norm_grad_sq_0 = avg_grad_row_norm_sq*n
emp_loss_k,hinge_loss_k = getLoss(X-alpha*G,S)
inner_t = 0
while hinge_loss_k > hinge_loss_0 - c1*alpha*norm_grad_sq_0:
alpha = alpha*rho
emp_loss_k,hinge_loss_k = getLoss(X-alpha*G,S)
inner_t += 1
X = X-alpha*G
# project back onto ball such that norm(X[i])<=max_norm
for i in range(n):
norm_i = norm(X[i])
if norm_i>max_norm:
X[i] = X[i] * (max_norm / norm_i)
# check losses
if verbose:
print "hinge iter=%d, emp_loss=%f, hinge_loss=%f, rel_avg_grad=%f, rel_max_grad=%f, a=%f, i_t=%d" % (t,emp_loss_k,hinge_loss_k,rel_avg_grad,rel_max_grad,alpha,inner_t)
return X,emp_loss_0,hinge_loss_0,rel_max_grad
if __name__ == "__main__":
main()
| nextml/NEXT | apps/PoolBasedTripletMDS/algs/ValidationSampling/utilsMDS.py | Python | apache-2.0 | 14,387 | 0.018002 |
#!/usr/bin/env python
from mvbb.box_db import MVBBLoader
import multiprocessing, subprocess
from multiprocessing import Pool
import sys
from plugins import soft_hand
def grasp_boxes(filename):
subprocess.call(['python', './grasp_boxes_batch.py', filename])
if __name__ == '__main__':
try:
import os.path
filename = os.path.splitext(sys.argv[1])[0]
except:
filename = 'box_db'
if not os.path.isfile(filename+'.csv'):
print "Error: file", filename, "doesn't exist"
exit()
try:
n_dofs = int(sys.argv[2])
n_l = int(sys.argv[3])
except:
n_dofs = soft_hand.numJoints
n_l = len(soft_hand.links_to_check)
# for SoftHand
box_db = MVBBLoader(filename, n_dofs, n_l)
filenames = box_db.split_db()
p = Pool(multiprocessing.cpu_count())
p.map(grasp_boxes, filenames)
box_db.join_results(filenames)
| lia2790/grasp_learning | python/simple_batch_splitter.py | Python | bsd-3-clause | 913 | 0.004381 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.models.test_results import TestResult
class TestResultsTest(unittest.TestCase):
def test_defaults(self):
result = TestResult("foo")
self.assertEqual(result.test_name, 'foo')
self.assertEqual(result.failures, [])
self.assertEqual(result.test_run_time, 0)
def test_loads(self):
result = TestResult(test_name='foo',
failures=[],
test_run_time=1.1)
s = result.dumps()
new_result = TestResult.loads(s)
self.assertIsInstance(new_result, TestResult)
self.assertEqual(new_result, result)
# Also check that != is implemented.
self.assertFalse(new_result != result)
| klim-iv/phantomjs-qt5 | src/webkit/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py | Python | bsd-3-clause | 2,300 | 0 |
import random
import math
import collections
import tree_decomposition as td
import create_production_rules as pr
import graph_sampler as gs
import stochastic_growth
import probabilistic_growth
import net_metrics
import matplotlib.pyplot as plt
import product
import networkx as nx
import numpy as np
import snap
#G = snap.GenRndGnm(snap.PUNGraph, 10000, 5000)
#G = nx.grid_2d_graph(4,4)
#line
#G = nx.Graph()
#G.add_edge(1, 2)
#G.add_edge(2, 3)
#G.add_edge(3, 4)
#G.add_edge(4, 5)
#G.add_edge(5, 6)
#G.add_edge(6, 7)
#G.add_edge(7, 8)
#G.add_edge(8, 9)
#G.add_edge(9, 10)
#G.add_edge(10, 1) #circle
#G = nx.star_graph(6)
#G = nx.ladder_graph(10)
#G = nx.karate_club_graph()
#nx.write_edgelist((G.to_directed()), '../demo_graphs/karate.txt', comments="#", delimiter=' ', data=False)
#exit()
#G = nx.barabasi_albert_graph(1000,3)
#G = nx.connected_watts_strogatz_graph(200,8,.2)
#G = nx.read_edgelist("../demo_graphs/as20000102.txt")
G = nx.read_edgelist("../demo_graphs/CA-GrQc.txt")
#G = nx.read_edgelist("../demo_graphs/Email-Enron.txt")
#G = nx.read_edgelist("../demo_graphs/Brightkite_edges.txt")
G= list(nx.connected_component_subgraphs(G))[0]
##board example
#G = nx.Graph()
#G.add_edge(1, 2)
#G.add_edge(2, 3)
#G.add_edge(2, 4)
#G.add_edge(3, 4)
#G.add_edge(3, 5)
#G.add_edge(4, 6)
#G.add_edge(5, 6)
#G.add_edge(1, 5)
# print G.number_of_nodes()
num_nodes = G.number_of_nodes()
print num_nodes
print
print "--------------------"
print "------- Edges ------"
print "--------------------"
num_edges = G.number_of_edges()
print num_edges
#print
#print "--------------------"
#print "------ Cliques -----"
#print "--------------------"
#print list(nx.find_cliques(G))
if not nx.is_connected(G):
    print "Graph must be connected"
    exit()
G.remove_edges_from(G.selfloop_edges())
if G.number_of_selfloops() > 0:
    print "Graph must not contain self-loops"
    exit()
Ggl = gs.subgraphs_cnt(G,100)
setlendf = []
if num_nodes>400:
#for i in range(0,10):
# setlen = []
# for i in range(10,510, 20):
for Gprime in gs.rwr_sample(G, 10, 500):
pr.prod_rules = {}
T = td.quickbb(Gprime)
prod_rules = pr.learn_production_rules(Gprime, T)
# setlen.append(len(rule_probabilities))
print prod_rules
else:
T = td.quickbb(G)
prod_rules = pr.learn_production_rules(G, T)
print "Rule Induction Complete"
exit()
Gergm = []
Gergmgl = []
for run in range(1, 3):
f = open('../demo_graphs/ergm_sim/enron/data '+str(run)+' .csv', 'r')
E=nx.Graph()
header = 0
for line in f:
line=line.rstrip()
if header == 0:
header+=1
continue
c = line.split("\t")
        if len(c) != 3: continue
        if int(c[1]) > num_nodes or int(c[2]) > num_nodes:
            continue
        E.add_edge(int(c[1]),int(c[2]))
Gergm.append(E)
print "G ergm iteration " + str(run) + " of 20"
Gergmgl.append(gs.subgraphs_cnt(E,50))
k = int(math.floor(math.log(num_nodes, 10)))
P = [[.9716,.658],[.5684,.1256]] #karate
P = [[.8581,.5116],[.5063,.2071]] #as20000102
#P = [[.7317,.5533],[.5354,.2857]] #dblp
#P = [[.9031,.5793],[.5051,.2136]] #ca-grqc
#P = [[.9124,.5884],[.5029,.2165]] #enron
P = [[.8884,.5908],[.5628,.2736]] #brightkite
Gkron = product.kronecker_random_graph(k,P).to_undirected()
print("GKron finished")
p_sum = .9716+.5382+.5684+.1256 #karate
#p_sum = .8581+.5116+.5063+.2071 #as20000102
#p_sum = .7317+.5533+.5354+.2857 # dblp
#p_sum = .9031+.5793+.5051+.2136 #ca-grqc
#p_sum = .9124+.5884+.5029+.2165 #enron
p_sum = .8884+.5908+.5628+.2736 #brightkite
GRmatSNAP = snap.GenRMat(num_nodes, num_edges, P[0][0]/p_sum, P[0][1]/p_sum, P[1][0]/p_sum)
GRmat = nx.Graph()
for EI in GRmatSNAP.Edges():
GRmat.add_edge(EI.GetSrcNId(), EI.GetDstNId())
print("GRMAT finished")
GRmatgl = gs.subgraphs_cnt(GRmat,100)
n_distribution = {}
Gstar = []
Dstar = []
Gstargl = []
for run in range(0, 20):
nG, nD = stochastic_growth.grow(prod_rules, num_nodes/10,0)#num_nodes/50)
Gstar.append(nG)
Dstar.append(nD)
Gstargl.append(gs.subgraphs_cnt(nG,100))
#Gstar.append(probabilistic_growth.grow(rule_probabilities,prod_rule_set, num_nodes))
print "G* iteration " + str(run) + " of 20"
print(nD)
print ""
print "G* Samples Complete"
label = "AS"
net_metrics.draw_graphlet_plot(Ggl, Gstargl, Gergmgl, Gkron, GRmatgl, label, plt.figure())
exit()
net_metrics.draw_diam_plot(G, Dstar, Gergm, Gkron, GRmat, label, plt.figure())
net_metrics.draw_degree_rank_plot(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
#net_metrics.draw_scree_plot(G, Gstar, label, ax1)
net_metrics.draw_network_value(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
net_metrics.draw_hop_plot(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
#ax1.plot(ef.mean().index, ef.mean()[1],'b')
net_metrics.save_plot_figure_2disk()
| abitofalchemy/hrg_nets | karate_chop.py | Python | gpl-3.0 | 4,865 | 0.019527 |
#!/usr/bin/env python
## Some necessary imports
from __future__ import print_function
from commands import getoutput
from time import sleep
from os.path import expanduser
import os
import re
from datetime import datetime
import process_lock as pl
###
## Configuration options
script_location = os.path.dirname(os.path.realpath(__file__))
proxy_ssid = ["iiscwlan", "opbwlan"] # Add whatever SSIDs you want to use the proxy for
proxy_set_script = "bash {0}/iisc_proxy_set.sh".format(script_location) # The script you want to run to turn on proxy
proxy_unset_script = "bash {0}/proxy_unset.sh".format(script_location) # The script to turn off proxy
checking_interval = 2 # The checking frequency in seconds.
default_log_file = expanduser("~/.proxy_log") # Where the logging will happen.
ssid_matcher=re.compile("ESSID:\"[\w]*\"") # A regular expression to match against the output of iwconfig.
ssid_slice=slice(7, -1)
## Logs the string to the log file and stdout.
def log_output(string, log_file=default_log_file):
now = datetime.now()
timestamped_string = "[{0}:{1}:{2}-{3}/{4}/{5}] {6}".format(now.hour, now.minute, now.second, now.day, now.month, now.year, string)
file_to_write = open(log_file, "a")
file_to_write.write(timestamped_string)
print(timestamped_string, end="")
file_to_write.close()
###
def set_proxy():
log_output(str(getoutput(proxy_set_script))+'\n')
log_output(str(getoutput("cp {0}/proxy_settings_iiscwlan ~/.current_proxy".format(script_location)))+'\n')
def unset_proxy():
log_output(str(getoutput(proxy_unset_script))+'\n')
log_output(str(getoutput("cp {0}/proxy_settings_other ~/.current_proxy".format(script_location)))+'\n')
def get_ssid():
out=getoutput('/sbin/iwconfig')
result=ssid_matcher.search(out)
if result:
return result.string[result.start():result.end()][ssid_slice]
else:
return None
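# Worked example (illustrative iwconfig output): for a line such as
#   wlan0  IEEE 802.11bgn  ESSID:"iiscwlan"
# the regex matches 'ESSID:"iiscwlan"' and ssid_slice(7, -1) strips the
# leading 'ESSID:"' and the trailing quote, so get_ssid() returns 'iiscwlan'.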
def main(interval=2):
current_ssid=get_ssid()
if current_ssid and current_ssid in proxy_ssid:
log_output("Detected proxy network. Trying to set proxy.\n")
set_proxy()
else:
log_output("WiFi off or non-proxy network detected. Trying to unset proxy.\n")
unset_proxy()
while True:
if not current_ssid:
log_output("WiFi is off. Doing nothing.\n")
else:
log_output("WiFi is on. Current ssid is {0}.\n".format(current_ssid))
sleep(interval)
new_ssid=get_ssid()
if new_ssid!=current_ssid:
if new_ssid and new_ssid in proxy_ssid:
log_output("Proxy network detected. Trying to set proxy.\n")
set_proxy()
else:
log_output("WiFi off or non-proxy network detected.\n")
unset_proxy()
current_ssid=new_ssid
if __name__=="__main__":
try:
import psutil
pid = os.getpid()
if not pl.process_is_running("proxy_autoconfig", [pid]):
main(checking_interval)
else:
print("Process already running.")
except ImportError:
main(checking_interval)
| Bolt64/proxy_switcher | proxy_autoconfig.py | Python | mit | 3,103 | 0.010957 |
from __future__ import division
import itertools
from sklearn import mixture, metrics
from sklearn.cluster import DBSCAN
from scipy import linalg
from scipy.spatial import distance
import pylab as pl
import matplotlib as mpl
from scipy.interpolate import Rbf, InterpolatedUnivariateSpline
import csv
import numpy as np
# reading file
for action in ['wax', 'rotate', 'move', 'fold', 'paint']:
actionName=action
print "Action: ", actionName
# 6
reader=csv.reader(open("/home/santi/Repositories/cognitive/xgnitive/main/app/record/recorded3/"+actionName+"_6/data.log","rb"),delimiter=' ')
x=list(reader)
temp4=np.array(x).astype('float')
# Get the time range and rescale
# change made here
r = float(temp4[-1][1] - temp4[0][1])
temp4[:,1] = map(lambda x: (x - temp4[0][1]) / r, temp4[:,1])
##normalize (optional)
#temp4 /= np.max(np.abs(temp4), axis=0)
###########################################
######## Theoretical Normalization #########
## locX0 locY0 locZ0 area hue sat val angle
############################################
## spatial
## x
#temp4[:,2] /= 5000
## y
#temp4[:,3] /= 2000
## z
#temp4[:,4] /= 2000
## area
#temp4[:,5] /= 307200
## hue
#temp4[:,6] /= 180
## sat
#temp4[:,7] /= 255
## val
#temp4[:,8] /= 255
##angle
#temp4[:,9] /= 180
###
realDataMatrix= np.vstack([temp4])
# deletes first column (only -1 values)
realDataMatrix= np.delete(realDataMatrix,0,axis=1)
    # drop the trailing 8 feature columns (indices 9-16 after the first deletion)
    realDataMatrix = np.delete(realDataMatrix, np.s_[9:17], axis=1)
#if: test all dimensions
Xnoisy = realDataMatrix # noisy dataset
#Xnoisy = sorted(Xnoisy, key=lambda column: column[1])
X=[]
##else: choose dimensions to be shown (dimOne=time, dimTwo=feature to measure)
#dimOne = realDataMatrix[:,0]
#dimTwo = realDataMatrix[:,1]
#Xnoisy = np.array([dimOne,dimTwo]).T # noisy dataset
#X=[] # future clean dataset
# Compute similarities
D = distance.squareform(distance.pdist(Xnoisy))
S = 1 - (D / np.max(D))
# Compute DBSCAN
db = DBSCAN(eps=0.001, min_samples=10, metric='cosine').fit(S)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print 'Estimated number of clusters: %d' % n_clusters_
# Plotting DBSCAN (but also outlier detection)
core_samples = db.core_sample_indices_
unique_labels = set(labels)
preplot = pl.subplot(4, 1, 1)
colors = pl.cm.Blues(np.linspace(0, 1, len(unique_labels)))
outliers=[]
for k, col in zip(unique_labels, colors):
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
if index in core_samples and k != -1:
markersize = 8
X.append(Xnoisy[index])
pl.plot(Xnoisy[index][0], Xnoisy[index][1],'o', markerfacecolor=col, markeredgecolor='k', markersize=markersize)
else:
markersize = 3
pl.plot(Xnoisy[index][0], Xnoisy[index][1],'o', markerfacecolor='k', markeredgecolor='k', markersize=markersize)
if not X:
X=realDataMatrix #change here! to avoid null list
pl.xticks(())
pl.yticks(())
pl.title('DBSCAN. Estimated clusters: %d' % n_clusters_, size=20)
#assigning new clean dataset to variable X in numpy array
X = np.array(X)
# Initializing BIC parameters
lowest_bic = np.infty
bic = []
# choose number of clusters to test
if n_clusters_ <2:
componentToTest=3
else:
componentToTest=2*n_clusters_
print "Maximum components tested: ", componentToTest
n_components_range = range(1, componentToTest+1)
# this is a loop to test every component, choosing the lowest BIC at the end
for n_components in n_components_range:
# Fit a mixture of gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type='full')
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
    # overloading if components == 1
print best_gmm
if len(best_gmm.means_)==1:
best_gmm = mixture.GMM(n_components=2, covariance_type='full')
best_gmm.fit(X)
    ## OVERLOAD TO BE REMOVED
#best_gmm = mixture.GMM(n_components=12, covariance_type='full')
#best_gmm.fit(X)
# array of BIC for the graphic table column
bic = np.array(bic)
# one tested all components, here we choose the best
clf = best_gmm
print "Best result: ", clf
print 'Means: ', np.round(clf.means_,4)
## Plot the BIC scores
#bars = []
#spl = pl.subplot(4, 1, 2)
#xpos = np.array(n_components_range) - 0.1
#bars.append(pl.bar(xpos, bic[0:len(n_components_range)], width=.2, color='c'))
#pl.xticks(n_components_range, size=15)
#pl.yticks(([bic.min() * 1.01 - .01 * bic.max(), bic.max()]), size=12)
#pl.title('BIC Score', size=20)
#spl.set_xlabel('Number of components', size=15)
## Plot the winner
#splot = pl.subplot(4, 1, 3)
#Y_ = clf.predict(X)
#for i, (mean, covar) in enumerate(zip(clf.means_, clf.covars_)):
#v, w = linalg.eigh(covar)
#if not np.any(Y_ == i):
#continue
##pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 8, color='black')
#pl.plot(X[Y_ == i, 0], X[Y_ == i, 1], 'o', markerfacecolor='black', markeredgecolor='k', markersize=5)
## Plot an ellipse to show the Gaussian component
#angle = np.arctan2(w[0][1], w[0][0])
#angle = 180 * angle / np.pi # convert to degrees
#v *= 4
#ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color='b')
#ell.set_clip_box(splot.bbox)
#ell.set_alpha(.6)
#splot.add_artist(ell)
#pl.xticks(())
#pl.yticks(())
#pl.title('GMM-BIC. Components: ' + str(len(clf.means_)), size=20)
## saving centers
sortedPoints = sorted(clf.means_, key=lambda point: point[0])
np.savetxt("generalized/"+actionName+"Query", sortedPoints, fmt='%.14e')
## plot interpolation
#meansX, meansY = zip(*clf.means_)
#if len(meansX) > 1:
#minimTime=min(meansX)
#maximTime=max(meansX)
#print minimTime, maximTime
#xi = np.linspace(minimTime, maximTime, 10*len(meansX))
#testrbf = Rbf(meansX, meansY, function='gaussian')
#yi = testrbf(xi)
#pl.subplot(4, 1, 4)
#pl.plot(xi, yi, 'g')
#pl.scatter(meansX, meansY,20, color='blue')
#pl.xticks(())
#pl.yticks(())
#pl.title('RBF Interpolation', size=20)
#pl.subplots_adjust(hspace=.8, bottom=0.05)
#pl.show()
#else:
#pl.show()
#
| smorante/continuous-goal-directed-actions | simulated-CGDA/generalization/generalization_old_test2.py | Python | mit | 7,027 | 0.021346 |
# browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <johann@browsershots.org>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GUI-specific interface functions for X11.
"""
__revision__ = "$Rev: 2749 $"
__date__ = "$Date: 2008-04-08 20:43:21 +0530 (Tue, 08 Apr 2008) $"
__author__ = "$Author: hawk $"
import os
import time
from shotfactory04.gui import linux as base
class Gui(base.Gui):
"""
Special functions for Netscape Navigator.
"""
def reset_browser(self):
"""
Delete crash dialog and browser cache.
"""
home = os.environ['HOME']
self.delete_if_exists(os.path.join(home, '.netscape', 'cache'))
self.delete_if_exists(os.path.join(home, '.netscape', 'history.dat'))
self.delete_if_exists(os.path.join(home, '.netscape', 'cookies'))
self.delete_if_exists(os.path.join(
home, '.netscape', 'navigator', '*', 'Cache'))
self.delete_if_exists(os.path.join(
home, '.netscape', 'navigator', '*', 'sessionstore.js'))
self.delete_if_exists(os.path.join(
home, '.netscape', 'navigator', '*', 'history.dat'))
self.delete_if_exists(os.path.join(
home, '.netscape', 'navigator', '*', 'cookies.txt'))
def reuse_browser(self, config, url, options):
"""
Open a new URL in the same browser window.
"""
command = config['command'] or config['browser'].lower()
command = '%s -remote "OpenURL(%s,new-tab)"' % (command, url)
print "Running", command
error = self.shell(command)
if error:
raise RuntimeError("could not load new URL in the browser")
print "Sleeping %d seconds while page is loading." % (
options.reuse_wait)
time.sleep(options.reuse_wait / 2.0)
self.maximize()
time.sleep(options.reuse_wait / 2.0)
| mintuhouse/shotfactory | shotfactory04/gui/linux/navigator.py | Python | gpl-3.0 | 2,507 | 0 |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "pyUniSR",
version = "0.0.7",
author = "Nicolo Balzarotti",
author_email = "anothersms@gmail.com",
description = ("Python class to access studenti.unisr.it (Univerity Vita-Salute San Raffaele, Milano)"),
license = "GPLv2",
keywords = "unisr class milano university raffaele",
url = "https://github.com/nico202/pyUniSR",
packages=['UniSR'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
)
| nico202/pyUniSR | setup.py | Python | gpl-2.0 | 919 | 0.020675 |
#-*- coding:utf-8 -*-
import string
from gi.repository import GObject, Gedit, Gtk, Pango
from settings import errorGenerator, jump_to_error_key, notification
ui_str = """<ui>
<menubar name="MenuBar">
<menu name="EditMenu" action="Edit">
<placeholder name="EditOps_6">
<menuitem name="gfly" action="gfly"/>
</placeholder>
</menu>
</menubar>
</ui>
"""
def getLineStartToEnd(doc, line):
""" get two Gtk.TextIter, start and end of line
Attribute:
        line: integer line number (starting at 0)
"""
s = doc.get_iter_at_line(line)
e = s.copy()
e.forward_line()
return s, e
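# e.g. for line 0 this yields iterators spanning the whole first line, trailing
# newline included, since forward_line() moves the iterator to the next line's start.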
def skipWhiteSpaces(itr):
""" skip white spaces of Gtk.TextIter
"""
while itr.get_char() in string.whitespace and itr.forward_char():
pass
return itr
def getLanguageName(doc):
""" get document's languageName
Attribute:
doc: GeditDocument
"""
lang = doc.get_language()
if lang:
return lang.get_name()
class TabWatch:
def __init__(self, window):
self.errorTag = None
self.currentConnectedTab = None
self.currentConnectedDoc = None
self.geditWindow = window
        #connect window signal
self.currentConnectedTab = window.connect("active_tab_changed", self.__tab_changed)
def close(self, window):
        if self.currentConnectedTab is not None:
window.disconnect(self.currentConnectedTab)
self.currentConnectedTab = None
        if self.currentConnectedDoc is not None:
window.disconnect(self.currentConnectedDoc)
self.currentConnectedDoc = None
def __tab_changed(self, window, tab):
doc = window.get_active_document()
        #connect window signal
        if self.currentConnectedTab is not None:
window.disconnect(self.currentConnectedTab)
self.currentConnectedTab = None
self.currentConnectedTab = window.connect("active_tab_changed", self.__tab_changed)
#connect document signal
        if self.currentConnectedDoc is not None:
window.disconnect(self.currentConnectedDoc)
self.currentConnectedDoc = None
doc.connect("saved", self.__doc_saved)
#connect view signal
tab.get_view().connect_after("move-cursor", self.__move_cursor)
#create tag for error
self.errorTag = doc.get_tag_table().lookup('errorTag')
if self.errorTag is None:
self.errorTag = doc.create_tag('errorTag', underline=Pango.Underline.ERROR)
self.draw_lines(doc)
def __doc_saved(self, doc, *args):
self.draw_lines(doc)
def draw_lines(self, doc):
# clear
s, e = doc.get_bounds()
doc.remove_tag(self.errorTag, s, e)
#generate error and apply new error tag
lang = getLanguageName(doc)
if errorGenerator.has_key(lang):
errorCount = 0
for g in errorGenerator[lang]:
try:
for i in g.generateErrorLines(doc.get_uri_for_display()):
s, e = getLineStartToEnd(doc, i - 1)
doc.apply_tag(self.errorTag, skipWhiteSpaces(s), e)
errorCount += 1
except EnvironmentError:
print "cannot generateErrorLines"
if notification:
self.errorNorify(errorCount)
def errorNorify(self, count):
if count <= 0:
return
try:
import pynotify
pynotify.init("gfly_notify")
if count == 1:
n = pynotify.Notification("gfly", "There is one error")
else:
n = pynotify.Notification("gfly", "There are %d error" % count)
n.show()
except ImportError:
pass
def __move_cursor(self, textview, *args):
global errorGenerator
doc = textview.get_buffer()
lang = getLanguageName(doc)
if errorGenerator.has_key(lang):
textview.set_has_tooltip(False)
cursorIter = doc.get_iter_at_mark(doc.get_insert())
cursorLine = cursorIter.get_line()
for g in errorGenerator[lang]:
if g.errorLineMsg.has_key(cursorLine + 1):
textview.set_has_tooltip(True)
textview.set_tooltip_text(g.errorLineMsg[cursorLine + 1])
def jump_error(self):
view = self.geditWindow.get_active_view()
doc = view.get_buffer()
lang = getLanguageName(doc)
if errorGenerator.has_key(lang):
cursorLine = doc.get_iter_at_mark(doc.get_insert()).get_line()
lines = []
for g in errorGenerator[lang]:
lines.extend(g.errorLineMsg.keys())
if len(lines) != 0:
lines.sort()
for i in lines:
if cursorLine < i - 1:
doc.goto_line(i - 1)
view.scroll_to_cursor()
return
doc.goto_line(lines[0] - 1)
view.scroll_to_cursor()
class gfly(GObject.Object, Gedit.WindowActivatable):
__gtype_name__ = "gfly"
window = GObject.property(type=Gedit.Window)
def __init__(self):
GObject.Object.__init__(self)
def do_activate(self):
global ui_str
self.tabwatch = TabWatch(self.window)
manager = self.window.get_ui_manager()
self.action_group = Gtk.ActionGroup("gflyPluginAction")
self.action_group.add_actions([("gfly", None, "Jump Error", jump_to_error_key, None, self.__jump_error)])
manager.insert_action_group(self.action_group, -1)
self.ui_id = manager.add_ui_from_string(ui_str)
def do_deactivate(self):
self.tabwatch.close(self.window)
def do_update_state(self):
pass
def __jump_error(self, action):
self.tabwatch.jump_error()
| utisam/gfly | gfly/__init__.py | Python | gpl-3.0 | 4,933 | 0.036084 |
################################################################################
# new_users_saver function
################################################################################
def newusers(m):
dict_updater()
un = m.from_user.username
if un not in DBDIC:
uid = m.from_user.id
DBDIC[un] = [uid,0]
if hasattr(m, 'new_chat_participant'):
un = m.new_chat_participant.username
if un not in DBDIC:
uid = m.new_chat_participant.id
DBDIC[un] = [uid,0]
dict_saver()
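# Sketch of how this handler might be wired up (hypothetical bot object in
# pyTelegramBotAPI style; the decorator arguments are assumptions, not from this repo):
# @bot.message_handler(func=lambda m: True, content_types=['text', 'new_chat_participant'])
# def catch_all(m):
#     newusers(m)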
################################################################################
# "newusers" saves new users in the dictionary
# (see dict_updater_saver.py for "dict_updater()" and "dict_saver()")
################################################################################
| acasadoquijada/Telegram-bot-stuff | Stuff/new_users_saver.py | Python | gpl-2.0 | 825 | 0.008485 |
"""
@author: Stefan Peidli
License: MIT
Tags: Neural Network
"""
import numpy as np
from Board import Board
n = 9
# Testboards
def gen_test_board(method=0):
if method == 0:
b = np.zeros((n, n))
b[0, 2] = 1
b[1, 3] = 1
b[3, 3] = 1
b[2, 3] = -1
b[0, 1] = -1
b[1, 0] = -1
b[1, 1] = -1
b[2, 2] = 1
if method == 1:
b = np.round(np.random.uniform(-1, 1, (n, n)), 0)
return b
gen_test_board(1)
dxdys = [(1, 0), (0, 1), (-1, 0), (0, -1)]
# help functions
def is_on_board(n, x, y):
return 0 <= x < n and 0 <= y < n
# TODO there is a bug somewhere: the liberty counts are sometimes wrong
def give_group_at_position(board, start_x, start_y):
group = [(start_x, start_y)]
checked = []
i = 0
liberts = 0
while i < len(group):
x, y = group[i]
i += 1
for dx, dy in dxdys:
adj_x, adj_y = x + dx, y + dy
if is_on_board(board.shape[0], adj_x, adj_y) and not (adj_x, adj_y) in group:
if board[adj_x, adj_y] == 0 and not (adj_x, adj_y) in checked:
liberts += 1
checked.append((adj_x, adj_y))
elif board[adj_x, adj_y] == board[start_x, start_y]:
group.append((adj_x, adj_y))
if board[start_x, start_y] == 0:
liberts = 0
return [group, liberts]
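# Illustrative example on the fixed test board: with b = gen_test_board(0),
# give_group_at_position(b, 0, 1) should return the three connected white
# stones [(0, 1), (1, 1), (1, 0)] together with their 4 shared liberties
# (0, 0), (2, 1), (1, 2) and (2, 0).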
def give_liberties(board, color):
libs = np.zeros((n, n))
for row in range(n):
for col in range(n):
if board[row, col] == color:
[_, li] = give_group_at_position(board, row, col)
libs[row, col] = li
return libs
# Filters
# Filters that are self-mappings
# Eyes . ID = 0
# shows the eyes of player color
def filter_eyes(board, color):
n = board.shape[0]
eyes = np.zeros((n, n))
board = board * color
for row in range(n):
for col in range(n):
if board[row, col] == 0: # only free fields can be eyes
if not(row == 0):
eyes[row, col] += board[row-1,col]
if not(row == n-1):
eyes[row, col] += board[row+1,col]
if not(col == 0):
eyes[row, col] += board[row,col-1]
if not(col == n-1):
eyes[row, col] += board[row,col+1]
    # off-board edges count as friendly neighbors, so pad the border rows/cols
    eyes[0, :] += 1
    eyes[-1, :] += 1
    eyes[:, 0] += 1
    eyes[:, -1] += 1
    # a free point is an eye only if all four (padded) neighbors are friendly
    eyes[eyes != 4] = 0
    eyes = eyes / 4
return eyes
# Shows which move will result in an eye being created (1) or destroyed (-1) . ID = 1.
# Note: Eyes by capture are created by capturing a single stone
def filter_eyes_create(board, color=1):
board.reshape((9, 9))
n = board.shape[0]
reyes = np.zeros((n, n))
eyc = np.sum(filter_eyes(board, color)) # current eyes
cap = filter_captures(board, color)
for row in range(n):
for col in range(n):
if board[row, col] == 0: # only free fields can be set
temp = board * 1 # python magic
temp[row, col] = color
eyn = np.sum(filter_eyes(temp, color)) # eyes by free creation
# actually not good line below: we can also capture two single stones with one move..
if cap[row, col] == 1: # capture one eye
eyn += 1
reyes[row, col] = eyn - eyc
return reyes
# captures ID = 2
# Shows how many stones player "color" (1=b,-1=w) would capture by playing a move on a field
def filter_captures(board, color):
board.reshape((9, 9))
n = board.shape[0]
cap = np.zeros((n, n))
for row in range(n):
for col in range(n):
if board[row, col] == 0: # only free fields can be set
val = 0
if not(row == 0):
if color == board[row-1, col] * -1: # then is enemy
[group, libs] = give_group_at_position(board, row-1, col)
if libs == 1:
val = max(val, len(group))
if not(row == n-1):
if color == board[row+1, col] * -1:
[group, libs] = give_group_at_position(board, row+1, col)
if libs == 1:
val = max(val, len(group))
if not(col == 0):
if color == board[row, col-1] * -1:
[group, libs] = give_group_at_position(board, row, col-1)
if libs == 1:
val = max(val, len(group))
if not(col == n-1):
if color == board[row, col+1] * -1:
[group, libs] = give_group_at_position(board, row, col+1)
if libs == 1:
val = max(val, len(group))
cap[row, col] = val
return cap
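# Illustrative example: on gen_test_board(0) the white stone at (2, 3) has a
# single liberty at (2, 4), so filter_captures(b, 1) should report a capture
# of size 1 for black at position (2, 4).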
# rewards connecting groups and adding liberties to groups. But e.g. punishes playing a move into an own eye. ID = 3.
def filter_add_liberties(board, color):
board.reshape((9, 9))
n = board.shape[0]
libmat = np.zeros((n, n))
for row in range(n):
for col in range(n):
val = 0
if board[row, col] == 0: # only free fields can be set
temp = board * 1 # do not delete this
temp[row, col] = color
[g, li] = give_group_at_position(temp, row, col)
checked = []
neighbours = 0
if not(row == 0):
if color == board[row-1, col]:
[group, libs] = give_group_at_position(board, row-1, col)
val += li - libs
neighbours += 1
checked.extend(group)
if not(row == n-1):
if color == board[row+1, col]:
[group, libs] = give_group_at_position(board, row+1, col)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += li - libs
if not(col == 0):
if color == board[row, col-1]:
[group, libs] = give_group_at_position(board, row, col-1)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += li - libs
if not(col == n-1):
if color == board[row, col+1]:
[group, libs] = give_group_at_position(board, row, col+1)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += li - libs
libmat[row, col] = val
return libmat
# measures total liberties added if move is played. ID = 4
def filter_liberization(board, color):
board.reshape((9, 9))
n = board.shape[0]
libmat = np.zeros((n, n))
for row in range(n):
for col in range(n):
val = 0
if board[row, col] == 0: # only free fields can be set
temp = board * 1 # do not delete
temp[row, col] = color
[g, li] = give_group_at_position(temp, row, col)
val = li
checked = []
neighbours = 0
if not(row == 0):
if color == board[row-1, col]:
[group, libs] = give_group_at_position(board, row-1, col)
val += - libs
neighbours += 1
checked.extend(group)
if not(row == n-1):
if color == board[row+1, col]:
[group, libs] = give_group_at_position(board, row+1, col)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += - libs
if not(col == 0):
if color == board[row, col-1]:
[group, libs] = give_group_at_position(board, row, col-1)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += - libs
if not(col == n-1):
if color == board[row, col+1]:
[group, libs] = give_group_at_position(board, row, col+1)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += - libs
libmat[row, col] = val
return libmat
# Gives all groups with their sizes as field values at the member positions of a color. ID = 5.
def filter_groups(board, color):
board.reshape((9, 9))
n = board.shape[0]
gps = np.zeros((n, n))
for row in range(n):
for col in range(n):
if board[row, col] == color and gps[row, col] == 0:
[g, li] = give_group_at_position(board, row, col)
size = len(g)
for member in g:
gps[member] = size
return gps
# Gives all groups of size k of color. with_values=False unifies the output to 1 no ID yet
def filter_groups_of_size_k(board, k, color, with_values=False):
board.reshape((9, 9))
n = board.shape[0]
gps = np.zeros((n, n))
for row in range(n):
for col in range(n):
if board[row, col] == color and gps[row, col] == 0:
[g, li] = give_group_at_position(board, row, col)
size = len(g)
if size == k:
for member in g:
if with_values:
gps[member] = size
else:
gps[member] = 1
return gps
# Gives all groups of color with exactly k UNSECURED eyes (i.e. only stones that
# form the eye are contained within the same group, not the diagonal stones) no ID yet
def filter_groups_eyes_unsec(board, k, color):
board.reshape((9, 9))
n = board.shape[0]
res = np.zeros((n, n))
eyes = filter_eyes(board, color)
#print(eyes)
for row in range(n):
for col in range(n):
if eyes[row, col] == 1:
temp = board * 1
temp[row, col] = color
[g, li] = give_group_at_position(temp, row, col)
is_contained = True
if not(row == 0) and (row-1, col) not in g:
is_contained = False
if not(row == n-1) and (row+1, col) not in g:
is_contained = False
if not(col == 0) and (row, col-1) not in g:
is_contained = False
if not(col == n-1) and (row, col+1) not in g:
is_contained = False
if is_contained:
for [x, y] in g:
res[x, y] += 1
res[row, col] = 0
res[res != k] = 0
res[res == k] = 1
return res
# gives the board with only the stones of one color. ID = 6. ID = 7 for opponent color.
def filter_color_separation(board, color):
temp = board * 1
# Very interesting. by *1 we make sure temp is only a copy of
# board and not board itself. Else this function changes the board!
temp[temp != color] = 0
return temp
# Gives all legal possible moves of player color on a board. Sadly without KO and super-KO... :( . ID = 8
def filter_legal_moves(board, color):
bo = Board(9)
bo.vertices = board * 1
#bo.show()
leg = np.zeros((n, n))
for row in range(n):
for col in range(n):
if board[row, col] == 0:
if bo.play_is_legal(row, col, color):
leg[row, col] = 1
return leg
# The Summary Fiter Function
def apply_filters_by_id(board, color, filter_id=[0, 1, 2, 3, 4, 5, 6, 7, 8]):
filtered = []
if 0 in filter_id:
f0 = filter_eyes(board, color).flatten()
filtered.extend(f0)
if 1 in filter_id:
f1 = filter_eyes_create(board, color).flatten()
filtered.extend(f1)
if 2 in filter_id:
f2 = filter_captures(board, color).flatten()
filtered.extend(f2)
if 3 in filter_id:
f3 = filter_add_liberties(board, color).flatten()
filtered.extend(f3)
if 4 in filter_id:
f4 = filter_liberization(board, color).flatten()
filtered.extend(f4)
if 5 in filter_id:
f5 = filter_groups(board, color).flatten()
filtered.extend(f5)
if 6 in filter_id:
f6 = filter_color_separation(board, color).flatten()
filtered.extend(f6)
if 7 in filter_id:
f7 = filter_color_separation(board, -color).flatten()
filtered.extend(f7)
if 8 in filter_id:
f8 = filter_legal_moves(board, color).flatten()
filtered.extend(f8)
return filtered
# Tests
def test():
b = gen_test_board(1)
print("Board")
print(b)
white_eyes = filter_eyes(b,1)
print("Eyes white")
print(white_eyes)
black_eyes = filter_eyes(b,-1)
print("Eyes black")
print(black_eyes)
w_e_r = filter_eyes_create(b,1)
print("Eyes white can create")
print(w_e_r)
b_e_r = filter_eyes_create(b,-1)
print("Eyes black can create")
print(b_e_r)
libs_w = give_liberties(b,1)
print("Liberties white")
print(libs_w)
libs_b = give_liberties(b,-1)
print("Liberties black")
print(libs_b)
cap_w = filter_captures(b,1)
print("Captures white")
print(cap_w)
cap_b = filter_captures(b,-1)
print("Captures black")
print(cap_b)
add_lib_w = filter_add_liberties(b,1)
print("Liberties added to groups of white")
print(add_lib_w)
add_lib_b = filter_add_liberties(b,-1)
print("Liberties added to groups of black")
print(add_lib_b)
liber_w = filter_liberization(b, 1)
print("Liberization of white")
print(liber_w)
liber_b = filter_liberization(b, -1)
print("Liberization of black")
print(liber_b)
leg_w = filter_legal_moves(b, 1)
print("Legal moves of white")
print(leg_w)
    leg_b = filter_legal_moves(b, -1)
print("Legal moves of black")
print(leg_b)
#test()
def test1():
b = gen_test_board(0)
filtered = apply_filters_by_id(b, 1)
print(filtered)
print('...')
one_vec = apply_filters_by_id(b, -1)
filtered = []
    for i in range(9): # one 81-entry chunk per filter (ids 0-8)
filtered.append(one_vec[i * 81:(i + 1) * 81])
print(filtered)
#test1()
def test2():
b = gen_test_board(0)
print(b)
F = filter_color_separation(b, 1)
F2 = 0.5 * (F + F.T)
F3 = 0.5 * (F - F.T)
print(F)
print(F2)
print(F3)
#test2()
| stefanpeidli/GoNet | Filters.py | Python | mit | 15,577 | 0.004365 |
# encoding: utf8
from sympy import Add
from uncertainties import __version_info__ as uncert_version
from uncertainties import ufloat, ufloat_fromstr
from uncertainties.core import Variable, AffineScalarFunc
if uncert_version < (3, 0):
    raise Warning("Your version of uncertainties is not supported. Try\n"
"$ sudo pip install uncertainties --upgrade")
class Series:
"""
The class that provides the expansion in powers of g up to the n-th order,
taking the error into account.
"""
def __init__(self, n, d={0: 0}, name='g', analytic=None):
"""
Example:
`z2 = Series(3, {0: ufloat(-1, 0.4), 1: ufloat(-2, .004), 2: ufloat(999, .1)})`
will give:
Z₂(g) = -1.0(4) - 2.000(4) g + 999.00(10) g²
:param n: number of the "known" orders, `int`
:param d: dictionary with k=powers, v=`ufloat`s
:param name: name of the series variable, arbitrary character, default is `'g'`
:param analytic: boolean
"""
self.n = n
self.gSeries = d
self.name = name
for k, v in d.items():
if isinstance(v, AffineScalarFunc):
self.gSeries[k] = v
elif isinstance(v, (list, tuple)):
self.gSeries[k] = ufloat(v[0], v[1])
elif isinstance(v, str):
self.gSeries[k] = ufloat_fromstr(v)
elif isinstance(v, int):
self.gSeries[k] = v
self.analytic = True
else:
raise TypeError("Series constructor warning: Type(v)={}".format(type(v)))
if analytic is not None:
# XXX: if defined explicitly:
self.analytic = bool(analytic)
else:
# XXX: if all values are ints assume analytic
self.analytic = all(map(lambda x: type(x) == int, d.values()))
for i in range(0, n):
if i not in d.keys():
if self.analytic:
self.gSeries[i] = 0
else:
self.gSeries[i] = ufloat(0, 0)
def __lt__(self, other):
return len(self.gSeries) < len(other.gSeries)
def __add__(self, other):
tmp = dict(self.gSeries)
# print "From __add__:",self.analytic," + ",other.pprint() ## FIXME
if isinstance(other, Series):
stop = min(self.n, other.n)
if stop == 0:
stop = max(self.n, other.n)
for g in other.gSeries.keys():
if g <= stop:
try:
tmp[g] += other.gSeries[g]
except KeyError:
tmp[g] = other.gSeries[g]
elif isinstance(other, (int, float)):
tmp[0] += other
else:
print("{} {}".format(type(self), type(other)))
raise NotImplementedError
return Series(len(tmp), tmp, name=self.name, analytic=self.analytic)
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self + (-1) * other
def __mul__(self, other):
tmp = {}
if isinstance(other, Series):
stop = min(self.n, other.n)
for i in self.gSeries.keys():
for j in other.gSeries.keys():
if (i + j) <= stop:
try:
tmp[i + j] += self.gSeries[i] * other.gSeries[j]
except KeyError:
tmp[i + j] = self.gSeries[i] * other.gSeries[j]
res = Series(max(self.n, other.n), tmp, name=self.name, analytic=self.analytic)
elif isinstance(other, (int, float, Variable, AffineScalarFunc, Add)):
for i in self.gSeries.keys():
tmp[i] = self.gSeries[i] * other
res = Series(self.n, tmp, name=self.name, analytic=self.analytic)
elif other == 0 or sum(map(lambda v: v == 0, self.gSeries.values())) == len(self.gSeries):
return 0
# elif isinstance(other, sympy.core.add.Add):
# print "\n\nself=",self
# print "other=",other
# return 0
else:
print("\nself = {}, type(self) = {}".format(self.gSeries, type(self)))
print("\nother = {}, type(other) = {}".format(other, type(other)))
raise NotImplementedError
return res
def __rmul__(self, other):
return self * other
def __neg__(self):
return self * (-1)
def __invert__(self):
""" Z.__invert__() = 1/Z
1/(1+x)=Sum_i (-1)^i x^i
"""
res = Series(self.n, {}, self.name, analytic=self.analytic)
if self.gSeries[0] == 1:
c = 1.
normed_series = self + Series(self.n, {0: -1}, self.name, analytic=self.analytic) # <-- it's -1!
elif self.gSeries[0] != 0:
c = 1. / self.gSeries[0]
normed_series = self / self.gSeries[0] + Series(self.n, {0: -1}, self.name,
analytic=self.analytic) # <-- it's -1!
else:
raise NotImplementedError("no constant term in series: %s" % self.gSeries)
# if self.gSeries[0] == 1:
# tmp = Series(self.gSeries[1:], n = self.n-1, name=self.name)
# for i in range(tmp.n):
for i in range(len(self.gSeries)):
res += (-1) ** i * normed_series ** i
return res * c
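    # Sanity check (illustrative, analytic coefficients): inverting the series
    # 1 - 2g, i.e. ~Series(3, {0: 1, 1: -2}), yields 1 + 2*g**1 + 4*g**2,
    # matching the geometric expansion 1/(1-2g) = sum_i (2g)^i up to order 2.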
def __div__(self, other):
""" For now we assume all the powers of g as non-negative
"""
if isinstance(other, Series):
return self * other.__invert__()
elif isinstance(other, (int, float, Variable, AffineScalarFunc)):
return self * (1. / other)
else:
raise NotImplementedError("type: {}; {}".format(type(other), other.__repr__()))
def __rdiv__(self, other):
return other * self.__invert__()
def __pow__(self, power, modulo=None):
if isinstance(power, int) and power > 1:
return reduce(lambda x, y: x * y, [self] * power)
elif isinstance(power, int) and power == 1:
return self
elif isinstance(power, int) and power == 0:
if self.analytic:
return Series(self.n, {0: 1}, self.name, analytic=self.analytic)
else:
return Series(self.n, {0: ufloat(1, 0)}, self.name, analytic=self.analytic)
else:
print("power = {}, type(power) = {}".format(power, type(power)))
raise NotImplementedError
def diff(self):
"""
Differentiation of the polynomial in g
"""
res = {}
for i in range(len(self.gSeries) - 1):
res[i] = (i + 1) * self.gSeries[i + 1]
return Series(self.n, res, analytic=self.analytic)
def __repr__(self):
return self.pprint()
## FIXME
def _approx(self, other):
for k, v in self.gSeries.items():
if v != other.gSeries[k]:
return False
return True
def __str__(self):
"""
The result is truncated according to the error, indicating the accuracy of the least significant digit
"""
res = ''
for g, c in self.gSeries.items():
if c != 0 and g == 0 and isinstance(c, int):
res += " %d + " % (c)
elif c != 0 and g == 0:
res += " %s + " % (c.format('S'))
elif c != 0 and g <= self.n and isinstance(c, (Variable, AffineScalarFunc)):
if c.s < 1e-14:
res += "%s * %s**%s + " % (str(c.n), self.name, str(g))
else:
res += " %s * %s**%s + " % (c.format('S'), self.name, str(g))
elif c != 0 and g <= self.n and isinstance(c, (int, float)):
res += "%s * %s**%s + " % (str(c), self.name, str(g))
return res[:-3] or '0'
def coeffs(self):
"""
"""
return map(lambda x: float(x.format('S').split("(")[0]), self.gSeries.values())[:self.n + 1]
def pprint(self):
res = ""
for g, c in self.gSeries.items():
if c != 0 and g <= self.n and not self.analytic:
res += "(%s ± %s) * %s**%s + " % (str(c.n), str(c.s), self.name, str(g))
elif c != 0 and g <= self.n and self.analytic:
try:
this_term = c.format('S')
except AttributeError:
this_term = str(c)
res += "(%s) * %s**%s + " % (this_term, self.name, str(g))
return res[:-3] or '0'
def __len__(self):
return len(self.gSeries)
def subs(self, point):
res = Series(n=self.n, d={0: ufloat(0, 0)}, name=point.name, analytic=self.analytic)
for i, c in self.gSeries.items():
res += c * (point ** i)
return res
    def save(self):
        """Print a constructor expression that recreates this series"""
        slov = ''
        for k, v in self.gSeries.items():
            slov += "%d: '%s', " % (k, v)
        print("Series({}, {{{}}}, '{}')".format(self.n, slov[:-2], self.name))
if __name__ == "__main__":
Z1 = Series(1)
Z2 = Series(2, {0: ufloat(-4, 0.3), 1: ufloat(2, .002)})
print("Z1 = {}".format(Z1))
print("Z2 = {}".format(Z2))
print("Z2.diff() = {}".format(Z2.diff()))
print("Z2 - Z2 = {}".format(Z2-Z2))
print("1/Z2 = {}".format(1 / Z2))
print("Z1*Z2 = {}".format(Z1 * Z2))
print("Z2**2 = {}".format(Z2 ** 2))
| kirienko/unseries | unseries.py | Python | gpl-3.0 | 9,529 | 0.002205 |
#
# eventmanager.py
#
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import logging
import deluge.component as component
log = logging.getLogger(__name__)
class EventManager(component.Component):
def __init__(self):
component.Component.__init__(self, "EventManager")
self.handlers = {}
def emit(self, event):
"""
Emits the event to interested clients.
:param event: DelugeEvent
"""
# Emit the event to the interested clients
component.get("RPCServer").emit_event(event)
# Call any handlers for the event
if event.name in self.handlers:
for handler in self.handlers[event.name]:
                #log.debug("Running handler %s for event %s with args: %s", handler, event.name, event.args)
                try:
                    handler(*event.args)
                except Exception, e:
                    log.error("Event handler %s for event %s failed with exception %s", handler, event.name, e)
def register_event_handler(self, event, handler):
"""
Registers a function to be called when a `:param:event` is emitted.
:param event: str, the event name
:param handler: function, to be called when `:param:event` is emitted
"""
if event not in self.handlers:
self.handlers[event] = []
if handler not in self.handlers[event]:
self.handlers[event].append(handler)
def deregister_event_handler(self, event, handler):
"""
Deregisters an event handler function.
:param event: str, the event name
:param handler: function, currently registered to handle `:param:event`
"""
if event in self.handlers and handler in self.handlers[event]:
self.handlers[event].remove(handler)
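
# Minimal usage sketch (not part of the original module). It assumes a
# running deluge core where this component and an "RPCServer" component are
# registered, and that TorrentAddedEvent(torrent_id) matches the event class
# defined in deluge.event:
#
#   def on_torrent_added(torrent_id):
#       log.info("Torrent added: %s", torrent_id)
#
#   events = component.get("EventManager")
#   events.register_event_handler("TorrentAddedEvent", on_torrent_added)
#   events.emit(TorrentAddedEvent(torrent_id))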
| Tydus/deluge | deluge/core/eventmanager.py | Python | gpl-3.0 | 3,162 | 0.001265 |
#!/usr/bin/python
# -*- coding: utf-8 -*
"""
The MIT License (MIT)
Copyright (c) 2015 Christophe Aubert
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = "Christophe Aubert"
__version__ = "1.0"
import SqlCommand
class CreateMysqlTable(SqlCommand.SqlCommand):
def __init__(self):
self.sqlCommand = []
def createTable(self):
# Table: sensorType
#------------------------------------------------------------
sensorType = "CREATE TABLE sensorType( "\
"st_id INT (11) Auto_increment NOT NULL ,"\
"st_type VARCHAR (50) NOT NULL ,"\
"PRIMARY KEY (st_id )"\
")ENGINE=InnoDB;"
self.sqlCommand.append(sensorType)
#------------------------------------------------------------
# Table: measure
#------------------------------------------------------------
measure = "CREATE TABLE measure( "\
"m_id INT (11) Auto_increment NOT NULL ,"\
"m_date INT NOT NULL ,"\
"m_value FLOAT NOT NULL ,"\
"s_id INT NOT NULL ,"\
"PRIMARY KEY (m_id )"\
")ENGINE=InnoDB;"
self.sqlCommand.append(measure)
#------------------------------------------------------------
# Table: sensor
#------------------------------------------------------------
sensor = "CREATE TABLE sensor( "\
"s_id INT (11) Auto_increment NOT NULL , "\
"st_id INT NOT NULL , "\
"sta_id INT NOT NULL , "\
"PRIMARY KEY (s_id ) "\
")ENGINE=InnoDB;"
self.sqlCommand.append(sensor)
#------------------------------------------------------------
# Table: station
#------------------------------------------------------------
station = "CREATE TABLE station( "\
"sta_id INT (11) Auto_increment NOT NULL , "\
"sta_name VARCHAR (50) NOT NULL , "\
"sta_longitude FLOAT NOT NULL , "\
"sta_latitude FLOAT NOT NULL, "\
"sta_installDate INT NOT NULL, "\
"PRIMARY KEY (sta_id ) "\
")ENGINE=InnoDB;"
self.sqlCommand.append(station)
#------------------------------------------------------------
# Table: user
#------------------------------------------------------------
user = "CREATE TABLE user ( "\
"u_id INT (11) Auto_increment NOT NULL,"\
"u_lastName VARCHAR(30) NOT NULL,"\
"u_firstName VARCHAR(30) NOT NULL,"\
"u_description VARCHAR(200) NOT NULL,"\
"PRIMARY KEY (u_id)"\
")ENGINE=InnoDB;"
self.sqlCommand.append(user)
#------------------------------------------------------------
# Table: connection
#------------------------------------------------------------
connection = "CREATE TABLE connection ( "\
"c_id INT (11) Auto_increment NOT NULL,"\
"u_id INT NOT NULL,"\
"c_login VARCHAR(30) NOT NULL,"\
"c_password VARCHAR (50) NOT NULL ,"\
"c_adminKey BOOLEAN DEFAULT NULL,"\
"PRIMARY KEY(c_id)"\
")ENGINE=InnoDB;"
self.sqlCommand.append(connection)
stationAccess = "CREATE TABLE stationAccess ( "\
"staa_id INT (11) Auto_increment NOT NULL,"\
"u_id INT NOT NULL ,"\
"sta_id INT NOT NULL ,"\
"PRIMARY KEY(staa_id)"\
")ENGINE=InnoDB;"
self.sqlCommand.append(stationAccess)
#------------------------------------------------------------
# ALTER TABLE
#------------------------------------------------------------
atMeasure = "ALTER TABLE measure ADD CONSTRAINT FK_measure_s_id "\
"FOREIGN KEY (s_id) REFERENCES sensor(s_id);"
self.sqlCommand.append(atMeasure)
atsensor = "ALTER TABLE sensor ADD CONSTRAINT FK_sensor_st_id "\
"FOREIGN KEY (st_id) REFERENCES sensorType(st_id);"
self.sqlCommand.append(atsensor)
atsensor2 = "ALTER TABLE sensor ADD CONSTRAINT FK_sensor_sta_id "\
"FOREIGN KEY (sta_id) REFERENCES station(sta_id);"
self.sqlCommand.append(atsensor2)
atConnection = "ALTER TABLE connection ADD CONSTRAINT FK_connection_u_id "\
"FOREIGN KEY (u_id) REFERENCES user(u_id)"
self.sqlCommand.append(atConnection)
atstationAccess = "ALTER TABLE stationAccess ADD CONSTRAINT FK_stationAccess_u_id "\
"FOREIGN KEY (u_id) REFERENCES user(u_id)"
self.sqlCommand.append(atstationAccess)
atstationAccess2 = "ALTER TABLE stationAccess ADD CONSTRAINT FK_stationAccess_sta_id "\
"FOREIGN KEY (sta_id) REFERENCES station(sta_id)"
self.sqlCommand.append(atstationAccess2)
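    # Minimal usage sketch (not part of the original class), assuming the
    # MySQLdb driver and placeholder connection parameters:
    #
    #   import MySQLdb
    #   db = MySQLdb.connect(host="localhost", user="user",
    #                        passwd="secret", db="weather")
    #   cursor = db.cursor()
    #   builder = CreateMysqlTable()
    #   builder.createTable()
    #   for statement in builder.getSQL():
    #       cursor.execute(statement)
    #   db.commit()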
def getSQL(self):
return self.sqlCommand | Bideau/SmartForrest | RaspberryPi/dataBase/mysql/CreateMysqlTable.py | Python | mit | 6,863 | 0.006994 |
# -*- coding: utf-8 -*-
#************************************************************************
#
# TeX-9 library: Python module
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Elias Toivanen, 2011-2014
#
#************************************************************************
import re
import vim
import sys
# Utility functions
def echoerr(errorstr):
sys.stderr.write("TeX-9: {0}\n".format(str(errorstr)))
def echomsg(msgstr):
sys.stdout.write("TeX-9: {0}\n".format(str(msgstr)))
def get_latex_environment(vim_window):
"""Get information about the current LaTeX environment.
Returns a dictionary with keys
'environment': the name of the current LaTeX environment
'range': 2-tuple of the beginning and ending line numbers
"""
pat = re.compile(r'^\s*\\(begin|end){([^}]+)}')
b = list(vim_window.buffer)
row = vim_window.cursor[0] - 1
environment = ""
begin = end = 0
current_line = b[row]
head = b[row - 1::-1] # From line above to the start
tail = b[row + 1:] # From next line to the end
c = pat.match(current_line)
if c:
environment = c.group(2)
if c.group(1) == 'end':
end = row + 1
elif c.group(1) == 'begin':
begin = row + 1
if not begin:
envs = {}
for i, line in enumerate(head):
m = pat.match(line)
if m:
e = m.group(2)
envs[m.groups()] = i
if ('begin', e) in envs and ('end', e) in envs and envs[('end', e)] < envs[('begin', e)]:
# Eliminate nested environments
del envs[('begin', e)]
del envs[('end', e)]
elif ('end', e) not in envs:
begin = row - i
environment = e
break
if not end:
envs = {}
for i, line in enumerate(tail):
m = pat.match(line)
if m:
envs[m.groups()] = i
e = m.group(2)
if ('begin', e) in envs and ('end', e) in envs:
#and envs[('end', e)] > envs[('begin', e)]:
# Eliminate nested environments
del envs[('begin', e)]
del envs[('end', e)]
elif m.groups() == ('end', environment):
end = row + i + 2
break
return {'environment': environment, 'range': (begin, end)}
def is_latex_math_environment(vim_window,
environments = re.compile(r"matrix|cases|math|equation|align|array")):
"""Returns True if the cursor is currently on a maths environment."""
e = get_latex_environment(vim_window)
return bool(environments.search(e['environment']))
def find_compiler(vimbuffer, nlines=10):
"""Finds the compiler from the header."""
lines = "\n".join(vimbuffer[:nlines])
if lines:
c = re.search("^%\s*Compiler:\s*(\S+)", lines, re.M)
if c:
return c.group(1).strip()
else:
return ""
else:
#Cannot determine the compiler
return ""
class TeXNineError(Exception):
pass
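
# Minimal sketch (not part of the original module): find_compiler only needs
# an indexable sequence of lines, so a plain list can stand in for a Vim
# buffer:
#
#   find_compiler(["% Compiler: xelatex", "\\documentclass{article}"])
#   # -> 'xelatex'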
| vim-scripts/TeX-9 | ftplugin/tex_nine/tex_nine_utils.py | Python | gpl-3.0 | 3,875 | 0.005935 |
"""Tests for HTMLParser.py."""
import html.parser
import pprint
import unittest
from test import support
class EventCollector(html.parser.HTMLParser):
def __init__(self, *args, **kw):
self.events = []
self.append = self.events.append
html.parser.HTMLParser.__init__(self, *args, **kw)
def get_events(self):
# Normalize the list of events so that buffer artefacts don't
# separate runs of contiguous characters.
L = []
prevtype = None
for event in self.events:
type = event[0]
if type == prevtype == "data":
L[-1] = ("data", L[-1][1] + event[1])
else:
L.append(event)
prevtype = type
self.events = L
return L
# structure markup
def handle_starttag(self, tag, attrs):
self.append(("starttag", tag, attrs))
def handle_startendtag(self, tag, attrs):
self.append(("startendtag", tag, attrs))
def handle_endtag(self, tag):
self.append(("endtag", tag))
# all other markup
def handle_comment(self, data):
self.append(("comment", data))
def handle_charref(self, data):
self.append(("charref", data))
def handle_data(self, data):
self.append(("data", data))
def handle_decl(self, data):
self.append(("decl", data))
def handle_entityref(self, data):
self.append(("entityref", data))
def handle_pi(self, data):
self.append(("pi", data))
def unknown_decl(self, decl):
self.append(("unknown decl", decl))
class EventCollectorExtra(EventCollector):
def handle_starttag(self, tag, attrs):
EventCollector.handle_starttag(self, tag, attrs)
self.append(("starttag_text", self.get_starttag_text()))
class TestCaseBase(unittest.TestCase):
def get_collector(self):
raise NotImplementedError
def _run_check(self, source, expected_events, collector=None):
if collector is None:
collector = self.get_collector()
parser = collector
for s in source:
parser.feed(s)
parser.close()
events = parser.get_events()
if events != expected_events:
self.fail("received events did not match expected events\n"
"Expected:\n" + pprint.pformat(expected_events) +
"\nReceived:\n" + pprint.pformat(events))
def _run_check_extra(self, source, events):
self._run_check(source, events, EventCollectorExtra())
def _parse_error(self, source):
def parse(source=source):
parser = self.get_collector()
parser.feed(source)
parser.close()
self.assertRaises(html.parser.HTMLParseError, parse)
class HTMLParserStrictTestCase(TestCaseBase):
def get_collector(self):
        with support.check_warnings(("", DeprecationWarning), quiet=False):
return EventCollector(strict=True)
def test_processing_instruction_only(self):
self._run_check("<?processing instruction>", [
("pi", "processing instruction"),
])
self._run_check("<?processing instruction ?>", [
("pi", "processing instruction ?"),
])
def test_simple_html(self):
self._run_check("""
<!DOCTYPE html PUBLIC 'foo'>
<HTML>&entity; 
<!--comment1a
-></foo><bar><<?pi?></foo<bar
comment1b-->
<Img sRc='Bar' isMAP>sample
text
“
<!--comment2a-- --comment2b-->
</Html>
""", [
("data", "\n"),
("decl", "DOCTYPE html PUBLIC 'foo'"),
("data", "\n"),
("starttag", "html", []),
("entityref", "entity"),
("charref", "32"),
("data", "\n"),
("comment", "comment1a\n-></foo><bar><<?pi?></foo<bar\ncomment1b"),
("data", "\n"),
("starttag", "img", [("src", "Bar"), ("ismap", None)]),
("data", "sample\ntext\n"),
("charref", "x201C"),
("data", "\n"),
("comment", "comment2a-- --comment2b"),
("data", "\n"),
("endtag", "html"),
("data", "\n"),
])
def test_malformatted_charref(self):
self._run_check("<p>&#bad;</p>", [
("starttag", "p", []),
("data", "&#bad;"),
("endtag", "p"),
])
def test_unclosed_entityref(self):
self._run_check("&entityref foo", [
("entityref", "entityref"),
("data", " foo"),
])
def test_bad_nesting(self):
# Strangely, this *is* supposed to test that overlapping
# elements are allowed. HTMLParser is more geared toward
        # lexing the input than parsing the structure.
self._run_check("<a><b></a></b>", [
("starttag", "a", []),
("starttag", "b", []),
("endtag", "a"),
("endtag", "b"),
])
def test_bare_ampersands(self):
self._run_check("this text & contains & ampersands &", [
("data", "this text & contains & ampersands &"),
])
def test_bare_pointy_brackets(self):
self._run_check("this < text > contains < bare>pointy< brackets", [
("data", "this < text > contains < bare>pointy< brackets"),
])
def test_illegal_declarations(self):
self._parse_error('<!spacer type="block" height="25">')
def test_starttag_end_boundary(self):
self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
def test_buffer_artefacts(self):
output = [("starttag", "a", [("b", "<")])]
self._run_check(["<a b='<'>"], output)
self._run_check(["<a ", "b='<'>"], output)
self._run_check(["<a b", "='<'>"], output)
self._run_check(["<a b=", "'<'>"], output)
self._run_check(["<a b='<", "'>"], output)
self._run_check(["<a b='<'", ">"], output)
output = [("starttag", "a", [("b", ">")])]
self._run_check(["<a b='>'>"], output)
self._run_check(["<a ", "b='>'>"], output)
self._run_check(["<a b", "='>'>"], output)
self._run_check(["<a b=", "'>'>"], output)
self._run_check(["<a b='>", "'>"], output)
self._run_check(["<a b='>'", ">"], output)
output = [("comment", "abc")]
self._run_check(["", "<!--abc-->"], output)
self._run_check(["<", "!--abc-->"], output)
self._run_check(["<!", "--abc-->"], output)
self._run_check(["<!-", "-abc-->"], output)
self._run_check(["<!--", "abc-->"], output)
self._run_check(["<!--a", "bc-->"], output)
self._run_check(["<!--ab", "c-->"], output)
self._run_check(["<!--abc", "-->"], output)
self._run_check(["<!--abc-", "->"], output)
self._run_check(["<!--abc--", ">"], output)
self._run_check(["<!--abc-->", ""], output)
def test_starttag_junk_chars(self):
self._parse_error("</>")
self._parse_error("</$>")
self._parse_error("</")
self._parse_error("</a")
self._parse_error("<a<a>")
self._parse_error("</a<a>")
self._parse_error("<!")
self._parse_error("<a")
self._parse_error("<a foo='bar'")
self._parse_error("<a foo='bar")
self._parse_error("<a foo='>'")
self._parse_error("<a foo='>")
def test_valid_doctypes(self):
# from http://www.w3.org/QA/2002/04/valid-dtd-list.html
dtds = ['HTML', # HTML5 doctype
('HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd"'),
('HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" '
'"http://www.w3.org/TR/html4/loose.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd"'),
('math PUBLIC "-//W3C//DTD MathML 2.0//EN" '
'"http://www.w3.org/Math/DTD/mathml2/mathml2.dtd"'),
('html PUBLIC "-//W3C//DTD '
'XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" '
'"http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"'),
('svg PUBLIC "-//W3C//DTD SVG 1.1//EN" '
'"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"'),
'html PUBLIC "-//IETF//DTD HTML 2.0//EN"',
'html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"']
for dtd in dtds:
self._run_check("<!DOCTYPE %s>" % dtd,
[('decl', 'DOCTYPE ' + dtd)])
def test_declaration_junk_chars(self):
self._parse_error("<!DOCTYPE foo $ >")
def test_startendtag(self):
self._run_check("<p/>", [
("startendtag", "p", []),
])
self._run_check("<p></p>", [
("starttag", "p", []),
("endtag", "p"),
])
self._run_check("<p><img src='foo' /></p>", [
("starttag", "p", []),
("startendtag", "img", [("src", "foo")]),
("endtag", "p"),
])
def test_get_starttag_text(self):
s = """<foo:bar \n one="1"\ttwo=2 >"""
self._run_check_extra(s, [
("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
("starttag_text", s)])
def test_cdata_content(self):
contents = [
            '<!-- not a comment --> &not-an-entity-ref;',
"<not a='start tag'>",
'<a href="" /> <p> <span></span>',
'foo = "</scr" + "ipt>";',
'foo = "</SCRIPT" + ">";',
'foo = <\n/script> ',
'<!-- document.write("</scr" + "ipt>"); -->',
('\n//<![CDATA[\n'
'document.write(\'<s\'+\'cript type="text/javascript" '
'src="http://www.example.org/r=\'+new '
'Date().getTime()+\'"><\\/s\'+\'cript>\');\n//]]>'),
'\n<!-- //\nvar foo = 3.14;\n// -->\n',
'foo = "</sty" + "le>";',
'<!-- \u2603 -->',
# these two should be invalid according to the HTML 5 spec,
# section 8.1.2.2
#'foo = </\nscript>',
#'foo = </ script>',
]
elements = ['script', 'style', 'SCRIPT', 'STYLE', 'Script', 'Style']
for content in contents:
for element in elements:
element_lower = element.lower()
s = '<{element}>{content}</{element}>'.format(element=element,
content=content)
self._run_check(s, [("starttag", element_lower, []),
("data", content),
("endtag", element_lower)])
def test_cdata_with_closing_tags(self):
# see issue #13358
# make sure that HTMLParser calls handle_data only once for each CDATA.
# The normal event collector normalizes the events in get_events,
# so we override it to return the original list of events.
class Collector(EventCollector):
def get_events(self):
return self.events
content = """<!-- not a comment --> ¬-an-entity-ref;
<a href="" /> </p><p> <span></span></style>
'</script' + '>'"""
for element in [' script', 'script ', ' script ',
'\nscript', 'script\n', '\nscript\n']:
element_lower = element.lower().strip()
s = '<script>{content}</{element}>'.format(element=element,
content=content)
self._run_check(s, [("starttag", element_lower, []),
("data", content),
("endtag", element_lower)],
collector=Collector())
def test_comments(self):
html = ("<!-- I'm a valid comment -->"
'<!--me too!-->'
'<!------>'
'<!---->'
'<!----I have many hyphens---->'
'<!-- I have a > in the middle -->'
'<!-- and I have -- in the middle! -->')
expected = [('comment', " I'm a valid comment "),
('comment', 'me too!'),
('comment', '--'),
('comment', ''),
('comment', '--I have many hyphens--'),
('comment', ' I have a > in the middle '),
('comment', ' and I have -- in the middle! ')]
self._run_check(html, expected)
def test_condcoms(self):
html = ('<!--[if IE & !(lte IE 8)]>aren\'t<![endif]-->'
'<!--[if IE 8]>condcoms<![endif]-->'
'<!--[if lte IE 7]>pretty?<![endif]-->')
expected = [('comment', "[if IE & !(lte IE 8)]>aren't<![endif]"),
('comment', '[if IE 8]>condcoms<![endif]'),
('comment', '[if lte IE 7]>pretty?<![endif]')]
self._run_check(html, expected)
class HTMLParserTolerantTestCase(HTMLParserStrictTestCase):
def get_collector(self):
return EventCollector(strict=False)
def test_tolerant_parsing(self):
self._run_check('<html <html>te>>xt&a<<bc</a></html>\n'
'<img src="URL><//img></html</html>', [
('starttag', 'html', [('<html', None)]),
('data', 'te>>xt'),
('entityref', 'a'),
('data', '<<bc'),
('endtag', 'a'),
('endtag', 'html'),
('data', '\n<img src="URL>'),
('comment', '/img'),
('endtag', 'html<')])
def test_starttag_junk_chars(self):
self._run_check("</>", [])
self._run_check("</$>", [('comment', '$')])
self._run_check("</", [('data', '</')])
self._run_check("</a", [('data', '</a')])
# XXX this might be wrong
self._run_check("<a<a>", [('data', '<a'), ('starttag', 'a', [])])
self._run_check("</a<a>", [('endtag', 'a<a')])
self._run_check("<!", [('data', '<!')])
self._run_check("<a", [('data', '<a')])
self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
self._run_check("<a foo='bar", [('data', "<a foo='bar")])
self._run_check("<a foo='>'", [('data', "<a foo='>'")])
self._run_check("<a foo='>", [('data', "<a foo='>")])
def test_slashes_in_starttag(self):
self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])])
html = ('<img width=902 height=250px '
'src="/sites/default/files/images/homepage/foo.jpg" '
'/*what am I doing here*/ />')
expected = [(
'startendtag', 'img',
[('width', '902'), ('height', '250px'),
('src', '/sites/default/files/images/homepage/foo.jpg'),
('*what', None), ('am', None), ('i', None),
('doing', None), ('here*', None)]
)]
self._run_check(html, expected)
html = ('<a / /foo/ / /=/ / /bar/ / />'
'<a / /foo/ / /=/ / /bar/ / >')
expected = [
('startendtag', 'a', [('foo', None), ('=', None), ('bar', None)]),
('starttag', 'a', [('foo', None), ('=', None), ('bar', None)])
]
self._run_check(html, expected)
#see issue #14538
html = ('<meta><meta / ><meta // ><meta / / >'
'<meta/><meta /><meta //><meta//>')
expected = [
('starttag', 'meta', []), ('starttag', 'meta', []),
('starttag', 'meta', []), ('starttag', 'meta', []),
('startendtag', 'meta', []), ('startendtag', 'meta', []),
('startendtag', 'meta', []), ('startendtag', 'meta', []),
]
self._run_check(html, expected)
def test_declaration_junk_chars(self):
self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')])
def test_illegal_declarations(self):
self._run_check('<!spacer type="block" height="25">',
[('comment', 'spacer type="block" height="25"')])
def test_with_unquoted_attributes(self):
# see #12008
html = ("<html><body bgcolor=d0ca90 text='181008'>"
"<table cellspacing=0 cellpadding=1 width=100% ><tr>"
"<td align=left><font size=-1>"
"- <a href=/rabota/><span class=en> software-and-i</span></a>"
"- <a href='/1/'><span class=en> library</span></a></table>")
expected = [
('starttag', 'html', []),
('starttag', 'body', [('bgcolor', 'd0ca90'), ('text', '181008')]),
('starttag', 'table',
[('cellspacing', '0'), ('cellpadding', '1'), ('width', '100%')]),
('starttag', 'tr', []),
('starttag', 'td', [('align', 'left')]),
('starttag', 'font', [('size', '-1')]),
('data', '- '), ('starttag', 'a', [('href', '/rabota/')]),
('starttag', 'span', [('class', 'en')]), ('data', ' software-and-i'),
('endtag', 'span'), ('endtag', 'a'),
('data', '- '), ('starttag', 'a', [('href', '/1/')]),
('starttag', 'span', [('class', 'en')]), ('data', ' library'),
('endtag', 'span'), ('endtag', 'a'), ('endtag', 'table')
]
self._run_check(html, expected)
def test_comma_between_attributes(self):
        self._run_check('<form action="/xxx.php?a=1&amp;b=2&amp", '
                        'method="post">', [
                            ('starttag', 'form',
                             [('action', '/xxx.php?a=1&b=2&amp'),
                              (',', None), ('method', 'post')])])
def test_weird_chars_in_unquoted_attribute_values(self):
self._run_check('<form action=bogus|&#()value>', [
('starttag', 'form',
[('action', 'bogus|&#()value')])])
def test_invalid_end_tags(self):
# A collection of broken end tags. <br> is used as separator.
# see http://www.w3.org/TR/html5/tokenization.html#end-tag-open-state
# and #13993
html = ('<br></label</p><br></div end tmAd-leaderBoard><br></<h4><br>'
'</li class="unit"><br></li\r\n\t\t\t\t\t\t</ul><br></><br>')
expected = [('starttag', 'br', []),
# < is part of the name, / is discarded, p is an attribute
('endtag', 'label<'),
('starttag', 'br', []),
# text and attributes are discarded
('endtag', 'div'),
('starttag', 'br', []),
# comment because the first char after </ is not a-zA-Z
('comment', '<h4'),
('starttag', 'br', []),
# attributes are discarded
('endtag', 'li'),
('starttag', 'br', []),
# everything till ul (included) is discarded
('endtag', 'li'),
('starttag', 'br', []),
# </> is ignored
('starttag', 'br', [])]
self._run_check(html, expected)
def test_broken_invalid_end_tag(self):
# This is technically wrong (the "> shouldn't be included in the 'data')
# but is probably not worth fixing it (in addition to all the cases of
# the previous test, it would require a full attribute parsing).
# see #13993
html = '<b>This</b attr=">"> confuses the parser'
expected = [('starttag', 'b', []),
('data', 'This'),
('endtag', 'b'),
('data', '"> confuses the parser')]
self._run_check(html, expected)
def test_correct_detection_of_start_tags(self):
# see #13273
html = ('<div style="" ><b>The <a href="some_url">rain</a> '
'<br /> in <span>Spain</span></b></div>')
expected = [
('starttag', 'div', [('style', '')]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
('data', 'rain'),
('endtag', 'a'),
('data', ' '),
('startendtag', 'br', []),
('data', ' in '),
('starttag', 'span', []),
('data', 'Spain'),
('endtag', 'span'),
('endtag', 'b'),
('endtag', 'div')
]
self._run_check(html, expected)
html = '<div style="", foo = "bar" ><b>The <a href="some_url">rain</a>'
expected = [
('starttag', 'div', [('style', ''), (',', None), ('foo', 'bar')]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
('data', 'rain'),
('endtag', 'a'),
]
self._run_check(html, expected)
def test_unescape_function(self):
p = self.get_collector()
self.assertEqual(p.unescape('&#bad;'),'&#bad;')
        self.assertEqual(p.unescape('&#0038;'),'&')
        # see #12888
        self.assertEqual(p.unescape('&#123; ' * 1050), '{ ' * 1050)
        # see #15156
        self.assertEqual(p.unescape('&Eacute;ric&Eacute;ric'
                                    '&alphacentauri&alpha;centauri'),
                         'ÉricÉric&alphacentauriαcentauri')
self.assertEqual(p.unescape('&co;'), '&co;')
def test_broken_comments(self):
html = ('<! not really a comment >'
'<! not a comment either -->'
'<! -- close enough -->'
'<!><!<-- this was an empty comment>'
'<!!! another bogus comment !!!>')
expected = [
('comment', ' not really a comment '),
('comment', ' not a comment either --'),
('comment', ' -- close enough --'),
('comment', ''),
('comment', '<-- this was an empty comment'),
('comment', '!! another bogus comment !!!'),
]
self._run_check(html, expected)
def test_broken_condcoms(self):
# these condcoms are missing the '--' after '<!' and before the '>'
html = ('<![if !(IE)]>broken condcom<![endif]>'
'<![if ! IE]><link href="favicon.tiff"/><![endif]>'
'<![if !IE 6]><img src="firefox.png" /><![endif]>'
'<![if !ie 6]><b>foo</b><![endif]>'
'<![if (!IE)|(lt IE 9)]><img src="mammoth.bmp" /><![endif]>')
# According to the HTML5 specs sections "8.2.4.44 Bogus comment state"
# and "8.2.4.45 Markup declaration open state", comment tokens should
# be emitted instead of 'unknown decl', but calling unknown_decl
# provides more flexibility.
# See also Lib/_markupbase.py:parse_declaration
expected = [
('unknown decl', 'if !(IE)'),
('data', 'broken condcom'),
('unknown decl', 'endif'),
('unknown decl', 'if ! IE'),
('startendtag', 'link', [('href', 'favicon.tiff')]),
('unknown decl', 'endif'),
('unknown decl', 'if !IE 6'),
('startendtag', 'img', [('src', 'firefox.png')]),
('unknown decl', 'endif'),
('unknown decl', 'if !ie 6'),
('starttag', 'b', []),
('data', 'foo'),
('endtag', 'b'),
('unknown decl', 'endif'),
('unknown decl', 'if (!IE)|(lt IE 9)'),
('startendtag', 'img', [('src', 'mammoth.bmp')]),
('unknown decl', 'endif')
]
self._run_check(html, expected)
class AttributesStrictTestCase(TestCaseBase):
def get_collector(self):
        with support.check_warnings(("", DeprecationWarning), quiet=False):
return EventCollector(strict=True)
def test_attr_syntax(self):
output = [
("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
]
self._run_check("""<a b='v' c="v" d=v e>""", output)
self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
def test_attr_values(self):
self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
[("starttag", "a", [("b", "xxx\n\txxx"),
("c", "yyy\t\nyyy"),
("d", "\txyz\n")])])
self._run_check("""<a b='' c="">""",
[("starttag", "a", [("b", ""), ("c", "")])])
# Regression test for SF patch #669683.
self._run_check("<e a=rgb(1,2,3)>",
[("starttag", "e", [("a", "rgb(1,2,3)")])])
# Regression test for SF bug #921657.
self._run_check(
"<a href=mailto:xyz@example.com>",
[("starttag", "a", [("href", "mailto:xyz@example.com")])])
def test_attr_nonascii(self):
# see issue 7311
self._run_check(
"<img src=/foo/bar.png alt=\u4e2d\u6587>",
[("starttag", "img", [("src", "/foo/bar.png"),
("alt", "\u4e2d\u6587")])])
self._run_check(
"<a title='\u30c6\u30b9\u30c8' href='\u30c6\u30b9\u30c8.html'>",
[("starttag", "a", [("title", "\u30c6\u30b9\u30c8"),
("href", "\u30c6\u30b9\u30c8.html")])])
self._run_check(
'<a title="\u30c6\u30b9\u30c8" href="\u30c6\u30b9\u30c8.html">',
[("starttag", "a", [("title", "\u30c6\u30b9\u30c8"),
("href", "\u30c6\u30b9\u30c8.html")])])
def test_attr_entity_replacement(self):
self._run_check(
"<a b='&><"''>",
[("starttag", "a", [("b", "&><\"'")])])
def test_attr_funky_names(self):
self._run_check(
"<a a.b='v' c:d=v e-f=v>",
[("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")])])
def test_entityrefs_in_attributes(self):
self._run_check(
"<html foo='€&aa&unsupported;'>",
[("starttag", "html", [("foo", "\u20AC&aa&unsupported;")])])
class AttributesTolerantTestCase(AttributesStrictTestCase):
def get_collector(self):
return EventCollector(strict=False)
def test_attr_funky_names2(self):
self._run_check(
"<a $><b $=%><c \=/>",
[("starttag", "a", [("$", None)]),
("starttag", "b", [("$", "%")]),
("starttag", "c", [("\\", "/")])])
def test_entities_in_attribute_value(self):
# see #1200313
        for entity in ['&', '&amp;', '&#38;', '&#x26;']:
self._run_check('<a href="%s">' % entity,
[("starttag", "a", [("href", "&")])])
self._run_check("<a href='%s'>" % entity,
[("starttag", "a", [("href", "&")])])
self._run_check("<a href=%s>" % entity,
[("starttag", "a", [("href", "&")])])
def test_malformed_attributes(self):
# see #13357
html = (
"<a href=test'style='color:red;bad1'>test - bad1</a>"
"<a href=test'+style='color:red;ba2'>test - bad2</a>"
"<a href=test' style='color:red;bad3'>test - bad3</a>"
"<a href = test' style='color:red;bad4' >test - bad4</a>"
)
expected = [
('starttag', 'a', [('href', "test'style='color:red;bad1'")]),
('data', 'test - bad1'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'+style='color:red;ba2'")]),
('data', 'test - bad2'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad3'")]),
('data', 'test - bad3'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad4'")]),
('data', 'test - bad4'), ('endtag', 'a')
]
self._run_check(html, expected)
def test_malformed_adjacent_attributes(self):
# see #12629
self._run_check('<x><y z=""o"" /></x>',
[('starttag', 'x', []),
('startendtag', 'y', [('z', ''), ('o""', None)]),
('endtag', 'x')])
self._run_check('<x><y z="""" /></x>',
[('starttag', 'x', []),
('startendtag', 'y', [('z', ''), ('""', None)]),
('endtag', 'x')])
# see #755670 for the following 3 tests
def test_adjacent_attributes(self):
self._run_check('<a width="100%"cellspacing=0>',
[("starttag", "a",
[("width", "100%"), ("cellspacing","0")])])
self._run_check('<a id="foo"class="bar">',
[("starttag", "a",
[("id", "foo"), ("class","bar")])])
def test_missing_attribute_value(self):
self._run_check('<a v=>',
[("starttag", "a", [("v", "")])])
def test_javascript_attribute_value(self):
self._run_check("<a href=javascript:popup('/popup/help.html')>",
[("starttag", "a",
[("href", "javascript:popup('/popup/help.html')")])])
def test_end_tag_in_attribute_value(self):
# see #1745761
self._run_check("<a href='http://www.example.org/\">;'>spam</a>",
[("starttag", "a",
[("href", "http://www.example.org/\">;")]),
("data", "spam"), ("endtag", "a")])
def test_main():
support.run_unittest(HTMLParserStrictTestCase, HTMLParserTolerantTestCase,
AttributesStrictTestCase, AttributesTolerantTestCase)
if __name__ == "__main__":
test_main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_htmlparser.py | Python | mit | 30,373 | 0.001152 |
#!/usr/bin/env python
import wx
import images
#----------------------------------------------------------------------
text = """\
Right-click on any bare area of this panel (or Ctrl-click on Macs
if you don't have a multi-button mouse) to show a popup menu.
Then look at the code for this sample. Notice how the PopupMenu
method is similar to the ShowModal method of a wx.Dialog in that
it doesn't return until the popup menu has been dismissed. The
event handlers for the popup menu items can either be attached to
the menu itself, or to the window that invokes PopupMenu.
"""
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
box = wx.BoxSizer(wx.VERTICAL)
# Make and layout the controls
fs = self.GetFont().GetPointSize()
bf = wx.Font(fs+4, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
nf = wx.Font(fs+2, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
t = wx.StaticText(self, -1, "PopupMenu")
t.SetFont(bf)
box.Add(t, 0, wx.CENTER|wx.ALL, 5)
box.Add(wx.StaticLine(self, -1), 0, wx.EXPAND)
box.Add((10,20))
t = wx.StaticText(self, -1, text)
t.SetFont(nf)
box.Add(t, 0, wx.CENTER|wx.ALL, 5)
t.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
self.SetSizer(box)
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
def OnContextMenu(self, event):
self.log.WriteText("OnContextMenu\n")
# only do this part the first time so the events are only bound once
#
        # Yet another alternate way to do IDs. Some prefer them up top to
# avoid clutter, some prefer them close to the object of interest
# for clarity.
if not hasattr(self, "popupID1"):
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
self.popupID3 = wx.NewId()
self.popupID4 = wx.NewId()
self.popupID5 = wx.NewId()
self.popupID6 = wx.NewId()
self.popupID7 = wx.NewId()
self.popupID8 = wx.NewId()
self.popupID9 = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnPopupOne, id=self.popupID1)
self.Bind(wx.EVT_MENU, self.OnPopupTwo, id=self.popupID2)
self.Bind(wx.EVT_MENU, self.OnPopupThree, id=self.popupID3)
self.Bind(wx.EVT_MENU, self.OnPopupFour, id=self.popupID4)
self.Bind(wx.EVT_MENU, self.OnPopupFive, id=self.popupID5)
self.Bind(wx.EVT_MENU, self.OnPopupSix, id=self.popupID6)
self.Bind(wx.EVT_MENU, self.OnPopupSeven, id=self.popupID7)
self.Bind(wx.EVT_MENU, self.OnPopupEight, id=self.popupID8)
self.Bind(wx.EVT_MENU, self.OnPopupNine, id=self.popupID9)
# make a menu
menu = wx.Menu()
# Show how to put an icon in the menu
item = wx.MenuItem(menu, self.popupID1,"One")
bmp = images.Smiles.GetBitmap()
item.SetBitmap(bmp)
menu.Append(item)
# add some other items
menu.Append(self.popupID2, "Two")
menu.Append(self.popupID3, "Three")
menu.Append(self.popupID4, "Four")
menu.Append(self.popupID5, "Five")
menu.Append(self.popupID6, "Six")
# make a submenu
sm = wx.Menu()
sm.Append(self.popupID8, "sub item 1")
        sm.Append(self.popupID9, "sub item 2")
menu.Append(self.popupID7, "Test Submenu", sm)
# Popup the menu. If an item is selected then its handler
# will be called before PopupMenu returns.
self.PopupMenu(menu)
menu.Destroy()
def OnPopupOne(self, event):
self.log.WriteText("Popup one\n")
def OnPopupTwo(self, event):
self.log.WriteText("Popup two\n")
def OnPopupThree(self, event):
self.log.WriteText("Popup three\n")
def OnPopupFour(self, event):
self.log.WriteText("Popup four\n")
def OnPopupFive(self, event):
self.log.WriteText("Popup five\n")
def OnPopupSix(self, event):
self.log.WriteText("Popup six\n")
def OnPopupSeven(self, event):
self.log.WriteText("Popup seven\n")
def OnPopupEight(self, event):
self.log.WriteText("Popup eight\n")
def OnPopupNine(self, event):
self.log.WriteText("Popup nine\n")
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>PopupMenu</center></h2>
""" + text + """
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| dnxbjyj/python-basic | gui/wxpython/wxPython-demo-4.0.1/demo/PopupMenu.py | Python | mit | 4,928 | 0.004261 |
# -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['sceper.ws','sceper.unblocked.pro']
self.base_link = 'https://sceper.unblocked.pro'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
c = client.parseDOM(post, 'content.+?')[0]
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
s = s[0] if s else '0'
u = zip(client.parseDOM(c, 'a', ret='href'), client.parseDOM(c, 'a'))
u = [(i[0], i[1], re.findall('PT(\d+)$', i[1])) for i in u]
u = [(i[0], i[1]) for i in u if not i[2]]
if 'tvshowtitle' in data:
                        u = [([x for x in i[0].strip('//').split('/')][-1], i[0], s) for i in u]
else:
u = [(t, i[0], s) for i in u]
items += u
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
| TheWardoctor/Wardoctors-repo | script.module.fantastic/lib/resources/lib/sources/en/sceper.py | Python | apache-2.0 | 6,854 | 0.018675 |
from __future__ import with_statement
from pyramid.view import view_config
from pyramid.renderers import render
from intranet3.utils.views import BaseView
from intranet3.models import (
User,
TimeEntry,
Tracker,
Project,
Client,
DBSession,
)
from intranet3.forms.times import ProjectsTimeForm, TimeEntryForm
from intranet3.log import INFO_LOG, WARN_LOG, ERROR_LOG, DEBUG_LOG, EXCEPTION_LOG
from intranet3.lib.times import TimesReportMixin, HTMLRow, dump_entries_to_excel
LOG = INFO_LOG(__name__)
WARN = WARN_LOG(__name__)
ERROR = ERROR_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
MAX_TIMEOUT = 20 # DON'T WAIT LONGER THAN DEFINED TIMEOUT
MAX_TICKETS_PER_REQUEST = 50 # max number of ticket ids to include in a single request to tracker
@view_config(route_name='times_tickets_excel', permission='can_view_time_report')
class Excel(BaseView):
def get(self):
client = self.request.user.get_client()
form = ProjectsTimeForm(formdata=self.request.GET, client=client)
if not form.validate():
return render('time/tickets_report/projects_report.html', dict(form=form))
query = DBSession.query
start_date, end_date = form.date_range.data
projects = form.projects.data
users = form.users.data
ticket_choice = form.ticket_choice.data
group_by = (
form.group_by_client.data,
form.group_by_project.data,
form.group_by_bugs.data,
form.group_by_user.data
)
bigger_than = form.bigger_than.data
LOG(u'Tickets report %r - %r - %r' % (start_date, end_date, projects))
uber_query = query(Client, Project, TimeEntry.ticket_id, User, Tracker, TimeEntry.description, TimeEntry.date, TimeEntry.time)
uber_query = uber_query.filter(TimeEntry.user_id==User.id)\
.filter(TimeEntry.project_id==Project.id)\
.filter(Project.tracker_id==Tracker.id)\
.filter(Project.client_id==Client.id)
if projects:
uber_query = uber_query.filter(TimeEntry.project_id.in_(projects))
uber_query = uber_query.filter(TimeEntry.date>=start_date)\
.filter(TimeEntry.date<=end_date)\
.filter(TimeEntry.deleted==False)
if ticket_choice == 'without_bug_only':
uber_query = uber_query.filter(TimeEntry.ticket_id=='')
elif ticket_choice == 'meetings_only':
meeting_ids = [t['value'] for t in TimeEntryForm.PREDEFINED_TICKET_IDS]
uber_query = uber_query.filter(TimeEntry.ticket_id.in_(meeting_ids))
if users:
uber_query = uber_query.filter(User.id.in_(users))
uber_query = uber_query.order_by(Client.name, Project.name, TimeEntry.ticket_id, User.name)
entries = uber_query.all()
file, response = dump_entries_to_excel(entries, group_by, bigger_than)
return response
@view_config(route_name='times_tickets_report', permission='can_view_time_report')
class Report(TimesReportMixin, BaseView):
def dispatch(self):
client = self.request.user.get_client()
form = ProjectsTimeForm(self.request.GET, client=client)
if not self.request.GET or not form.validate():
return dict(form=form)
start_date, end_date = form.date_range.data
projects = form.projects.data
if not projects:
projects = [p[0] for p in form.projects.choices]
bug_id = self.request.GET.get('bug_id')
users = form.users.data
bigger_than = form.bigger_than.data
ticket_choice = form.ticket_choice.data
group_by = (
form.group_by_client.data,
form.group_by_project.data,
form.group_by_bugs.data,
form.group_by_user.data
)
LOG(u'Tickets report %r - %r - %r' % (start_date, end_date, projects))
uber_query = self._prepare_uber_query(
start_date, end_date, projects, users, ticket_choice, bug_id
)
entries = uber_query.all()
participation_of_workers = self._get_participation_of_workers(entries)
tickets_id = ','.join([str(e[2]) for e in entries])
trackers_id = ','.join([str(e[4].id) for e in entries])
rows, entries_sum = HTMLRow.from_ordered_data(entries, group_by, bigger_than)
return dict(
rows=rows,
entries_sum=entries_sum,
form=form,
participation_of_workers=participation_of_workers,
participation_of_workers_sum=sum([time[1] for time in participation_of_workers]),
trackers_id=trackers_id, tickets_id=tickets_id,
str_date=self._sprint_daterange(start_date, end_date),
)
def _sprint_daterange(self, st, end):
return '%s - %s' % (st.strftime('%d-%m-%Y'), end.strftime('%d-%m-%Y'))
| stxnext/intranet-open | src/intranet3/intranet3/views/times/tickets.py | Python | mit | 4,991 | 0.004809 |
# Open Modeling Framework (OMF) Software for simulating power systems behavior
# Copyright (c) 2015, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import pyhdfs
import shutil
import vcap_parser
import os
from os.path import join as pJoin
from models.__metaModel__ import _omfDir
class Hdfs(object):
HOME_DIR = '/user/omf/' # + vcap_parser.get_space_name() + '/'
populated = False
def __init__(self):
self.hdfs = pyhdfs.HdfsClient(self.__get_all_namenodes())
def __get_all_namenodes(self):
namenodes = []
credentials = vcap_parser.get_service_credentials('hdfs')
cluster_config = credentials['HADOOP_CONFIG_KEY']
names = cluster_config['dfs.ha.namenodes.nameservice1'].split(',')
for name in names:
namenodes.append(cluster_config['dfs.namenode.http-address.nameservice1.' + name])
return namenodes
def create_dir(self, path):
print 'HDFS: Creating directory', path
self.hdfs.mkdirs(Hdfs.HOME_DIR + path)
def listdir(self, directory):
return self.hdfs.listdir(Hdfs.HOME_DIR + directory)
def is_dir(self, directory):
print self.hdfs.get_file_status(Hdfs.HOME_DIR + directory).type
return self.hdfs.get_file_status(Hdfs.HOME_DIR + directory).type == "DIRECTORY"
def remove(self, path):
try:
self.hdfs.delete(Hdfs.HOME_DIR + path, recursive=True)
except pyhdfs.HdfsPathIsNotEmptyDirectoryException:
self.hdfs.delete(Hdfs.HOME_DIR + path + "/*")
self.hdfs.delete(Hdfs.HOME_DIR + path)
def stat(self, path):
status = self.hdfs.get_file_status(Hdfs.HOME_DIR + path)
return status
def get_file_modification_time(self, path):
return self.hdfs.get_file_status(Hdfs.HOME_DIR + path).modificationTime / 1000
def exists(self, path):
return self.hdfs.exists(Hdfs.HOME_DIR + path)
def open(self, path):
f = self.hdfs.open(Hdfs.HOME_DIR + path)
print "Opening file: " + path + ". Type is: " + str(type(f))
return f
def save(self, path, content):
try:
self.hdfs.create(Hdfs.HOME_DIR + path, content)
except pyhdfs.HdfsFileAlreadyExistsException:
self.hdfs.delete(Hdfs.HOME_DIR + path)
self.hdfs.create(Hdfs.HOME_DIR + path, content)
def walk(self, path):
print "Walk in path: " + path
return self.hdfs.walk(Hdfs.HOME_DIR + path)
def copy_within_fs(self, source, target):
print "HDFS: Copy within fs: copying to local... from " + _omfDir + "/tmp/" + source + " to: " + Hdfs.HOME_DIR + target
if not os.path.exists(pJoin(_omfDir, "tmp", source)):
os.makedirs(pJoin(_omfDir, "tmp", source))
self.hdfs.copy_to_local(Hdfs.HOME_DIR + source, pJoin(_omfDir, "tmp", source))
try:
print "HDFS: Copy within fs: copying from local... from: " + Hdfs.HOME_DIR + target + " to: " + _omfDir + "/tmp/" + source
self.hdfs.copy_from_local(pJoin(_omfDir, "tmp", source), Hdfs.HOME_DIR + target)
except pyhdfs.HdfsFileAlreadyExistsException:
print "HDFS: Copy within fs: file existed before :("
self.hdfs.delete(Hdfs.HOME_DIR + target)
self.hdfs.copy_from_local(pJoin(_omfDir, "tmp", source), Hdfs.HOME_DIR + target)
def export_to_hdfs(self, directory, file_to_export):
print 'HDFS: Copying file from local filesystem at ' + file_to_export.filename + ' to HDFS at ' + Hdfs.HOME_DIR + file_to_export.filename
self.hdfs.copy_from_local(file_to_export.filename, pJoin(Hdfs.HOME_DIR, directory, file_to_export.filename),
overwrite=True)
return True
def export_local_to_hdfs(self, directory, file_to_export):
filename = file_to_export.split("/")[-1]
print 'HDFS: Copying file from local filesystem at ' + file_to_export + ' to HDFS at ' + Hdfs.HOME_DIR + directory + "/" + filename
self.hdfs.copy_from_local(file_to_export, pJoin(Hdfs.HOME_DIR, directory, filename), overwrite=True)
return True
def export_from_fs_to_local(self, source, target):
directory = os.path.split(target)[0]
if not os.path.isdir(directory):
os.makedirs(directory)
self.hdfs.copy_to_local(Hdfs.HOME_DIR + source, pJoin(_omfDir, target))
def import_files_to_hdfs(self, local_directory, hdfs_directory):
print "Exporting files from local directory: " + local_directory + " to hdfs directory: " + hdfs_directory
self.create_dir(hdfs_directory)
for f in os.listdir(local_directory):
self.export_local_to_hdfs(hdfs_directory, pJoin(local_directory, f))
return True
def recursive_import_to_hdfs(self, start_dir):
self.create_dir(start_dir)
for f in os.listdir(pJoin(_omfDir, start_dir)):
if os.path.isdir(pJoin(_omfDir, start_dir, f)):
self.create_dir(pJoin(start_dir, f))
self.recursive_import_to_hdfs(pJoin(start_dir, f))
else:
self.export_local_to_hdfs(start_dir, pJoin(_omfDir, start_dir, f))
return True
def populate_hdfs(self):
template_files = []
model_files = []
try:
template_files = ["templates/" + x for x in self.listdir("templates")]
except:
print "importing templates to hdfs"
if self.import_files_to_hdfs("templates", "templates"):
template_files = ["templates/" + x for x in self.listdir("templates")]
shutil.rmtree("templates")
try:
model_files = ["models/" + x for x in self.listdir("models") if not (x.endswith('.pyc') or x.endswith('.py'))]
except:
print "importing models to hdfs"
if self.import_files_to_hdfs("models", "models"):
model_files = ["models/" + x for x in self.listdir("models")]
shutil.rmtree("models")
try:
if not self.exists("data"):
self.recursive_import_to_hdfs("data")
except Exception as e:
print "Could not import data.... Reason: " + str(e)
try:
if not self.exists("static"):
self.recursive_import_to_hdfs("static")
except Exception as e:
print "Could not import data.... Reason: " + str(e)
self.populated = True
return template_files, model_files
def populate_local(self):
if not os.path.exists("data"):
try:
self.export_from_fs_to_local("data", "data")
except Exception as e:
print "Could not import data.... Reason: " + str(e)
else:
print "Data directory already exists."
| geomf/omf-fork | omf/hdfs.py | Python | gpl-2.0 | 7,235 | 0.003179 |
from rest_framework import permissions
class IsReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
        return obj.owner == request.user | linfanangel/Trality | cart/cartapp/permission.py | Python | gpl-3.0 | 268 | 0.003731 |
import random
def generate(data):
ask = ['equivalent resistance $R_T$', 'current from the power supply $I_T$']
which = random.choice([0,1])
data['params']['ask'] = ask[which]
label = ["$R_T$", "$I_T$"]
data['params']['lab'] = label[which]
unit = ["$\\Omega$", "A"]
data['params']['unit'] = unit[which]
Vt = random.randint(100,200)
data['params']['Vt'] = Vt
R1 = random.choice(list(range(20,180,10)))
data['params']['R1'] = R1
R2 = random.choice(list(range(20,180,20)))
data['params']['R2'] = R2
R3 = random.choice(list(range(20,100,5)))
data['params']['R3'] = R3
figname = ["circ1.png", "circ2.png"]
whichfig = random.choice([0,1])
data['params']['figname'] = figname[whichfig]
if whichfig: # this is the series
Rt = R1 + R2 + R3
else: # this is the parallel
Rtinv = 1/R1 + 1/R2 + 1/R3
Rt = 1/Rtinv
It = Vt/Rt
ans = [Rt, It]
data['correct_answers']['ans'] = ans[which]
| PrairieLearn/PrairieLearn | exampleCourse/questions/workshop/Lesson1_example3_v3/server.py | Python | agpl-3.0 | 1,005 | 0.01393 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: int
"""
if not root:
return 0
left = self.pathSum(root.left, sum)
right = self.pathSum(root.right, sum)
return self.rootSum(root, sum) + left + right
def rootSum(self, root, sum):
if not root:
return 0
left = self.rootSum(root.left, sum - root.val)
right = self.rootSum(root.right, sum - root.val)
return (root.val == sum) + left + right
| zqfan/leetcode | algorithms/437. Path Sum III/solution.py | Python | gpl-3.0 | 748 | 0 |
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
__all__ = [
'ApplySerializer', 'LoginAssetConfirmSerializer',
]
class ApplySerializer(serializers.Serializer):
    # Application info
apply_login_user = serializers.CharField(required=True, label=_('Login user'))
apply_login_asset = serializers.CharField(required=True, label=_('Login asset'))
apply_login_system_user = serializers.CharField(
required=True, max_length=64, label=_('Login system user')
)
class LoginAssetConfirmSerializer(ApplySerializer):
pass
| jumpserver/jumpserver | apps/tickets/serializers/ticket/meta/ticket_type/login_asset_confirm.py | Python | gpl-3.0 | 591 | 0.003431 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_libnetwork.schemata import commons
REQUEST_POOL_SCHEMA = {
u'links': [{
u'method': u'POST',
u'href': u'/IpamDriver.RequestPool',
        u'description': u'Allocate pool of IP addresses',
u'rel': u'self',
u'title': u'Create'
}],
u'title': u'Create pool',
u'required': [u'AddressSpace', u'Pool', u'SubPool', u'V6'],
u'definitions': {u'commons': {}},
u'$schema': u'http://json-schema.org/draft-04/hyper-schema',
u'type': u'object',
u'properties': {
u'AddressSpace': {
u'description': u'The name of the address space.',
u'type': u'string',
u'example': u'foo',
},
u'Pool': {
            u'description': u'A range of IP addresses represented in '
u'CIDR format address/mask.',
u'$ref': u'#/definitions/commons/definitions/cidr'
},
u'SubPool': {
            u'description': u'A subset of IP range from Pool in '
u'CIDR format address/mask.',
u'$ref': u'#/definitions/commons/definitions/cidr'
},
u'Options': {
u'type': [u'object', u'null'],
u'description': u'Options',
u'example': {},
},
u'V6': {
u'description': u'If set to "True", requesting IPv6 pool and '
u'vice-versa.',
u'type': u'boolean',
u'example': False
}
}
}
REQUEST_POOL_SCHEMA[u'definitions'][u'commons'] = commons.COMMONS
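# A minimal validation sketch (assumes the third-party `jsonschema`
# package; the payload values below are illustrative):
#
#     import jsonschema
#     jsonschema.validate({u'AddressSpace': u'foo',
#                          u'Pool': u'10.0.0.0/16',
#                          u'SubPool': u'10.0.0.0/24',
#                          u'V6': False}, REQUEST_POOL_SCHEMA)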
| celebdor/kuryr-libnetwork | kuryr_libnetwork/schemata/request_pool.py | Python | apache-2.0 | 2,102 | 0 |
# Generated by Django 3.0.7 on 2021-03-10 05:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0005_owner_model'),
('order', '0041_auto_20210114_1728'),
]
operations = [
migrations.AddField(
model_name='purchaseorder',
name='responsible',
field=models.ForeignKey(blank=True, help_text='User or group responsible for this order', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='users.Owner', verbose_name='Responsible'),
),
migrations.AddField(
model_name='salesorder',
name='responsible',
field=models.ForeignKey(blank=True, help_text='User or group responsible for this order', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='users.Owner', verbose_name='Responsible'),
),
]
| inventree/InvenTree | InvenTree/order/migrations/0042_auto_20210310_1619.py | Python | mit | 972 | 0.002058 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import time
from django import forms
from django.conf import settings
from django.core.management import call_command
from django.http.response import HttpResponse, JsonResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView
from six import StringIO
from shuup.addons.manager import get_enabled_addons
from shuup.addons.reloader import get_reload_method_classes
from shuup.apps.settings import reload_apps
from shuup.utils.excs import Problem
from shuup.utils.iterables import first
class ReloadMethodForm(forms.Form):
def get_viable_reload_methods(self):
for klass in get_reload_method_classes():
rm = klass()
if rm.is_viable():
yield rm
def __init__(self, **kwargs):
super(ReloadMethodForm, self).__init__(**kwargs)
self.reload_methods = list(self.get_viable_reload_methods())
if not self.reload_methods:
raise Problem(_("There are no viable reload methods available. Please contact your system administrator."))
self.fields["reload_method"] = forms.ChoiceField(
choices=[(rm.identifier, rm.title) for rm in self.reload_methods],
label=_("Reload Method"),
initial=self.reload_methods[0].identifier,
widget=forms.RadioSelect
)
def get_selected_reload_method(self):
return first(rm for rm in self.reload_methods if rm.identifier == self.cleaned_data["reload_method"])
def finalize_installation_for_enabled_apps():
out = StringIO()
enabled_addons = get_enabled_addons(settings.SHUUP_ENABLED_ADDONS_FILE)
new_apps = [app for app in enabled_addons if app not in settings.INSTALLED_APPS]
if new_apps:
out.write("Enabling new addons: %s" % new_apps)
settings.INSTALLED_APPS += type(settings.INSTALLED_APPS)(new_apps)
reload_apps()
call_command("migrate", "--noinput", "--no-color", stdout=out)
call_command("collectstatic", "--noinput", "--no-color", stdout=out)
return out.getvalue()
class ReloadView(FormView):
template_name = "shuup/admin/addons/reload.jinja"
form_class = ReloadMethodForm
def form_valid(self, form):
reloader = form.get_selected_reload_method()
reloader.execute()
return HttpResponse(_("Reloading.")) # This might not reach the user...
def get(self, request, *args, **kwargs):
if request.GET.get("ping"):
return JsonResponse({"pong": time.time()})
elif request.GET.get("finalize"):
return JsonResponse({"message": finalize_installation_for_enabled_apps()})
return super(ReloadView, self).get(request, *args, **kwargs)
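# Rough request flow for ReloadView, as read from the code above (an
# informal summary, not a documented contract):
#   GET  ?ping=1     -> JSON {"pong": <server time>}, a liveness probe
#   GET  ?finalize=1 -> migrate + collectstatic for newly enabled addons
#   POST             -> executes the reload method chosen in the form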
| suutari-ai/shoop | shuup/addons/admin_module/views/reload.py | Python | agpl-3.0 | 3,004 | 0.001664 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transactions', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='response',
field=models.CharField(default=b'', max_length=4, null=True, blank=True),
),
]
| erickdom/restAndroid | transactions/migrations/0002_transaction_response.py | Python | apache-2.0 | 436 | 0.002294 |
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
    # 0. Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
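# For illustration (hypothetical flag, not registered by this module):
# register_optionflag('MY_FLAG') would return the next free bit, i.e.
# 1 << len(OPTIONFLAGS_BY_NAME) at the time of the call.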
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
return open(filename).read(), filename
# Use sys.stdout encoding for output.
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in `s`, and return the result.
If the string `s` is Unicode, it is encoded using the stdout
encoding and the `backslashreplace` error handler.
"""
if isinstance(s, unicode):
s = s.encode(_encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
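# Two more illustrative matches, worked by hand from the rules above:
#     _ellipsis_match('a...c', 'abc')      -> True  ('...' absorbs 'b')
#     _ellipsis_match('...', 'anything')   -> True  (bare ellipsis matches all)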
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
pdb.Pdb.__init__(self, stdout=out)
def set_trace(self, frame=None):
self.__debugger_used = True
if frame is None:
frame = sys._getframe().f_back
pdb.Pdb.set_trace(self, frame)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
      I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
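# A quick illustration of the normalization above: Example('print 1', '1')
# ends both fields with a newline, so .source == 'print 1\n' and
# .want == '1\n'.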
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
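# A minimal sketch of the parser in action (local names are illustrative):
#
#     parser = DocTestParser()
#     pieces = parser.parse('>>> 1 + 1\n2\n', name='demo')
#     # `pieces` alternates intervening text with Example objects; here
#     # pieces[1].source == '1 + 1\n' and pieces[1].want == '2\n'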
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
if module is not None:
# Supply the module globals in case the module was
# originally loaded via a PEP 302 loader and
# file is not a valid filesystem path
source_lines = linecache.getlines(file, module.__dict__)
else:
# No access to a loader, so assume it's a normal
# filesystem path
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__' # provide a default module name
        # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
if m1 and m2 and check(m1.group(1), m2.group(1),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
source = example.source.encode('ascii', 'backslashreplace')
return source.splitlines(True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
# Don't print here by default, since doing
# so breaks some of the buildbots
#print "*** DocTestRunner.merge: '" + name + "' in both" \
# " testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
        documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
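# Worked examples of check_output above (default flags unless noted):
#     OutputChecker().check_output('1\n', 'True\n', 0) -> True
#     OutputChecker().check_output('1\n', 'True\n',
#                                  DONT_ACCEPT_TRUE_FOR_1) -> False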
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
TestResults(failed=0, attempted=1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
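# Illustrative usage sketch (not part of the original module): the common
# self-test idiom for a module that keeps examples in its docstrings:
#
#     if __name__ == '__main__':
#         import doctest
#         failures, attempted = doctest.testmod(optionflags=doctest.ELLIPSIS)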
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser(),
encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = _load_testfile(filename, package, module_relative)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
if encoding is not None:
text = text.decode(encoding)
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
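# Illustrative usage sketch (not part of the original module): run the
# examples embedded in a prose file ('docs/intro.txt' is a hypothetical path):
#
#     import doctest
#     failures, attempted = doctest.testfile(
#         'docs/intro.txt', optionflags=doctest.NORMALIZE_WHITESPACE)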
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
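# Illustrative usage sketch (not part of the original module): check the
# examples in a single docstring without scanning a whole module:
#
#     def add(a, b):
#         """
#         >>> add(2, 2)
#         4
#         """
#         return a + b
#
#     run_docstring_examples(add, {'add': add}, verbose=True, name='add')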
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.optionflags = optionflags
self.testfinder = DocTestFinder()
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return TestResults(f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return TestResults(f,t)
def rundict(self, d, name, module=None):
import types
m = types.ModuleType(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import types
m = types.ModuleType(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> import doctest
>>> old = doctest._unittest_reportflags
>>> doctest.set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> doctest.set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    def __init__(self, module):
        self.module = module
        DocTestCase.__init__(self, None)
def setUp(self):
self.skipTest("DocTestSuite will not work with -O2 and above")
def test_skip(self):
pass
    def shortDescription(self):
        return "Skipping tests from %s" % self.module.__name__
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if not tests and sys.flags.optimize >= 2:
# Skip doctests when running with -O2
suite = unittest.TestSuite()
        suite.addTest(SkipDocTestCase(module))
return suite
elif not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
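# Illustrative usage sketch (not part of the original module): hooking module
# doctests into a unittest run ('mymodule' is a hypothetical module name):
#
#     import unittest, doctest
#     import mymodule
#     suite = doctest.DocTestSuite(mymodule, optionflags=doctest.ELLIPSIS)
#     unittest.TextTestRunner(verbosity=2).run(suite)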
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(),
encoding=None, **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
doc, path = _load_testfile(path, package, module_relative)
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
# If an encoding is specified, use it to convert the file to unicode
if encoding is not None:
doc = doc.decode(encoding)
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
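# Illustrative usage sketch (not part of the original module): bundling
# several doctest files into one suite ('docs/*.txt' paths are hypothetical):
#
#     suite = DocFileSuite('docs/intro.txt', 'docs/advanced.txt',
#                          optionflags=ELLIPSIS)
#     unittest.TextTestRunner().run(suite)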
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
<BLANKLINE>
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
if not testfiles:
name = os.path.basename(sys.argv[0])
if '__loader__' in globals(): # python -m
name, _ = os.path.splitext(name)
print("usage: {0} [-v] file ...".format(name))
return 2
for filename in testfiles:
if filename.endswith(".py"):
# It is a module -- insert its dir into sys.path and try to
# import it. If it is part of a package, that possibly
# won't work because of package imports.
dirname, filename = os.path.split(filename)
sys.path.insert(0, dirname)
m = __import__(filename[:-3])
del sys.path[0]
failures, _ = testmod(m)
else:
failures, _ = testfile(filename, module_relative=False)
if failures:
return 1
return 0
if __name__ == "__main__":
sys.exit(_test())
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7/Lib/doctest.py | Python | mit | 101,750 | 0.001179 |
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""File format specific behavior."""
from weblate.formats.convert import (
HTMLFormat,
IDMLFormat,
OpenDocumentFormat,
PlainTextFormat,
WindowsRCFormat,
)
from weblate.formats.helpers import BytesIOMode
from weblate.formats.tests.test_formats import AutoFormatTest
from weblate.trans.tests.utils import get_test_file
IDML_FILE = get_test_file("en.idml")
HTML_FILE = get_test_file("cs.html")
OPENDOCUMENT_FILE = get_test_file("cs.odt")
TEST_RC = get_test_file("cs-CZ.rc")
TEST_TXT = get_test_file("cs.txt")
class ConvertFormatTest(AutoFormatTest):
NEW_UNIT_MATCH = None
EXPECTED_FLAGS = ""
def parse_file(self, filename):
return self.FORMAT(filename, template_store=self.FORMAT(filename))
class HTMLFormatTest(ConvertFormatTest):
FORMAT = HTMLFormat
FILE = HTML_FILE
MIME = "text/html"
EXT = "html"
COUNT = 5
MASK = "*/translations.html"
EXPECTED_PATH = "cs_CZ/translations.html"
FIND_CONTEXT = "+html.body.p:5-1"
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"<body>"
NEW_UNIT_MATCH = None
BASE = HTML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
class OpenDocumentFormatTest(ConvertFormatTest):
FORMAT = OpenDocumentFormat
FILE = OPENDOCUMENT_FILE
MIME = "application/vnd.oasis.opendocument.text"
EXT = "odt"
COUNT = 4
MASK = "*/translations.odt"
EXPECTED_PATH = "cs_CZ/translations.odt"
FIND_CONTEXT = (
"odf///office:document-content[0]/office:body[0]/office:text[0]/text:p[1]"
)
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = OPENDOCUMENT_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
OpenDocumentFormat.convertfile(BytesIOMode("test.odt", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class IDMLFormatTest(ConvertFormatTest):
FORMAT = IDMLFormat
FILE = IDML_FILE
MIME = "application/octet-stream"
EXT = "idml"
COUNT = 6
MASK = "*/translations.idml"
EXPECTED_PATH = "cs_CZ/translations.idml"
FIND_CONTEXT = (
"idPkg:Story[0]/{}Story[0]/{}XMLElement[0]/{}ParagraphStyleRange[0]"
"Stories/Story_mainmainmainmainmainmainmainmainmainmainmainu188.xml"
)
FIND_MATCH = """<g id="0"><g id="1">THE HEADLINE HERE</g></g>"""
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = IDML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
IDMLFormat.convertfile(BytesIOMode("test.idml", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class WindowsRCFormatTest(ConvertFormatTest):
FORMAT = WindowsRCFormat
FILE = TEST_RC
BASE = TEST_RC
MIME = "text/plain"
EXT = "rc"
COUNT = 5
MASK = "rc/*.rc"
EXPECTED_PATH = "rc/cs-CZ.rc"
MATCH = "STRINGTABLE"
FIND_CONTEXT = "STRINGTABLE.IDS_MSG1"
FIND_MATCH = "Hello, world!\n"
EDIT_OFFSET = 1
class PlainTextFormatTest(ConvertFormatTest):
FORMAT = PlainTextFormat
FILE = TEST_TXT
BASE = TEST_TXT
MIME = "text/plain"
EXT = "txt"
COUNT = 5
MASK = "txt/*.txt"
EXPECTED_PATH = "txt/cs_CZ.txt"
MATCH = "Hello"
FIND_CONTEXT = "cs.txt:2"
FIND_MATCH = "Hello, world!"
EDIT_OFFSET = 1
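# Usage sketch (illustrative, not part of the test module): these cases are
# ordinary Django test cases, so they can presumably be run with Django's
# test runner, e.g.:
#
#     ./manage.py test weblate.formats.tests.test_convert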
| phw/weblate | weblate/formats/tests/test_convert.py | Python | gpl-3.0 | 4,423 | 0.000452 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pyscada', '0010_auto_20160115_0918'),
('modbus', '0003_auto_20160115_0918'),
]
operations = [
migrations.RenameField(
model_name='modbusdevice',
old_name='modbus_client',
new_name='modbus_device',
),
]
| trombastic/PyScada | pyscada/modbus/migrations/0004_auto_20160115_0920.py | Python | gpl-3.0 | 454 | 0 |
# -*- coding: utf-8 -*-
# © 2017 Savoir-faire Linux
# License LGPL-3.0 or later (http://www.gnu.org/licenses/gpl).
from odoo import api, models
class IrRule(models.Model):
_inherit = 'ir.rule'
@api.model
def _compute_domain(self, model_name, mode="read"):
if getattr(self.env, '_bypass_access', False):
if self.env._bypass_exception != model_name:
return []
return super(IrRule, self)._compute_domain(model_name, mode=mode)
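    # Illustrative sketch (assumption, not from this module): the two
    # environment attributes checked above are presumably set by the caller
    # before a privileged read, e.g.:
    #
    #     env = records.env
    #     env._bypass_access = True               # skip record rules...
    #     env._bypass_exception = 'res.partner'   # ...except for this model
    #     records.read(['name'])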
| savoirfairelinux/secure-odoo | action_access_control_list/models/ir_rule.py | Python | lgpl-3.0 | 486 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------
# File Name: mul.py
# Author: Zhao Yanbai
# Thu Oct 1 15:10:27 2015
# Description: print a triangular 9x9 multiplication table
# ------------------------------------------------------------------------
for j in range(1, 10):
    for i in range(1, j + 1):
        print "{0}*{1}={2:<2d}\t".format(i, j, i*j),
    print ""
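# Python 3 equivalent (illustrative sketch, not part of the original script):
#
#     for j in range(1, 10):
#         for i in range(1, j + 1):
#             print("{0}*{1}={2:<2d}".format(i, j, i * j), end="\t")
#         print()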
| acevest/acecode | learn/python/mul.py | Python | gpl-2.0 | 462 | 0.008658 |
#!/usr/bin/env python3
# Copyright (C) 2017
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
"""
resourcetool.py
Simple utility to list or update RADB resource availability values.
Essentially a tool around RADB getResources(), updateResourceAvailability(), getResourceClaims() and (parts of) updateResourceClaims().
Can also figure out available capacity for a mounted storage resource and update it in the RADB (-U/--update-available-storage-capacity option).
Can also update storage claim endtime to its task endtime (if ended) in the RADB (-E/--end-past-tasks-storage-claims option).
Examples (source lofarinit.sh to set LOFARROOT, PYTHONPATH, ...):
- Update available (local) storage capacity and set storage claim endtimes to task endtimes (if ended) for an observation storage node, e.g. via cron in operations:
source /opt/lofar/lofarinit.sh; LOFARENV=PRODUCTION /opt/lofar/bin/resourcetool --broker=scu001.control.lofar --end-past-tasks-storage-claims --update-available-storage-capacity
- Show all DRAGNET resources on the test system RADB:
LOFARENV=TEST resourcetool --broker=scu199.control.lofar --resource-group-root=DRAGNET
- Deactivate 2 storage resources in operations, because disks from both storage areas are found to be faulty (then still need to re-schedule tasks):
LOFARENV=PRODUCTION resourcetool --broker=scu001.control.lofar drg01_storage:/data1=False drg01_storage:/data2=False
NOTES:
! Be careful what system (operations or test) this command applies to! This can be set using the env vars LOFARENV=TEST or LOFARENV=PRODUCTION
Operations vs Test (vs Development) can be seen from the default RADB_BUSNAME in the usage info: lofar.* vs test.lofar.* vs devel.lofar.*
! By default, listed or updateable resources are restricted to resources under the localhost's resource group.
This is on purpose to make -U work correctly. The -G/--resource-group-root option can be used to widen the resource group scope for listing
or explicit command-line updates, but non-default -G with -U is rejected: it is too easy to mass-update other resources with local filesystem info.
"""
import logging
from datetime import datetime, timedelta
from lofar.messaging import DEFAULT_BROKER, DEFAULT_BUSNAME
from lofar.sas.resourceassignment.resourceassignmentservice.rpc import RADBRPC
from lofar.common.util import humanreadablesize
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.WARN)
logger = logging.getLogger(__name__)
def printResources(resources, scaled_units=True):
""" E.g.: resources = [{u'total_capacity': 3774873600, u'name': u'dragproc_bandwidth:/data', u'type_id': 3,
u'available_capacity': 3774873600, u'type_name': u'bandwidth', u'unit_id': 3,
u'active': True, u'used_capacity': 0, u'id': 118, u'unit': u'bits/second',
'claimable_capacity': 3774873600}, ...] # this key was added (not from RADB); it can be negative!
"""
header = {'id': 'RId', 'name': 'Resource Name', 'active': 'Active',
'available_capacity': ' Avail. Capacity', 'claimable_capacity': ' Claimable Cap.',
'total_capacity': ' Total Capacity', 'unit': 'Unit'}
print(('{id:4s} {name:24s} {active:6s} {available_capacity} {claimable_capacity} {total_capacity} {unit}'.format(**header)))
print('===================================================================================================')
resources.sort(key=lambda r: r['id']) # SQL could have done this better
for res in resources:
res['active'] = 'True' if res['active'] else 'False' # to solve bool formatting issue
if scaled_units and (res['type_name'] == 'storage' or res['type_name'] == 'bandwidth'):
unit_base = 1024 if res['type_name'] == 'storage' else 1000 # check type_name instead of unit as in printClaims()
res['available_capacity'] = humanreadablesize(res['available_capacity'], '', unit_base)
res['claimable_capacity'] = humanreadablesize(res['claimable_capacity'], '', unit_base)
res['total_capacity'] = humanreadablesize(res['total_capacity'] , '', unit_base)
cap_conv = '>16s'
else:
cap_conv = '16d'
print((('{id:4d} {name:24s} {active:6s} {available_capacity:' + cap_conv +
'} {claimable_capacity:' + cap_conv + '} {total_capacity:' + cap_conv + '} {unit}').format(**res)))
if not resources:
print('<no resources>')
def printClaims(claims, scaled_units=True):
""" E.g.: claims = [{u'claim_size': 76441190400, u'endtime': datetime.datetime(2018, 6, 13, 17, 40),
u'id': 67420, u'resource_id': 122, u'resource_name': u'drg01_storage:/data1',
u'resource_type_id': 5, u'resource_type_name': u'storage',
u'starttime': datetime.datetime(2017, 6, 13, 17, 30),
u'status': u'claimed', u'status_id': 1, u'task_id': 75409, ...}, ...]
"""
header = {'id': 'ClId', 'resource_name': 'Resource Name', 'starttime': 'Start Time', 'endtime': 'End Time',
'claim_size': 'Claim Size', 'status': 'Status'}
print(('{id:7s} {resource_name:24s} {starttime:19s} {endtime:19s} {claim_size:16s} {status:8s}'.format(**header)))
print('===================================================================================================')
claims.sort(key=lambda c: c['id']) # secondary sorting key; SQL could have done this better
claims.sort(key=lambda c: c['starttime']) # primary sorting key (stable sort)
for claim in claims:
if scaled_units and (claim['resource_type_name'] == 'storage' or claim['resource_type_name'] == 'bandwidth'):
unit_base = 1024 if claim['resource_type_name'] == 'storage' else 1000 # no unit name here, so check type_name
claim['claim_size'] = humanreadablesize(claim['claim_size'], '', unit_base)
size_conv = '>16s'
else:
size_conv = '16d'
print((('{id:7d} {resource_name:24s} {starttime} {endtime} {claim_size:' + size_conv +
'} {status:8s}').format(**claim)))
if not claims:
print('<no claims on specified resources and time range>')
def updateStorageClaimsEndTime(radb, resources, storage_resource_type_id, lower_bound=None, upper_bound=None):
""" Update storage claims on resources in the RADB that currently apply, but the task
they belong to has ended (+ a short while). Set end time of these claims to task endtime.
This is intended for user clusters (e.g. DRAGNET) that do not auto-terminate storage claims on
cleanup. If users manage clean up autonomously, then they manage all storage accounting themselves.
"""
status = 0
resource_ids = [res['id'] for res in resources]
now = datetime.utcnow()
if lower_bound is None:
lower_bound = now
if upper_bound is None:
upper_bound = now
claims = radb.getResourceClaims(lower_bound=lower_bound, upper_bound=upper_bound,
resource_ids=resource_ids,
resource_type=storage_resource_type_id)
# Get associated tasks for their end times. Update claims for tasks that ended.
task_ids = list(set({claim['task_id'] for claim in claims}))
tasks = radb.getTasks(task_ids=task_ids)
for task in tasks:
# Wait until task ended. Do not race with OTDBtoRATaskStatusPropagator that extends storage claim endtime.
# We effectively undo that extension here. Intended for clusters (e.g. DRAGNET) where end users manage storage.
new_endtime = task['endtime']
if now < new_endtime + timedelta(minutes=1):
continue
claim_ids = [claim['id'] for claim in claims if claim['task_id'] == task['id'] and \
claim['endtime'] > new_endtime]
print(("Updating RADB storage claims {} endtime to {}".format(claim_ids, new_endtime)))
updated_dict = radb.updateResourceClaims(where_resource_claim_ids=claim_ids, endtime=new_endtime)
if not updated_dict['updated']:
logger.error('failed to update RADB storage claims') # why is N/A here; check the RA logs
status = 1
return status
def updateResource(radb, resource):
""" Update the RADB using the resource dict. """
print(("Updating RADB with resource {}".format(resource)))
updated_dict = radb.updateResourceAvailability(resource_id=resource['id'], active=resource['active'],
available_capacity=resource['available_capacity'],
total_capacity=resource['total_capacity'])
if not updated_dict['updated']:
logger.error('failed to update RADB resource') # why is N/A here; check the RA logs
return 1
return 0
def getMountPoint(resource_name):
""" E.g. with resource_name 'CEP4_storage:/data' or 'drg15_bandwidth:/data2' or 'CS002bw0',
this function returns: '/data' or '/data2' or None.
"""
sep_idx = resource_name.find(':/') # mount point must be an abs path
if sep_idx == -1:
return None
return resource_name[sep_idx + 1 : ]
def updateAvailableStorageCapacities(radb, resources):
import os
status = 0
for res in resources:
# All storage resource names are supposedly mount points.
# But do not update with the wrong partition info (sys maintenance?).
# Log error and let admin figure it out. RADB resource defaults may need updating too.
mount_pt = getMountPoint(res['name'])
if mount_pt is None or not os.path.ismount(mount_pt):
logger.error("skipped updating available capacity of resource '{}': its path is not a mount point on this system".format(res['name']))
status = 1
continue
# Retrieve avail capacity from filesystem and do some checks.
try:
st = os.statvfs(mount_pt)
except OSError as e:
logger.error('statvfs: ' + str(e))
status = 1
continue
avail_cap = st.f_bavail * st.f_frsize
total_cap = st.f_blocks * st.f_frsize
if total_cap != res['total_capacity']:
hint_arg = res['name'] + '=' + str(avail_cap) + ',' + str(total_cap)
            logger.warning("total capacity for resource '{}' is {}, which is not equal to {} as listed in the RADB. If the total capacity has changed permanently, please update the RADB, e.g. by running this program passing: {} (and by updating the software repo for RADB reinstalls).".format(res['name'], total_cap, res['total_capacity'], hint_arg))
if avail_cap > res['total_capacity']:
            logger.error("the detected available capacity for resource '{}' cannot be written to the RADB, because it is greater than the total capacity listed in the RADB.".format(res['name']))
status = 1
continue
# Only update available capacity in the RADB.
# Total and active indicate a config change (or maintenance in progress). Leave that for an admin.
res_update = {'id': res['id'], 'available_capacity': avail_cap,
'total_capacity': None, 'active': None}
status |= updateResource(radb, res_update)
return status
def updateSpecifiedCapacities(radb, resources, resource_updates):
status = 0
for res_update in resource_updates:
# Need resource id from name to apply the update. Also check avail <= total.
try:
res = next((res for res in resources if res['name'] == res_update['name']))
except StopIteration:
logger.error("skipped updating resource '{}': name unknown. Correct the name or (correct the) use (of) the -G/--resource-group-root option to widen the resource scope, e.g. -G CEP4|DRAGNET|LOFAR".format(res_update['name']))
status = 1
continue
if res_update['available_capacity'] is not None and \
res_update['total_capacity'] is None and \
res_update['available_capacity'] > res['total_capacity']:
logger.error("skipped updating resource '{}': specified available capacity cannot be greater than total capacity listed in the RADB. If the total capacity has changed permanently, please update the RADB using this program (and by updating the software repo for RADB reinstalls)".format(res_update['name']))
status = 1
continue
res_update['id'] = res['id']
status |= updateResource(radb, res_update)
return status
def getResourceGroupIdByName(db_rgp2rgp, name):
""" Returns group id of resource group named name, or None if name was not found.
The search happens breadth-first.
"""
# find root group(s): empty parent list
gids = [gid for gid, group in list(db_rgp2rgp.items()) if not group['parent_ids']]
i = 0
while i < len(gids): # careful iterating while modifying
res_group = db_rgp2rgp[gids[i]]
if res_group['resource_group_name'] == name:
return gids[i]
gids.extend(res_group['child_ids'])
i += 1
return None
def getSubtreeResourceIdList(db_rgp2rgp, root_gid):
""" Returns list of resource ids in resource group root_gid and its (grand)children."""
# Search breadth-first starting at root_gid.
gids = [root_gid]
resource_id_list = []
i = 0
while i < len(gids): # careful iterating while modifying
res_group = db_rgp2rgp[gids[i]]
resource_id_list.extend(res_group['resource_ids'])
gids.extend(res_group['child_ids'])
i += 1
return resource_id_list
def parseResourceArg(arg):
""" Return dict parsed from arg str. Arg format: resource_name:/data=True,100,200
with any value optional after the '=' (but need at least one).
Any returned dict value but the resource name may be None.
On error ValueError is raised.
"""
eq_idx = arg.find('=')
if eq_idx == -1:
raise ValueError("could not find '=' in argument; need e.g. res_name=100 or resource_name=True,100,200")
resource_name = arg[ : eq_idx]
if not resource_name:
raise ValueError("invalid resource name in argument before '='; need e.g. res_name=100 or resource_name=True,100,200")
resource_val = arg[eq_idx + 1 : ]
vals = resource_val.split(',')
if not vals or len(vals) > 3:
raise ValueError("need 1-3 argument value(s) after '=', e.g. res_name=100 or resource_name=True,100,200")
active = None
avail_cap = None
total_cap = None
for val in vals:
if val == 'True' or val == 'False':
if active is not None:
raise ValueError("accepting at most 1 bool as resource active value in argument")
active = True if val == 'True' else False
continue
if total_cap is not None:
raise ValueError("accepting at most 2 ints as resource available and total capacities in argument")
v = int(val)
if v < 0:
raise ValueError("capacity value must be positive")
if avail_cap is None:
avail_cap = v
else:
if v < avail_cap:
raise ValueError("specified available capacity cannot be greater than specified total capacity")
total_cap = v
return {'name': resource_name, 'active': active,
'available_capacity': avail_cap, 'total_capacity': total_cap}
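# Illustrative examples of the argument format accepted above (derived from
# the docstring; not executed anywhere in this module):
#
#     parseResourceArg('drg01_storage:/data1=False')
#     # -> {'name': 'drg01_storage:/data1', 'active': False,
#     #     'available_capacity': None, 'total_capacity': None}
#     parseResourceArg('CEP4_storage:/data=True,100,200')
#     # -> {'name': 'CEP4_storage:/data', 'active': True,
#     #     'available_capacity': 100, 'total_capacity': 200}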
def parseTimestamps(datetime_fmt, timestamps):
""" Return list of None or datetime objects representing timestamps. Raise ValueError on parse error.
Use datetime_fmt as the strptime() format str. A timestamp value may also be 'now' (UTC) or 'None'.
"""
# Parsing datetime strings could be done by extending optparse's Option class, but this works well enough
rv = []
now = None
for ts in timestamps:
if ts is None or ts == 'now':
if now is None:
now = datetime.utcnow()
ts = now
elif ts == 'None':
ts = None
else:
ts = datetime.strptime(ts, datetime_fmt)
rv.append(ts)
return rv
def parseArgs(args):
from socket import gethostname
hostname = gethostname()
from optparse import OptionParser
usage = 'Usage: %prog [OPTIONS] [resource_name=available_capacity]... or [resource_name=True|False[,avail_cap[,total_cap]]]...'
descr = 'List or update LOFAR RADB resource availability and/or available/total capacities'
parser = OptionParser(usage=usage, description=descr)
# already supported options: -h, --help, --
parser.add_option('-q', '--broker', dest='broker', default=DEFAULT_BROKER,
help='qpid broker hostname (default: %default).')
parser.add_option('--busname', dest='busname', default=DEFAULT_BUSNAME,
help='Name of the bus for all messaging operations (default: %default)')
parser.add_option('-G', '--resource-group-root', dest='resource_group_root', default=hostname,
help='Only consider resources under resource group root (default: this hostname: \'%default\' (all=LOFAR))')
parser.add_option('-t', '--resource-type', dest='resource_type', default=None,
help='Only consider resources of this type (e.g. storage, bandwidth, rsp, rcu, ...)')
parser.add_option('-E', '--end-past-tasks-storage-claims', dest='end_storage_claims', action='store_true', default=False,
                      help='WARNING: USE THIS OPTION ONLY FOR DRAGNET! Set end time to task stoptime for storage claims under --resource-group-root for completed tasks. Implies -t storage. Can be limited to the time range given by -T and -S.')
parser.add_option('-U', '--update-available-storage-capacity', dest='update_avail', action='store_true', default=False,
help='Update the available capacity value in the RADB of storage resources under --resource-group-root. Implies -t storage. Not affected by -T and -S.')
datetime_fmt = '%Y-%m-%d %H:%M:%S'
parser.add_option('-T', '--timestart', dest='timestart',
help='lower bound UTC timestamp \'{}\' or \'now\' or \'None\' for resource claims (default: now)'.format(datetime_fmt))
parser.add_option('-S', '--timestop', dest='timestop',
help='upper bound UTC timestamp \'{}\' or \'now\' or \'None\' for resource claims (default: now)'.format(datetime_fmt))
parser.add_option('--no-scaled-units', dest='no_scaled_units', action='store_true', default=False,
help='Print raw instead of scaled units for some sizes, e.g. 1048576 instead of 1M')
options, left_over_args = parser.parse_args(args)
if options.update_avail and options.resource_group_root != hostname:
parser.error("combining the option -U with a non-default -G is rejected: it is too easy to mass-update the wrong resources")
if options.end_storage_claims or options.update_avail:
if options.resource_type is None:
options.resource_type = 'storage'
elif options.resource_type != 'storage':
parser.error("the options -E or -U cannot be combined with -t {}, because -E and -U are about storage only".format(options.resource_type))
try:
timestamps = parseTimestamps(datetime_fmt, (options.timestart, options.timestop))
except ValueError as exc:
parser.error("timestamp arguments: " + str(exc))
options.timestart = timestamps[0]
options.timestop = timestamps[1]
if options.timestart is not None and options.timestop is not None and options.timestart > options.timestop:
parser.error("-T/--timestart option value may not be after -S/--timestop option value")
resource_updates = []
for i, arg in enumerate(left_over_args):
try:
resource_updates.append(parseResourceArg(arg))
except ValueError as exc:
parser.error("failed to parse non-option argument '{}': {}".format(i, exc))
return options, resource_updates, parser.print_help
def main(args):
import os
os.environ['TZ'] = 'UTC' # LOFAR observatory software uses UTC
options, resource_updates, print_help_func = parseArgs(args)
status = 0
radb = None
try:
radb = RADBRPC.create(exchange=options.busname, broker=options.broker)
db_resource_list = radb.getResources(resource_types=options.resource_type, include_availability=True)
if options.timestart is None:
options.timestart = datetime(1970, 1, 1)
if options.timestop is None:
options.timestop = datetime(2100, 1, 1)
# Filter resource list via resource root group option
db_resource_group_mships = radb.getResourceGroupMemberships()
db_rgp2rgp = db_resource_group_mships['groups'] # resource-group-to-resource-group relations
group_id = getResourceGroupIdByName(db_rgp2rgp, options.resource_group_root)
if group_id is None:
print_help_func()
print("")
logger.error("could not find resource group '{}'. You may want to (correct the) use (of) the -G/--resource-group-root option to widen the resource scope, e.g. -G CEP4|DRAGNET|LOFAR".format(options.resource_group_root))
return 1
resource_id_list = getSubtreeResourceIdList(db_rgp2rgp, group_id)
if not resource_id_list:
print_help_func()
print("")
logger.error("no resources found under resource group '{}' and its (grand)children".format(options.resource_group_root))
return 1
resources = [res for res in db_resource_list if res['id'] in resource_id_list] # SQL could have done this better
if options.end_storage_claims:
try:
storage_resource_type_id = next((res['type_id'] for res in resources))
except StopIteration:
print_help_func()
print("")
logger.error("-E/--end-past-tasks-storage-claims used, but no storage resources found under resource group '{}' and its (grand)children".format(options.resource_group_root))
return 1
status |= updateStorageClaimsEndTime(radb, resources, storage_resource_type_id, lower_bound=options.timestart, upper_bound=options.timestop)
if options.update_avail:
status |= updateAvailableStorageCapacities(radb, resources)
if resource_updates:
status |= updateSpecifiedCapacities(radb, resources, resource_updates)
# If no specific action requested, print list of resources and claims
if not options.end_storage_claims and not options.update_avail and not resource_updates:
resource_ids = [res['id'] for res in resources]
claims = radb.getResourceClaims(lower_bound=options.timestart, upper_bound=options.timestop,
resource_ids=resource_ids, extended=True)
# A small downside of querying RADB again is that the claimable capacities might be inconsistent with claims just retrieved.
# We could derive it ourselves or stick it in a transaction, but this is good enough for the overview.
for res in resources:
res['claimable_capacity'] = radb.get_resource_claimable_capacity(resource_id=res['id'],
lower_bound=options.timestart,
upper_bound=options.timestop)
printResources(resources, not options.no_scaled_units)
print("")
printClaims(claims, not options.no_scaled_units)
#except Exception: # disabled: prefer default stacktrace on bug here
finally:
if radb is not None:
radb.close()
return status
if __name__ == '__main__':
from sys import argv, exit
exit(main(argv[1:]))
| kernsuite-debian/lofar | SAS/DataManagement/ResourceTool/resourcetool.py | Python | gpl-3.0 | 24,976 | 0.006006 |
# debugshell extension
"""a python shell with repo, changelog & manifest objects"""
import mercurial
import code
def debugshell(ui, repo, **opts):
objects = {
'mercurial': mercurial,
'repo': repo,
'cl': repo.changelog,
'mf': repo.manifest,
}
bannermsg = "loaded repo : %s\n" \
"using source: %s" % (repo.root,
mercurial.__path__[0])
code.interact(bannermsg, local=objects)
cmdtable = {
"debugshell|dbsh": (debugshell, [])
}
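
# Illustrative use (an added note; paths are hypothetical). Enable the
# extension in an hgrc:
#
#   [extensions]
#   debugshell = /path/to/debugshell.py
#
# then run `hg debugshell` (or the alias `hg dbsh`) inside a repository to get
# an interactive Python shell with `repo`, `cl` and `mf` preloaded.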
| iaddict/mercurial.rb | vendor/mercurial/contrib/debugshell.py | Python | mit | 533 | 0.003752 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestparametergroupingtestservice"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.3.0", "msrestazure>=0.3.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestParameterGroupingTestService",
author_email="",
url="",
keywords=["Swagger", "AutoRestParameterGroupingTestService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest
"""
)
| csmengwan/autorest | AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureParameterGrouping/setup.py | Python | mit | 1,158 | 0 |
# Generated by Django 1.11.16 on 2018-11-14 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0191_realm_seat_limit"),
]
operations = [
migrations.AddField(
model_name="customprofilefieldvalue",
name="rendered_value",
field=models.TextField(default=None, null=True),
),
]
| andersk/zulip | zerver/migrations/0192_customprofilefieldvalue_rendered_value.py | Python | apache-2.0 | 418 | 0 |
"""
To use the SQLite3 module we need to add an import statement to our python script:
________________________________________________________________________________
>>> import sqlite3
________________________________________________________________________________
We can check the sqlite3 module version and the SQLite library version:
________________________________________________________________________________
>>> sqlite3.version
'2.6.0'
>>> sqlite3.sqlite_version
'3.7.17'
________________________________________________________________________________
The sqlite3.version attribute is the version of pysqlite (2.6.0), the binding
of the Python language to the SQLite database. The sqlite3.sqlite_version
attribute gives us the version of the SQLite database library; here it is 3.7.17.
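
We can also ask the SQLite library itself with plain SQL (an illustrative
addition to the tutorial; the exact version string depends on the library
installed on your system):
________________________________________________________________________________
>>> con = sqlite3.connect(':memory:')
>>> con.execute('SELECT SQLITE_VERSION()').fetchone()
(u'3.7.17',)
>>> con.close()
________________________________________________________________________________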
| Valka7a/python-playground | sqlite3/tutorials/module-in-python.py | Python | mit | 756 | 0.001323 |
import os
import numpy as np
from statsmodels.duration.hazard_regression import PHReg
from numpy.testing import (assert_allclose,
assert_equal)
import pandas as pd
# TODO: Include some corner cases: data sets with empty strata, strata
# with no events, entry times after censoring times, etc.
# All the R results
from . import survival_r_results
from . import survival_enet_r_results
"""
Tests of PHReg against R coxph.
Tests include entry times and stratification.
phreg_gentests.py generates the test data sets and puts them into the
results folder.
survival.R runs R on all the test data sets and constructs the
survival_r_results module.
"""
# Arguments passed to the PHReg fit method.
args = {"method": "bfgs", "disp": 0}
def get_results(n, p, ext, ties):
if ext is None:
coef_name = "coef_%d_%d_%s" % (n, p, ties)
se_name = "se_%d_%d_%s" % (n, p, ties)
time_name = "time_%d_%d_%s" % (n, p, ties)
hazard_name = "hazard_%d_%d_%s" % (n, p, ties)
else:
coef_name = "coef_%d_%d_%s_%s" % (n, p, ext, ties)
se_name = "se_%d_%d_%s_%s" % (n, p, ext, ties)
time_name = "time_%d_%d_%s_%s" % (n, p, ext, ties)
hazard_name = "hazard_%d_%d_%s_%s" % (n, p, ext, ties)
coef = getattr(survival_r_results, coef_name)
se = getattr(survival_r_results, se_name)
time = getattr(survival_r_results, time_name)
hazard = getattr(survival_r_results, hazard_name)
return coef, se, time, hazard
class TestPHReg(object):
# Load a data file from the results directory
def load_file(self, fname):
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.genfromtxt(os.path.join(cur_dir, 'results', fname),
delimiter=" ")
time = data[:,0]
status = data[:,1]
entry = data[:,2]
exog = data[:,3:]
return time, status, entry, exog
# Run a single test against R output
def do1(self, fname, ties, entry_f, strata_f):
# Read the test data.
time, status, entry, exog = self.load_file(fname)
n = len(time)
vs = fname.split("_")
n = int(vs[2])
p = int(vs[3].split(".")[0])
ties1 = ties[0:3]
# Needs to match the kronecker statement in survival.R
        strata = np.kron(range(5), np.ones(n // 5))
# No stratification or entry times
mod = PHReg(time, exog, status, ties=ties)
phrb = mod.fit(**args)
coef_r, se_r, time_r, hazard_r = get_results(n, p, None, ties1)
assert_allclose(phrb.params, coef_r, rtol=1e-3)
assert_allclose(phrb.bse, se_r, rtol=1e-4)
#time_h, cumhaz, surv = phrb.baseline_hazard[0]
# Entry times but no stratification
phrb = PHReg(time, exog, status, entry=entry,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-3)
# Stratification but no entry times
phrb = PHReg(time, exog, status, strata=strata,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-4)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Stratification and entry times
phrb = PHReg(time, exog, status, entry=entry,
strata=strata, ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et_st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Run all the tests
def test_r(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fnames = os.listdir(rdir)
fnames = [x for x in fnames if x.startswith("survival")
and x.endswith(".csv")]
for fname in fnames:
for ties in "breslow","efron":
for entry_f in False,True:
for strata_f in False,True:
yield (self.do1, fname, ties, entry_f,
strata_f)
def test_missing(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
time[0:5] = np.nan
status[5:10] = np.nan
exog[10:15,:] = np.nan
md = PHReg(time, exog, status, missing='drop')
assert_allclose(len(md.endog), 185)
assert_allclose(len(md.status), 185)
assert_allclose(md.exog.shape, np.r_[185,4])
def test_formula(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
entry = np.zeros_like(time)
entry[0:10] = time[0:10] / 2
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1],
"exog3": exog[:, 2], "exog4": exog[:, 3],
"entry": entry})
mod1 = PHReg(time, exog, status, entry=entry)
rslt1 = mod1.fit()
fml = "time ~ 0 + exog1 + exog2 + exog3 + exog4"
mod2 = PHReg.from_formula(fml, df, status=status,
entry=entry)
rslt2 = mod2.fit()
mod3 = PHReg.from_formula(fml, df, status="status",
entry="entry")
rslt3 = mod3.fit()
assert_allclose(rslt1.params, rslt2.params)
assert_allclose(rslt1.params, rslt3.params)
assert_allclose(rslt1.bse, rslt2.bse)
assert_allclose(rslt1.bse, rslt3.bse)
def test_predict_formula(self):
n = 100
np.random.seed(34234)
time = 50 * np.random.uniform(size=n)
status = np.random.randint(0, 2, n).astype(np.float64)
exog = np.random.uniform(1, 2, size=(n, 2))
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1]})
fml = "time ~ 0 + exog1 + np.log(exog2) + exog1*exog2"
model1 = PHReg.from_formula(fml, df, status=status)
result1 = model1.fit()
from patsy import dmatrix
dfp = dmatrix(model1.data.design_info.builder, df)
pr1 = result1.predict()
pr2 = result1.predict(exog=df)
pr3 = model1.predict(result1.params, exog=dfp) # No standard errors
pr4 = model1.predict(result1.params, cov_params=result1.cov_params(), exog=dfp)
prl = (pr1, pr2, pr3, pr4)
for i in range(4):
for j in range(i):
assert_allclose(prl[i].predicted_values, prl[j].predicted_values)
prl = (pr1, pr2, pr4)
for i in range(3):
for j in range(i):
assert_allclose(prl[i].standard_errors, prl[j].standard_errors)
def test_offset(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod1 = PHReg(time, exog, status)
rslt1 = mod1.fit()
offset = exog[:,0] * rslt1.params[0]
exog = exog[:, 1:]
mod2 = PHReg(time, exog, status, offset=offset)
rslt2 = mod2.fit()
assert_allclose(rslt2.params, rslt1.params[1:])
def test_post_estimation(self):
# All regression tests
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
mart_resid = rslt.martingale_residuals
assert_allclose(np.abs(mart_resid).sum(), 120.72475743348433)
w_avg = rslt.weighted_covariate_averages
assert_allclose(np.abs(w_avg[0]).sum(0),
np.r_[7.31008415, 9.77608674,10.89515885, 13.1106801])
bc_haz = rslt.baseline_cumulative_hazard
v = [np.mean(np.abs(x)) for x in bc_haz[0]]
w = np.r_[23.482841556421608, 0.44149255358417017,
0.68660114081275281]
assert_allclose(v, w)
score_resid = rslt.score_residuals
v = np.r_[ 0.50924792, 0.4533952, 0.4876718, 0.5441128]
w = np.abs(score_resid).mean(0)
assert_allclose(v, w)
groups = np.random.randint(0, 3, 200)
mod = PHReg(time, exog, status)
rslt = mod.fit(groups=groups)
robust_cov = rslt.cov_params()
v = [0.00513432, 0.01278423, 0.00810427, 0.00293147]
w = np.abs(robust_cov).mean(0)
assert_allclose(v, w, rtol=1e-6)
s_resid = rslt.schoenfeld_residuals
ii = np.flatnonzero(np.isfinite(s_resid).all(1))
s_resid = s_resid[ii, :]
v = np.r_[0.85154336, 0.72993748, 0.73758071, 0.78599333]
assert_allclose(np.abs(s_resid).mean(0), v)
def test_summary(self):
# smoke test
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
rslt.summary()
def test_predict(self):
# All smoke tests. We should be able to convert the lhr and hr
# tests into real tests against R. There are many options to
# this function that may interact in complicated ways. Only a
# few key combinations are tested here.
np.random.seed(34234)
endog = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(endog, exog, status)
rslt = mod.fit()
rslt.predict()
for pred_type in 'lhr', 'hr', 'cumhaz', 'surv':
rslt.predict(pred_type=pred_type)
rslt.predict(endog=endog[0:10], pred_type=pred_type)
rslt.predict(endog=endog[0:10], exog=exog[0:10,:],
pred_type=pred_type)
def test_get_distribution(self):
# Smoke test
np.random.seed(34234)
exog = np.random.normal(size=(200, 2))
lin_pred = exog.sum(1)
elin_pred = np.exp(-lin_pred)
time = -elin_pred * np.log(np.random.uniform(size=200))
mod = PHReg(time, exog)
rslt = mod.fit()
dist = rslt.get_distribution()
fitted_means = dist.mean()
true_means = elin_pred
fitted_var = dist.var()
fitted_sd = dist.std()
sample = dist.rvs()
def test_fit_regularized(self):
# Data set sizes
for n,p in (50,2),(100,5):
# Penalty weights
for js,s in enumerate([0,0.1]):
coef_name = "coef_%d_%d_%d" % (n, p, js)
coef = getattr(survival_enet_r_results, coef_name)
fname = "survival_data_%d_%d.csv" % (n, p)
time, status, entry, exog = self.load_file(fname)
exog -= exog.mean(0)
exog /= exog.std(0, ddof=1)
mod = PHReg(time, exog, status=status, ties='breslow')
rslt = mod.fit_regularized(alpha=s)
# The agreement isn't very high, the issue may be on
# their side. They seem to use some approximations
# that we are not using.
assert_allclose(rslt.params, coef, rtol=0.3)
# Smoke test for summary
smry = rslt.summary()
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
| DonBeo/statsmodels | statsmodels/duration/tests/test_phreg.py | Python | bsd-3-clause | 11,981 | 0.003506 |
# External Attribute Skeleton
#
# Input: Multi-trace, single attribute
# Output: Single attribute
#
import sys,os
import numpy as np
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
#
# The attribute parameters - keep what you need
#
xa.params = {
'Inputs': ['Input'],
'ZSampMargin' : {'Value': [-30,30], 'Minimum': [-1,1], 'Symmetric': True, 'Hidden': False},
'StepOut' : {'Value': [1,1], 'Minimum': [1,1], 'Hidden': False},
'Par_0' : {'Name': 'Parameter 0', 'Value': 0},
'Par_1' : {'Name': 'Parameter 1', 'Value': 1},
'Par_2' : {'Name': 'Parameter 2', 'Value': 2},
'Par_3' : {'Name': 'Parameter 3', 'Value': 3},
'Par_4' : {'Name': 'Parameter 4', 'Value': 4},
'Par_5' : {'Name': 'Parameter 5', 'Value': 5},
'Select' : {'Name': 'Option', 'Values': ['First', 'Second', 'Third'], 'Selection': 0},
'Parallel' : False,
'Help' : 'http://waynegm.github.io/OpendTect-Plugin-Docs/Attributes/ExternalAttrib/'
}
#
# Define the compute function
#
def doCompute():
#
# Initialise some constants from the attribute parameters or the SeismicInfo, xa.SI, array for use in the calculations
# These are just some examples - keep/add what you need
#
number_inline = xa.SI['nrinl']
number_xline = xa.SI['nrcrl']
centre_trace_x = xa.SI['nrinl']//2
centre_trace_y = xa.SI['nrcrl']//2
nyquist = 1.0/(2.0*xa.SI['zstep'])
par0 = xa.params['Par_0']['Value']
zw = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
select = xa.params['Select']['Selection']
#
# This is the trace processing loop
#
while True:
xa.doInput()
#
# After doInput the TraceInfo, xa.TI, array contains information specific to this trace segment - keep what you need
#
number_of_samples = xa.TI['nrsamp']
start_time = xa.TI['z0']
current_inline = xa.TI['inl']
current_crossline = xa.TI['crl']
#
# Get the input
#
indata = xa.Input['Input']
#
# Your attribute calculation goes here
#
# Warning Python loops can be slow - this is contrived just to show how to access traces in the analysis data cube
#
outdata = np.zeros(number_of_samples)
for inline in range(number_inline):
for xline in range(number_xline):
				if (inline != centre_trace_x or xline != centre_trace_y): #skip only the centre trace
outdata += indata[inline,xline,:]
outdata /= (number_inline * number_xline - 1)
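		#
		# Equivalent vectorized form (an illustrative sketch; assumes indata is
		# a numpy array shaped (number_inline, number_xline, number_of_samples)):
		#
		#	outdata = (indata.sum(axis=(0, 1))
		#	           - indata[centre_trace_x, centre_trace_y, :]) \
		#	          / (number_inline * number_xline - 1)
		#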
#------------------------------------------------------------------------------------
#
xa.Output = outdata
xa.doOutput()
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
xa.run(sys.argv[1:])
| waynegm/OpendTect-Plugins | bin/python/wmpy/Skeletons/ex_multi_trace_single_attribute_input_single_output.py | Python | gpl-3.0 | 2,619 | 0.030546 |
from data.COMMON import * #essentials
Header( 0.001, #Script Version (for updates)
('Melee',['dat']), #model activation
('Melee',['dat']), #anim activation
['RVL_IMG'])#revolution']) #included libs
#gist number: 2757147
#for the work I've done to get this far, this should really be v3.6 heh...
#but because this is a first release for a new program, why waste it. ^_^
from data.COMMON import * #essentials + Libs
#the functions from the included libs are imported directly into COMMON
#(you don't need the lib's name to use it's function)
#def ImportGUI(): #still in development
#this function is called before ImportModel
#-----------
def ImportModel(T,C):
#####
#1+None # <-- error for testing script reloading
#####
from math import cos,sin,pi #please refrain from using imports (these will be supported by dev5)
global degrad; degrad = pi/180
#used by _Bone and _Object for storing and reading the transformed matrices
global bones; bones=[] #bone offsets
global matrices; matrices=[] #matrices ( bind,invbind = matrices[ bones.index(bone_offset) ] )
def MTX44(): return [[1.0,0.0,0.0,0.0],[0.0,1.0,0.0,0.0],[0.0,0.0,1.0,0.0],[0.0,0.0,0.0,1.0]]
def TransformMatrix( translate, rotate, scale ):
global degrad
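		#composes the local transform M = T * Rz * Ry * Rx * S (column-vector
		#convention): scale first, then rotate about X, Y and Z, then translate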
cosx = cos(rotate[0] * degrad)
sinx = sin(rotate[0] * degrad)
cosy = cos(rotate[1] * degrad)
siny = sin(rotate[1] * degrad)
cosz = cos(rotate[2] * degrad)
sinz = sin(rotate[2] * degrad)
return [
[ scale[0] * cosy * cosz,
scale[1] * (sinx * cosz * siny - cosx * sinz),
scale[2] * (sinx * sinz + cosx * cosz * siny),
translate[0]],
[ scale[0] * sinz * cosy,
scale[1] * (sinx * sinz * siny + cosz * cosx),
scale[2] * (cosx * sinz * siny - sinx * cosz),
translate[1]],
[ -scale[0] * siny,
scale[1] * sinx * cosy,
scale[2] * cosx * cosy,
translate[2]],
[0.0,0.0,0.0,1.0]]
'''this block will change'''
def str_off(offset): #returns a hex-string of a given int offset '0x00000000'
STRING=(hex(offset).replace('0x','')).upper()
return '0'*(8-len(STRING))+STRING
################################################################################################
'''object-block functions'''
def vector(D_Type, exponent, offset=None, IS3D=1): #returns an [X,Y(,Z)] vector
#TODO: direct data values (if ever found)
def DataType(DT):
if DT==0: return bu8()/pow(2.0,exponent) #8bit unsigned pseudo-float
if DT==1: return bs8()/pow(2.0,exponent) #8bit signed pseudo-float
if DT==2: return bu16()/pow(2.0,exponent) #16bit unsigned pseudo-float
if DT==3: return bs16()/pow(2.0,exponent) #16bit signed pseudo-float
if DT==4: return bf32() #32bit float
if offset==None: #Direct format
return '' #yet to be seen (return blank vector)
else: #indexed format
j=Jump(offset, label=' -- Vector Data:')
vec=[DataType(D_Type),DataType(D_Type)]+([DataType(D_Type)] if IS3D else [])
Jump(j); return vec
def transform(V,M): #transform the vector via the matrix
return [((M[0][0]*V[0]) + (M[0][1]*V[1]) + (M[0][2]*V[2]) + M[0][3]),
((M[1][0]*V[0]) + (M[1][1]*V[1]) + (M[1][2]*V[2]) + M[1][3]),
((M[2][0]*V[0]) + (M[2][1]*V[1]) + (M[2][2]*V[2]) + M[2][3])]
def Ntransform(N,M): #transform the normal via the matrix
return [(M[0][0]*N[0]) + (M[0][1]*N[1]) + (M[0][2]*N[2]),
(M[1][0]*N[0]) + (M[1][1]*N[1]) + (M[1][2]*N[2]),
(M[2][0]*N[0]) + (M[2][1]*N[1]) + (M[2][2]*N[2])]
def getWeights(WOL):
Jump(WOL, label=' -- [ Weight_Offset ]:' )
ML=[]
for WO in StructArr(['bu32']): #Matrix/Weight Offset
Jump(WO[0]+32, label=' -- [ Bone_Offset , Weight ]:')
inflmtx = [[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0]]
#---restructured using the src for BrawlBox:
_weights = StructArr(['bu32','bf32'])
if len(_weights)>1:
for MO,W in _weights:
bind,invbind = matrices[bones.index(MO+32)]
'''
invbind = MtxTranspose(invbind)
invbind[0][3],invbind[1][3],invbind[2][3] = invbind[3][:3]
invbind[3][0],invbind[3][1],invbind[3][2] = [0.0,0.0,0.0]
'''
#'''
tempmtx = MtxMultiply(bind,invbind)
'''^ that's the world-transform matrix'''
for r in range(4):
for c in range(4):
inflmtx[r][c]+=tempmtx[r][c]*W
elif len(_weights)==1:
MO,W = _weights[0]
bind,invbind = matrices[bones.index(MO+32)]
for r in range(4):
for c in range(4):
inflmtx[r][c]=bind[r][c]*W
#---
'''
inflmtx = MtxTranspose(invbind)
inflmtx[0][3],inflmtx[1][3],inflmtx[2][3] = inflmtx[3][:3]
inflmtx[3][0],inflmtx[3][1],inflmtx[3][2] = [0.0,0.0,0.0]
'''
#'''
ML+=[ inflmtx ]
return ML
def getAttrs(AO): Jump(AO, label=' -- Attributes [CP_index,CP_Type,IsXYZ/NBT/A/ST,Data_Type,Exponent,unk,Stride/Format,Offset]');\
return StructArr(['bu32','bu32','bu32','bu32','bu8','bu8','bu16','bu32'],[255,'*','*','*','*','*','*','*'])
def CPT(T): #face-point index/value formats
if T == 0: return '',0 #null
if T == 1: return None,0 #direct data (handled by code)
if T == 2: return bu8(),1 #8bit index
if T == 3: return bu16(),2 #16bit index
def geometry(Attributes,weights_list):
global length;length=0
def color(fmt,offset=None,alpha=1): #currently returns [R,G,B,A] int() colors only
global length #TODO: remove to increase speed (faster if we don't redefine this function)
if offset==None: #Direct format
if fmt==0:length+=2;D=bu16(); return [
int(((D>>11)&31)*(255/31)),int(((D>>5)&63)*(255/63)),int((D&31)*(255/31)),255] #RGB565
if fmt==1:length+=3; return [bu8(),bu8(),bu8(),255] #return [bu8(),bu8(),bu8()] #RGB8
if fmt==2:length+=4; return [bu8(),bu8(),bu8(),bu8()] #RGBX8
if fmt==3:length+=2; RG,BA=bu8(),bu8(); R,G,B,A=RG>>4,RG&15,BA>>4,BA&15; return [
(R*16)+R,(G*16)+G,(B*16)+B,(A*16)+A]#RGBA4
if fmt==4:length+=3; D=bu24(); return [
(D>>18)*(255/63),((D>>12)&63)*(255/63),((D>>6)&63)*(255/63),(D&63)*(255/63)] #RGBA6
if fmt==5:length+=4; return [bu8(),bu8(),bu8(),bu8()] #RGBA8
else: #indexed format
return [255,255,255,255] #yet to be seen (returning white)
count=bu16(label=' -- Facepoint Count')
while count>0:
tmtx = MTX44() #transformations
V,N,C,U='','',['',''],['','','','','','','','']
for attr in Attributes:
I,L=CPT(attr[1]); length += L
def Get(IS3D): return vector(attr[3], attr[4], (attr[7]+(I*attr[6]))+32, IS3D)
switch(attr[0])
if case( 0): tmtx = weights_list[bu8()/3];length+=1; LABEL(' -- Weight Index/value') #vert/nor_mtx
elif case( 1): bu8()/3; length+=1 #uv[0]_mtx (value not used yet)
elif case( 2): bu8()/3; length+=1 #uv[1]_mtx
elif case( 3): bu8()/3; length+=1 #uv[2]_mtx
elif case( 4): bu8()/3; length+=1 #uv[3]_mtx
elif case( 5): bu8()/3; length+=1 #uv[4]_mtx
elif case( 6): bu8()/3; length+=1 #uv[5]_mtx
elif case( 7): bu8()/3; length+=1 #uv[6]_mtx
elif case( 8): bu8()/3; length+=1 #uv[7]_mtx
elif case( 9): LABEL(' -- Vert Index/value'); V=('' if I=='' else transform(Get(attr[2]),tmtx)) #vert
elif case(10): LABEL(' -- Normal Index/value'); N=('' if I=='' else Ntransform(Get(1),tmtx)) #normal
elif case(11): C[0]=('' if I=='' else color(attr[3],I,attr[2])); LABEL(' -- Color0 Index/value') #color0
elif case(12): C[1]=('' if I=='' else color(attr[3],I,attr[2])); LABEL(' -- Color1 Index/value') #color1
elif case(13): LABEL(' -- UV0 Index/value'); U[0]=('' if I=='' else Get(0)) #UV0
elif case(14): LABEL(' -- UV1 Index/value'); U[1]=('' if I=='' else Get(0)) #UV1
elif case(15): LABEL(' -- UV2 Index/value'); U[2]=('' if I=='' else Get(0)) #UV2
elif case(16): LABEL(' -- UV3 Index/value'); U[3]=('' if I=='' else Get(0)) #UV3
elif case(17): LABEL(' -- UV4 Index/value'); U[4]=('' if I=='' else Get(0)) #UV4
elif case(18): LABEL(' -- UV5 Index/value'); U[5]=('' if I=='' else Get(0)) #UV5
elif case(19): LABEL(' -- UV6 Index/value'); U[6]=('' if I=='' else Get(0)) #UV6
elif case(20): LABEL(' -- UV7 Index/value'); U[7]=('' if I=='' else Get(0)) #UV7
elif case(21): pass #vert_mtx_arr
elif case(22): pass #normal_mtx_arr
elif case(23): pass #uv_mtx_arr
elif case(24): pass #light_mtx_array
elif case(25): LABEL(' -- NBT Index/value'); N=('' if I=='' else Get(1))#transform(Get(1),tmtx)) #NBT (NX,NY,NZ, BX,BY,BZ, TX,TY,TZ)
elif case(255):pass #CP_NULL
'''NBT:
0x000003C07C: read 0x00000019 as 25 CP_index
0x000003C080: read 0x00000003 as 3 CP_Type
0x000003C084: read 0x00000001 as 1 isNBT
0x000003C088: read 0x00000004 as 4 Data_Type
0x000003C08C: read 0x00 as 0 Exponent
0x000003C08D: read 0x00 as 0 unk
0x000003C08E: read 0x0024 as 36 stride
0x000003C090: read 0x00007200 as 29184 offset
'''
#NOTE: currently NBT just evaluates the normal vector
#this is because the bi-normal and tangent vectors are not supported in this development
#this has already been fixed in the upcoming dev5 release ;)
count-=1
SetFacepoint(V,N,tuple(C),tuple(U))
if len(tmtx): (SetWeight( N, W ) for N,M,W in tmtx)
return length+3
#-----------------------------------------------------------------------------------------------
'''read a DAT object'''
def _Mesh(offset,Material): #pobj
Jump(offset, label=' -- Object Struct')
me_unk = bu32( label=' -- Unknown')
Next = bu32( label=' -- Next Object Struct Offset')+32
attrs = bu32( label=' -- Facepoint Attributes Offset')+32
me_flags = bu16(label=' -- Unknown Object Flags')
DL_size = bu16( label=' -- Display List Size')*32
DL_data = bu32( label=' -- Display List Offset')+32
weights = bu32( label=' -- Weight Offsets List Offset:')+32
#SetActiveMaterial('mat'+str_off(Material)) #hack (somewhat)
name='obj'+str_off(offset)
SetObject( name, ParentName='joint_obj') #create a new mesh object
weights_list=(getWeights(weights) if weights>32 else [])
Attributes=getAttrs(attrs)
Jump(DL_data);LABEL(' -- Display List:')
while DL_size>0:
Switch(bh8())
if Case('78'): t,n = UMC_POLYGON, 'Polygon' #possible support based on pattern (hasn't actually been seen)
elif Case('80'): t,n = UMC_QUADS, 'Quads'
elif Case('88'): t,n = UMC_QUADSTRIP, 'QuadStrip' #possible support based on pattern (hasn't actually been seen)
elif Case('90'): t,n = UMC_TRIANGLES, 'Triangles'
elif Case('98'): t,n = UMC_TRIANGLESTRIP,'TriangleStrip'
elif Case('A0'): t,n = UMC_TRIANGLEFAN, 'TriangleFan'
elif Case('A8'): t,n = UMC_LINES, 'Lines'
elif Case('B0'): t,n = UMC_LINESTRIP, 'LineStrip'
elif Case('B8'): t,n = UMC_POINTS, 'Points'
else: t=''
if t!='':
LABEL(' -- Primitive Type: '+n)
SetPrimitive(t)
DL_size-=geometry(Attributes,weights_list)
else: DL_size-=1
SetMaterial('mat'+str_off(Material))
if Next>32: _Mesh(Next,Material)
################################################################################################
'''read a DAT image structure'''
def _Image(offset):
Jump(offset, label=' -- Image Struct')
Data = bu32( label=' -- Pixel Data Offset')
Width = bu16( label=' -- Width')
Height = bu16( label=' -- Height')
Format = bu32( label=' -- Format')
Jump(Data, label=' -- Pixel Data')
img = readimg(Width,Height,Format) #the library works, yay! ^_^
name='img'+str_off(offset)
SetImage(name,Width,Height,img)
################################################################################################
'''read a DAT texture structure'''
def _Texture(offset):
Jump(offset, label=' -- Texture Struct')
name='tex'+str_off(offset)
tx_unk = bu32( label=' -- Unknown')
Next = bu32( label=' -- Next Texture Offset')+32
tx_unk3 = bu32( label=' -- Unknown')
LayerFlags = bu32( label=' -- Layer Flags')
tx_unk4 = bu32( label=' -- Unknown')
tx_unk5 = bu32( label=' -- Unknown')
tx_unk6 = bu32( label=' -- Unknown')
tx_unk7 = bf32( label=' -- Unknown')
MaxX = bf32( label=' -- (?) TexClamp Max X')
MaxY = bf32( label=' -- (?) TexClamp Max Y')
Angle = bf32( label=' -- (?) TexClamp Angle')
MinX = bf32( label=' -- (?) TexClamp Min X')
MinY = bf32( label=' -- (?) TexClamp Min Y')
WrapS = bu32( label=' -- Wrap S')
WrapT = bu32( label=' -- Wrap T')
ScaleX = bu8( label=' -- Scale X')
ScaleY = bu8( label=' -- Scale Y')
tx_unk8 = bu16( label=' -- Unknown')
tx_unk9 = bu32( label=' -- Unknown') #or bu16(['',''])
tx_unk10 = bf32( label=' -- Unknown')
tx_unk11 = bu32( label=' -- Unknown')
Image = bu32( label=' -- Image Offset')+32
Pallet = bu32( label=' -- Pallet Offset')+32
tx_unk12 = bu32( label=' -- Unknown')
tx_unk13 = bu32( label=' -- Unknown Offset')+32
#TODO: apply texture settings
SetTexture(name)
if Image>32: _Image(Image)
if Next>32: _Texture(Next)
################################################################################################
'''read a DAT material-colors structure'''
def _Colors(offset):
Jump(offset, label=' -- Material-Colors Struct')
D = bu8(['','','',''], label=' -- Diffuse')
A = bu8(['','','',''], label=' -- Ambient')
S = bu8(['','','',''], label=' -- Specular')
cl_unk = bf32( label=' -- Unknown') #usually 1.0
cl_unk2 = bf32( label=' -- Unknown') #seen 50.0 (shininess??)
SetMatColors(A,D,S)
################################################################################################
'''read the data structure of a DAT material'''
def _Material(offset):
Jump(offset, label=' -- Material Struct')
name='mat'+str_off(offset)
ma_unk = bu32( label=' -- Unknown')
ma_flags = bu32(label=' -- Unknown Flags')
Texture = bu32( label=' -- Texture Offset')+32
Colors = bu32( label=' -- Material Colors Offset')+32
ma_unk2 = bu32( label=' -- Unknown')
ma_unk3 = bu32( label=' -- Unknown')
SetMaterial(name)
if Colors>32: _Colors(Colors) #colors are already defaulted
if Texture>32: _Texture(Texture)
################################################################################################
'''read the data structure of a DAT mesh object'''
def _Object(offset):
Jump(offset, label=' -- Mesh Struct')
ob_unk = bu32( label=' -- Unknown')
Next = bu32( label=' -- Next Object Struct Offset')+32
Material = bu32(label=' -- Material Struct Offset')+32
Mesh = bu32( label=' -- Mesh Struct Offset')+32
if Mesh>32: _Mesh(Mesh,Material)
if Material>32: _Material(Material)
if Next>32: _Object(Next)
################################################################################################
'''read and transform a DAT bone before storing it's data'''
def _Bone(offset, parent='', prev='', pbm=MTX44(), pibm=MTX44(), rig = 'joint_obj'):
Jump(offset, label=' -- Bone Struct')
bn_unk1 = bu32( label=' -- Unknown')
Flags = bu32( label=' -- Unknown Bone Flags')
Child = bu32( label=' -- Child Bone Struct Offset')+32
Next = bu32( label=' -- Next Bone Struct Offset')+32
Object = bu32( label=' -- Object Struct Offset')+32
RX = bf32(label=' -- Rotation X')
RY = bf32(label=' -- Rotation Y')
RZ = bf32(label=' -- Rotation Z')
SX = bf32(label=' -- Scale X')
SY = bf32(label=' -- Scale Y')
SZ = bf32(label=' -- Scale Z')
LX = bf32(label=' -- Location X')
LY = bf32(label=' -- Location Y')
LZ = bf32(label=' -- Location Z')
inv_off = bu32( label=' -- Inverse-Bind Matrix Offset')+32
bn_unk2 = bu32( label=' -- Unknown')
if inv_off>32: #World Inverse-Bind
Jump(inv_off, label=' -- Inverse-Bind Matrix')
ibm = [ [bf32(label=' -- XX'),bf32(label=' -- XY'),bf32(label=' -- XZ'),bf32(label=' -- XW')],
[bf32(label=' -- YX'),bf32(label=' -- YY'),bf32(label=' -- YZ'),bf32(label=' -- YW')],
[bf32(label=' -- ZX'),bf32(label=' -- ZY'),bf32(label=' -- ZZ'),bf32(label=' -- ZW')],
[0.0,0.0,0.0,1.0]] #mtx44
'''
bilt = MtxTranspose(bilt)
bilt[0][3],bilt[1][3],bilt[2][3] = bilt[3][:3]
bilt[3][0],bilt[3][1],bilt[3][2] = [0.0,0.0,0.0]
'''
#'''
else: #load default matrix
ibm=pibm #MTX44()
#tm = TransformMatrix( [LX,LY,LZ], [RX,RY,RZ], [SX,SY,SZ] )
bm = MtxInvert(ibm) #MtxMultiply( tm, pbm )
LX,LY,LZ = bm[0][3],bm[1][3],bm[2][3] #transform bone Loc with the matrix
SetObject(rig) #active object being the rig object
pa = 'bn'+str_off(parent) if parent!='' else ''
pr = 'bn'+str_off(prev) if prev!='' else ''
SetBone('bn'+str_off(offset),0,[LX,LY,LZ,RX,RY,RZ,SX,SY,SZ],ibm,pa,pr)
global bones; bones+=[offset]
global matrices; matrices+=[[bm,ibm]]
if Child>32: _Bone(Child,offset,'',bm,ibm, rig=rig)
if Next>32: _Bone(Next,parent,offset,pbm,pibm, rig=rig)
if Object>32: _Object(Object)
################################################################################################
def _Bone_Set(offset,rig = 'joint_obj'):
SetObject(rig,24)
_Bone(offset, rig=rig)
'''unknown structures:'''
def _Struct2(offset):
Jump(offset, label=' -- Unknown Struct 2')
ptr0 = bu32( label=' -- Object Bone Pointer')+32
s2u0 = bu32( label=' -- Unknown')
s2u1 = bf32( label=' -- Unknown')
s2u2 = bu32( label=' -- Unknown')
apt0 = bu32( label=' -- Attribute Pointer')+32
apt1 = bu32( label=' -- Attribute Pointer')+32
apt2 = bu32( label=' -- Attribute Pointer')+32
apt3 = bu32( label=' -- Attribute Pointer')+32
apt4 = bu32( label=' -- Attribute Pointer')+32
apt5 = bu32( label=' -- Attribute Pointer')+32
ptr1 = bu32( label=' -- Bone Pointer')+32
if ptr0>32: _Bone_Set(ptr0,'joint_obj')
if ptr1>32: _Bone_Set(ptr1,'joint_obj2')
def _Struct1(offset):
Jump(offset, label=' -- Unknown Struct 1')
ptr0 = bu32( label=' -- Unknown Pointer')+32
ptr1 = bu32( label=' -- Struct 2 Pointer')+32
ptr2 = bu32( label=' -- Unknown Pointer')+32
matrix = bu32( label=' -- 3x4 Matrix Pointer')+32
ptr4 = bu32( label=' -- Struct 3 Pointer')+32
ptr5 = bu32( label=' -- Unknown Pointer')+32
ptr6 = bu32( label=' -- Struct 7 Pointer')+32
if ptr1>32: _Struct2(ptr1)
def _MatAnim(offset): pass
################################################################################################
#main header:
block_size = bu32( label=' -- DataBlock Size')
pointer_tbl = bu32( label=' -- Pointer Table Offset')+32
pointer_cnt = bu32( label=' -- Pointer Count')
root_cnt = bu32( label=' -- Root Structure Count')
ref_cnt = bu32( label=' -- Reference Structure Count')
tmp1 = bu32( label=' -- Unknown') #001B?
unk1 = bu32( label=' -- Pad?')
unk2 = bu32( label=' -- Pad?')
structs = {
#structure size : [ [structure, [local pointer addresses]] ]
#validate that the existing pointers match these positions.
12:[
[_MatAnim, [0,8]],
],
64:[
[_Bone_Set, [12,16,20,60]],
],
48:[
[_Struct1, [0,4,8,12,16,20,24]],
],
}
jump(pointer_tbl)
pointer_addrs = [ p+32 for p in bu32(['']*pointer_cnt, label=' -- Pointer Reference Address') ]
laststruct=0 #for checking if the last structure in the data is behind the root structure
pointer = []
for addr in pointer_addrs:
jump(addr)
ptr = bu32( label=' -- Pointer')+32
if ptr > laststruct: laststruct = ptr #top-most structure
pointer += [ptr]
for i in range(root_cnt):
Jump(pointer_tbl+(pointer_cnt*4)+(i*8), label=' -- Root Structs') #get to the base node
root_offset = bu32(label=' -- Data Offset')+32
string_offset = bu32(label=' -- String Offset') #prbly a dictionary key: { str(key): value }
'''old code:
last=Jump(offset_tbl+(num_offsets*4)+((num_bases+num_refs)*8)+bu32(label=' -- String Offset'), label=' -- String')
base_name=String().split('_')#; jump(last)
'''
# root_offset < rootstructsize < pointer_tbl
root_size=pointer_tbl
if root_offset < laststruct:
for ptr in pointer:
if root_size > ptr and ptr > root_offset: root_size=ptr
else: root_size=pointer_tbl
root_size-=root_offset
#'''new code:
struct_cnt = len(structs[root_size])-1
for struct_id,(struct,ptrs) in enumerate(structs[root_size]):
bad_struct = 0
for ptr in ptrs:
if root_offset+ptr in pointer_addrs: continue
else: #unmarked pointer
jump(root_offset+ptr)
if bu32()>0: bad_struct = 1 #unmarked (valid) pointers are 0, otherwize it's data
if not bad_struct: #all pointer tests passed
try: #doesn't mean the data was valid
struct(root_offset) #try to read the data
break #if it worked w/o error, we have the right structure
except: #TODO: need a method to flush UMC's data
LABEL('\n\n-- mistaken structure')
if struct_id<struct_cnt: LABEL(', continuing --\n\n')
else: LABEL(', no working structures found --\n\n')
pass
#'''
'''
if i==0: #index has something to do with selection
switch(root_size) #we now have the absolute size of the root structure
if case(64): #set a rig object for bones, and child mesh objects (UMC formatting)
SetObject('joint_obj',24) #last viewport (24)
_Bone(root_offset) #begin reading the model
#elif case(48): _Struct1(root_offset) #TODO
#''' | Universal-Model-Converter/UMC3.0a | scripts/SSBM.py | Python | mit | 25,037 | 0.037385 |
import statsmodels.api as sm
from . import common_fields
from . import make_gaps
from . import tools
from .device_event import make_alarm_event
def apply_loess(solution, num_days, gaps):
"""Solves the blood glucose equation over specified period of days
and applies a loess smoothing regression to the data
Returns numpy arrays for glucose and time values
"""
#solving for smbg valuesn
smbg_gluc = solution[:, 1]
smbg_time = solution[:, 2]
#make gaps in cbg data, if needed
solution = make_gaps.gaps(solution, num_days=num_days, gaps=gaps)
#solving for cbg values
cbg_gluc = solution[:, 1]
cbg_time = solution[:, 2]
#smoothing blood glucose eqn
lowess = sm.nonparametric.lowess
smoothing_distance = 1.5 #1.5 minutes
fraction = (smoothing_distance / (num_days * 60 * 24)) * 100
result = lowess(cbg_gluc, cbg_time, frac=fraction, is_sorted=True)
smoothed_cbg_time = result[:, 0]
smoothed_cbg_gluc = result[:, 1]
return smoothed_cbg_gluc, smoothed_cbg_time, smbg_gluc, smbg_time
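
# Illustrative usage sketch (an added example; `solution` and `gaps` are
# hypothetical inputs in the formats expected by apply_loess/make_gaps.gaps):
#
#   smoothed_gluc, smoothed_time, smbg_gluc, smbg_time = apply_loess(
#       solution, num_days=30, gaps=gaps)
#   events = cbg(smoothed_gluc, smoothed_time, 'US/Pacific')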
def cbg(gluc, timesteps, zonename):
""" construct cbg events
gluc -- a list of glucose values at each timestep
timesteps -- a list of epoch times
zonename -- name of timezone in effect
"""
cbg_data = []
for value, timestamp in zip(gluc, timesteps):
cbg_reading = {}
cbg_reading = common_fields.add_common_fields('cbg', cbg_reading, timestamp, zonename)
cbg_reading["value"] = tools.convert_to_mmol(value)
cbg_reading["units"] = "mmol/L"
if value > 400:
cbg_reading["annotation"] = [{"code": "bg/out-of-range", "threshold": 400, "value": "high"}]
cbg_reading["value"] = tools.convert_to_mmol(401)
elif value < 40:
cbg_reading["annotation"] = [{"code": "bg/out-of-range", "threshold": 40, "value": "low"}]
cbg_reading["value"] = tools.convert_to_mmol(39)
            #add a device meta alarm for the low glucose reading
meta_alarm = make_alarm_event(timestamp, zonename)
cbg_data.append(meta_alarm)
cbg_data.append(cbg_reading)
return cbg_data
| tidepool-org/dfaker | dfaker/cbg.py | Python | bsd-2-clause | 2,185 | 0.008696 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table(u'logger_project', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('metadata', self.gf('jsonfield.fields.JSONField')(blank=True)),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(related_name='project_org', to=orm['auth.User'])),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='project_owner', to=orm['auth.User'])),
('shared', self.gf('django.db.models.fields.BooleanField')(default=False)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('logger', ['Project'])
# Adding M2M table for field user_stars on 'Project'
m2m_table_name = db.shorten_name(u'logger_project_user_stars')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['logger.project'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['project_id', 'user_id'])
# Adding unique constraint on 'Project', fields ['name', 'organization']
db.create_unique(u'logger_project', ['name', 'organization_id'])
# Adding model 'ProjectXForm'
db.create_table(u'logger_projectxform', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('xform', self.gf('django.db.models.fields.related.ForeignKey')(related_name='px_xforms', to=orm['logger.XForm'])),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='px_projects', to=orm['logger.Project'])),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='px_creator', to=orm['auth.User'])),
))
db.send_create_signal('logger', ['ProjectXForm'])
# Adding unique constraint on 'ProjectXForm', fields ['xform', 'project']
db.create_unique(u'logger_projectxform', ['xform_id', 'project_id'])
def backwards(self, orm):
# Removing unique constraint on 'ProjectXForm', fields ['xform', 'project']
db.delete_unique(u'logger_projectxform', ['xform_id', 'project_id'])
# Removing unique constraint on 'Project', fields ['name', 'organization']
db.delete_unique(u'logger_project', ['name', 'organization_id'])
# Deleting model 'Project'
db.delete_table(u'logger_project')
# Removing M2M table for field user_stars on 'Project'
db.delete_table(db.shorten_name(u'logger_project_user_stars'))
# Deleting model 'ProjectXForm'
db.delete_table(u'logger_projectxform')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'logger.attachment': {
'Meta': {'object_name': 'Attachment'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'})
},
'logger.instance': {
'Meta': {'object_name': 'Instance'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryCollectionField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'null': 'True', 'to': "orm['logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'logger.instancehistory': {
'Meta': {'object_name': 'InstanceHistory'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'xform_instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_history'", 'to': "orm['logger.Instance']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'logger.note': {
'Meta': {'object_name': 'Note'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['logger.Instance']"}),
'note': ('django.db.models.fields.TextField', [], {})
},
'logger.project': {
'Meta': {'unique_together': "(('name', 'organization'),)", 'object_name': 'Project'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_owner'", 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('jsonfield.fields.JSONField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_org'", 'to': u"orm['auth.User']"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_stars': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'project_stars'", 'symmetrical': 'False', 'to': u"orm['auth.User']"})
},
'logger.projectxform': {
'Meta': {'unique_together': "(('xform', 'project'),)", 'object_name': 'ProjectXForm'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'px_creator'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'px_projects'", 'to': "orm['logger.Project']"}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'px_xforms'", 'to': "orm['logger.XForm']"})
},
'logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'), ('user', 'sms_id_string'))", 'object_name': 'XForm'},
'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'instances_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'last_submission_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'logger.ziggyinstance': {
'Meta': {'object_name': 'ZiggyInstance'},
'client_version': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.CharField', [], {'max_length': '249'}),
'form_instance': ('django.db.models.fields.TextField', [], {}),
'form_version': ('django.db.models.fields.CharField', [], {'default': "u'1.0'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '249'}),
'reporter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggys'", 'to': u"orm['auth.User']"}),
'server_version': ('django.db.models.fields.BigIntegerField', [], {}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggy_submissions'", 'null': 'True', 'to': "orm['logger.XForm']"})
}
}
complete_apps = ['logger']
| spatialdev/onadata | onadata/apps/logger/migrations/0048_auto__add_project__add_unique_project_name_organization__add_projectxf.py | Python | bsd-2-clause | 16,039 | 0.007419 |
import os
from flask import Flask, url_for, request, render_template, jsonify, send_file
from werkzeug.utils import secure_filename
import deepchem as dc
import subprocess
from shutil import copyfile
import csv
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static/')
DEEPCHEM_GUI = Flask('deepchem-gui', static_folder=STATIC_DIR,
static_url_path='/static',
template_folder=os.path.join(STATIC_DIR, 'deepchem-gui',
'templates')
)
UPLOAD_DIR = os.path.join(STATIC_DIR, "data/")
if not os.path.isdir(UPLOAD_DIR):
os.mkdir(UPLOAD_DIR)
print("Created data directory")
# serve ngl webapp clone
@DEEPCHEM_GUI.route('/')
def webapp():
return render_template('webapp.html')
# download protein and ligand files
@DEEPCHEM_GUI.route('/upload', methods=['POST'])
def upload():
if request.method == 'POST':
proteins = request.files.getlist('proteins')
ligands = request.files.getlist('ligands')
smiles = request.files.getlist('smiles')
smarts = request.files.getlist('smarts')
if proteins and ligands:
protein_fns = []
ligand_fns = []
for protein in proteins:
protein_fn = os.path.join(
UPLOAD_DIR,
secure_filename(protein.filename)
)
protein.save(protein_fn)
protein_fns.append(protein_fn)
for ligand in ligands:
ligand_fn = os.path.join(
UPLOAD_DIR,
secure_filename(ligand.filename)
)
ligand.save(ligand_fn)
ligand_fns.append(ligand_fn)
docking_result = dock(protein_fns, ligand_fns)
print(docking_result)
for i in range(len(protein_fns)):
for j in range(len(ligand_fns)):
protein_fn = docking_result[i][j]["protein"]
new_protein_fn = protein_fn.split("/")[-1]
copyfile(protein_fn, os.path.join(
UPLOAD_DIR, new_protein_fn))
docking_result[i][j]["protein"] = url_for(
'static', filename="data/" + new_protein_fn)
ligand_fn = docking_result[i][j]["ligand"]
new_ligand_fn = ligand_fn.split("/")[-1]
copyfile(ligand_fn,
os.path.join(UPLOAD_DIR, new_ligand_fn))
docking_result[i][j]["ligand"] = url_for(
'static', filename="data/" + new_ligand_fn)
return jsonify(docking_result)
elif smiles:
smiles = smiles[0]
smiles_fn = os.path.join(
UPLOAD_DIR,
secure_filename(smiles.filename)
)
smiles.save(smiles_fn)
csvfile = open(smiles_fn, 'r')
csvreader = csv.reader(csvfile, delimiter=',')
data = []
for row in csvreader:
data.append(row)
data = render_smiles(data)
return jsonify(data)
elif smarts:
smarts = smarts[0]
smarts_fn = os.path.join(
UPLOAD_DIR,
secure_filename(smarts.filename)
)
smarts.save(smarts_fn)
csvfile = open(smarts_fn, 'r')
csvreader = csv.reader(csvfile, delimiter=',')
data = []
for row in csvreader:
data.append(row)
data = render_smarts(data)
return jsonify(data)
else:
return jsonify(error_msg="Invalid file transfer.")
else:
raise NotImplementedError
def render_smiles(data):
smiles_col_idx = [j for j in range(len(data[0])) if data[0][j]=="SMILES"][0]
for i, row in enumerate(data):
if i==0:
data[i].append("SMILES IMG")
continue
try:
smiles_str = data[i][smiles_col_idx]
smiles = Chem.MolFromSmiles(smiles_str)
AllChem.Compute2DCoords(smiles)
smiles_fn = 'smiles_%d.png' % i
smiles_img = os.path.join(UPLOAD_DIR, smiles_fn)
Draw.MolToFile(smiles, smiles_img)
data[i].append(url_for('static', filename='data/' + smiles_fn))
except Exception as e:
print(e)
data[i].append("Invalid")
pass
return data
def render_smarts(data):
smarts_col_idx = [j for j in range(len(data[0])) if data[0][j]=="SMARTS"][0]
smiles_col_idx_1 = [j for j in range(len(data[0])) if data[0][j]=="SMILES_1"][0]
smiles_col_idx_2 = [j for j in range(len(data[0])) if data[0][j]=="SMILES_2"][0]
for i, row in enumerate(data):
if i==0:
data[i].append("PRODUCT")
data[i].append("SMILES_1 IMG")
data[i].append("SMILES_2 IMG")
data[i].append("PRODUCT IMG")
continue
try:
smarts_str = data[i][smarts_col_idx]
smiles_str_1 = data[i][smiles_col_idx_1]
smiles_str_2 = data[i][smiles_col_idx_2]
rxn = AllChem.ReactionFromSmarts(smarts_str)
ps = rxn.RunReactants((Chem.MolFromSmiles(smiles_str_1), Chem.MolFromSmiles(smiles_str_2)))
product = ps[0][0]
product_str = Chem.MolToSmiles(product)
data[i].append(product_str)
AllChem.Compute2DCoords(product)
product_fn = 'product_%d.png' % i
product_img = os.path.join(UPLOAD_DIR, product_fn)
Draw.MolToFile(product, product_img)
smiles_1 = Chem.MolFromSmiles(smiles_str_1)
AllChem.Compute2DCoords(smiles_1)
smiles_1_fn = 'smiles_1_%d.png' % i
smiles_1_img = os.path.join(UPLOAD_DIR, smiles_1_fn)
Draw.MolToFile(smiles_1, smiles_1_img)
smiles_2 = Chem.MolFromSmiles(smiles_str_2)
AllChem.Compute2DCoords(smiles_2)
smiles_2_fn = 'smiles_2_%d.png' % i
smiles_2_img = os.path.join(UPLOAD_DIR, smiles_2_fn)
Draw.MolToFile(smiles_2, smiles_2_img)
data[i].append(url_for('static', filename='data/' + product_fn))
data[i].append(url_for('static', filename='data/' + smiles_1_fn))
data[i].append(url_for('static', filename='data/' + smiles_2_fn))
except Exception as e:
print(e)
data[i].append("Invalid")
data[i].append("Invalid")
data[i].append("Invalid")
pass
return data
def dock(protein_fns, ligand_fns):
docking_result = [[{} for j in range(len(ligand_fns))]
for i in range(len(protein_fns))]
for i in range(len(protein_fns)):
for j in range(len(ligand_fns)):
protein_fn = protein_fns[i]
ligand_fn = ligand_fns[j]
print("Docking: %s to %s" % (ligand_fn, protein_fn))
docker = dc.dock.VinaGridDNNDocker(
exhaustiveness=1, detect_pockets=False)
(score, (protein_docked, ligand_docked)
) = docker.dock(protein_fn, ligand_fn)
print("Scores: %f" % (score))
print("Docked protein: %s" % (protein_docked))
print("Docked ligand: %s" % (ligand_docked))
ligand_docked_fn = ligand_docked.replace(".pdbqt", "")
subprocess.call("csh %s %s" % (os.path.join(STATIC_DIR, 'deepchem-gui', 'scripts', 'stripqt.sh'),
ligand_docked_fn), shell=True)
ligand_docked_pdb = ligand_docked_fn + ".pdb"
            docking_result[i][j] = {'score': score[0],
                                    'protein': protein_docked,
                                    'ligand': ligand_docked_pdb}
return docking_result
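# A minimal usage sketch for dock() (hypothetical file names; assumes the
# DeepChem docking backend and the stripqt.sh helper are available):
#
#     result = dock(['protein_1.pdb'], ['ligand_1.pdb'])
#     result[0][0]['score']    # docking score for protein 0 / ligand 0
#     result[0][0]['ligand']   # path to the docked ligand as a .pdb file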
| deepchem/deepchem-gui | gui/app.py | Python | gpl-3.0 | 8,020 | 0.001995 |
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for programs."""
from soc.models import program as program_model
def getSponsorKey(program):
"""Returns key which represents Sponsor of the specified program.
Args:
program: program entity
Returns:
db.Key instance of the sponsor for the specified program
"""
return program_model.Program.sponsor.get_value_for_datastore(program)
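# A minimal usage sketch (hypothetical entity; assumes `program` was loaded
# from the datastore and that `db.get` is available in the caller's scope):
#
#     sponsor_key = getSponsorKey(program)
#     sponsor = db.get(sponsor_key)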
| rhyolight/nupic.son | app/soc/logic/program.py | Python | apache-2.0 | 944 | 0.002119 |
from functools import wraps
import logging
import traceback
from django.conf import settings
from django.core import management
from django.contrib import messages
from django.contrib.auth import authenticate
from django.http import Http404
from django.shortcuts import redirect
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from pykeg.core import defaults
from pykeg.core import models
from pykeg.util import dbstatus
from pykeg.core.util import get_version_object
from .forms import AdminUserForm
from .forms import MiniSiteSettingsForm
logger = logging.getLogger(__name__)
def setup_view(f):
"""Decorator for setup views."""
def new_function(*args, **kwargs):
request = args[0]
if not settings.DEBUG:
raise Http404("Site is not in DEBUG mode.")
if request.kbsite and request.kbsite.is_setup:
raise Http404("Site is already setup, wizard disabled.")
return f(*args, **kwargs)
return wraps(f)(new_function)
@setup_view
@never_cache
def start(request):
"""Shows database setup button"""
context = {}
if request.method == "POST":
try:
            management.call_command("migrate", interactive=False)
return redirect("setup_mode")
except Exception as e:
logger.exception("Error installing database")
context["error_message"] = str(e)
context["error_stack"] = traceback.format_exc()
else:
try:
logger.info("Checking database status ...")
dbstatus.check_db_status()
logger.info("Database status OK.")
except dbstatus.DatabaseNotInitialized:
context["need_install"] = True
except dbstatus.NeedMigration:
context["need_upgrade"] = True
return render(request, "setup_wizard/start.html", context=context)
@setup_view
@never_cache
def mode(request):
"""Shows the enable/disable hardware toggle."""
context = {}
if request.method == "POST":
if "enable_sensing" in request.POST:
response = redirect("setup_accounts")
response.set_cookie("kb_setup_enable_sensing", "True")
return response
elif "disable_sensing" in request.POST:
response = redirect("setup_site_settings")
response.set_cookie("kb_setup_enable_sensing", "False")
response.set_cookie("kb_setup_enable_users", "False")
return response
else:
messages.error(request, "Unknown response.")
return render(request, "setup_wizard/mode.html", context=context)
@setup_view
@never_cache
def upgrade(request):
context = {}
if request.method == "POST":
try:
            management.call_command("migrate", interactive=False)
site = models.KegbotSite.get()
app_version = get_version_object()
site.server_version = str(app_version)
site.save()
return redirect("kb-home")
except Exception as e:
logger.exception("Error installing database")
context["error_message"] = str(e)
context["error_stack"] = traceback.format_exc()
try:
logger.info("Checking database status ...")
dbstatus.check_db_status()
logger.info("Database status OK.")
except dbstatus.DatabaseNotInitialized:
context["message"] = "Database not initialized"
except dbstatus.NeedMigration:
context["message"] = "Database upgrade needed"
return render(request, "setup_wizard/upgrade.html", context=context)
@setup_view
@never_cache
def setup_accounts(request):
""" Shows the enable/disable accounts toggle. """
context = {}
if request.method == "POST":
if "enable_users" in request.POST:
response = redirect("setup_site_settings")
response.set_cookie("kb_setup_enable_users", "True")
return response
elif "disable_users" in request.POST:
response = redirect("setup_site_settings")
response.set_cookie("kb_setup_enable_users", "False")
return response
else:
messages.error(request, "Unknown response.")
return render(request, "setup_wizard/accounts.html", context=context)
@setup_view
@never_cache
def site_settings(request):
context = {}
if request.method == "POST":
site = models.KegbotSite.get()
form = MiniSiteSettingsForm(request.POST, instance=site)
if form.is_valid():
form.save()
messages.success(request, "Settings saved!")
return redirect("setup_admin")
else:
try:
defaults.set_defaults()
except defaults.AlreadyInstalledError:
pass
site = models.KegbotSite.get()
site.enable_sensing = request.COOKIES.get("kb_setup_enable_sensing") == "True"
site.enable_users = request.COOKIES.get("kb_setup_enable_users") == "True"
site.save()
form = MiniSiteSettingsForm(instance=site)
context["form"] = form
return render(request, "setup_wizard/site_settings.html", context=context)
@setup_view
@never_cache
def admin(request):
context = {}
form = AdminUserForm()
if request.method == "POST":
form = AdminUserForm(request.POST)
if form.is_valid():
form.save()
user = authenticate(
username=form.cleaned_data.get("username"),
password=form.cleaned_data.get("password"),
)
return redirect("setup_finish")
context["form"] = form
return render(request, "setup_wizard/admin.html", context=context)
@setup_view
@never_cache
def finish(request):
context = {}
if request.method == "POST":
site = models.KegbotSite.get()
site.is_setup = True
site.save()
messages.success(request, "Tip: Install a new Keg in Admin: Taps")
return redirect("kb-home")
return render(request, "setup_wizard/finish.html", context=context)
| Kegbot/kegbot-server | pykeg/web/setup_wizard/views.py | Python | gpl-2.0 | 6,069 | 0.00033 |
# -*- coding: utf-8 -*-
"""Module that helps in checking the correctness of CSV file structure."""
| TMiguelT/csvschema | csv_schema/__init__.py | Python | mit | 103 | 0 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
from footprint.main.models.presentation.layer_style import LayerStyle
__author__ = 'calthorpe_analytics'
class DefaultLayerStyle(LayerStyle):
"""
The default LayerStyle for newly created Layers that don't match
any more specific LayerStyle subclasses
"""
model_class = object
| CalthorpeAnalytics/urbanfootprint | footprint/client/configuration/default/layer_style/default_layer_style.py | Python | gpl-3.0 | 793 | 0.001261 |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Add the parent 'ast_parser' module to PYTHONPATH
# for consistency between Python and pytest
# Pytest root dir: '$REPO_ROOT/'
# Python root dir: '$REPO_ROOT/ast_parser'
import sys
sys.path.append('..')
| GoogleCloudPlatform/repo-automation-playground | xunit-autolabeler-v2/ast_parser/core/__init__.py | Python | apache-2.0 | 783 | 0 |
#!/usr/bin/env python -t
# -*- coding: UTF-8 -*-
import urllib
class HtmlOutputer(object):
def __init__(self):
self.datas = []
def collect_data(self,data):
if data is None:
return
self.datas.append(data)
def output_html(self):
fout = open('output.html','w')
"""
for data in self.datas:
print data['url'].encode("utf-8"), type(data['url'].encode("utf-8"))
print urllib.unquote(data['url'].encode("utf-8")) ,type(urllib.unquote(data['url'].encode("utf-8")))
#print urllib.unquote(data['url']).encode("utf-8") , type(urllib.unquote(data['url']).encode("utf-8"))
#print data['title'],type(data['title'])
#print data['summary'],type(data['summary'])
"""
fout.write("<html>")
fout.write("<head>")
fout.write("<meta charset='UTF-8'>")
fout.write("</head>")
fout.write("<body>")
fout.write("<table>")
        # The default encoding is ASCII, so encode each field to UTF-8 explicitly.
for data in self.datas:
fout.write("<tr>")
fout.write("<td>%s</td>" % urllib.unquote(data['url'].encode('utf-8')))
fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
fout.write("</tr>")
fout.write("</table>")
fout.write("</body>")
fout.write("</html>")
fout.close()
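if __name__ == '__main__':
    # A minimal usage sketch with hypothetical data; each record must provide
    # the 'url', 'title' and 'summary' keys consumed by output_html() above.
    outputer = HtmlOutputer()
    outputer.collect_data({'url': u'http%3A//example.com',
                           'title': u'Example',
                           'summary': u'A short demo summary.'})
    outputer.output_html()  # writes output.html next to this script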
| guanxin0206/dice_crawler | dice_spider_2/spider/html_outputer.py | Python | bsd-2-clause | 1,497 | 0.008059 |
import unittest
from twitter_bot import messages
class TestBaseMessageProvider(unittest.TestCase):
def test_extract_hashtags_empty_mention(self):
provider = messages.BaseMessageProvider()
hashtags = provider._extract_hashtags({})
self.assertEqual([], hashtags)
def test_extract_hashtags_with_hashtags(self):
mention = {'entities': {'hashtags': [{'text': 'love'}, {'text': 'hate'}]}}
provider = messages.BaseMessageProvider()
hashtags = provider._extract_hashtags(mention)
self.assertEqual(['love', 'hate'], hashtags)
def test_create(self):
provider = messages.BaseMessageProvider()
try:
provider.create({}, 20)
self.fail("Should not be able to call create() on abstract parent class")
except NotImplementedError as e:
error = 'Child class must implement create(self, mention, max_message_length)'
self.assertEqual(error, '{0}'.format(e))
| jessamynsmith/twitterbot | tests/messages/test_base.py | Python | mit | 989 | 0.003033 |
# Copyright Aaron Smith 2009
#
# This file is part of Gity.
#
# Gity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gity. If not, see <http://www.gnu.org/licenses/>.
from _util import *
try:
import sys,re,os,subprocess
except Exception, e:
sys.stderr.write(str(e))
exit(84)
command=""
try:
from _argv import *
if not checkfiles(options): raise Exception("Gity Error: The add command requires files! They weren't set.")
gitcommand="add"
command="%s %s %s %s"%(options.git,gitcommand,"--ignore-errors",make_file_list_for_git(options.files))
rcode,stout,sterr=run_command(command)
rcode_for_git_exit(rcode,sterr)
exit(0)
except Exception, e:
sys.stderr.write("The add command threw this error: " + str(e))
sys.stderr.write("\ncommand: %s\n" % command)
log_gity_version(options.gityversion)
log_gitv(options.git)
exit(84) | gngrwzrd/gity | python/add.py | Python | gpl-3.0 | 1,341 | 0.028337 |
"""
By now, you are given a secret signature consisting of character 'D' and 'I'. 'D' represents a decreasing relationship between two numbers, 'I' represents an increasing relationship between two numbers. And our secret signature was constructed by a special integer array, which contains uniquely all the different number from 1 to n (n is the length of the secret signature plus 1). For example, the secret signature "DI" can be constructed by array [2,1,3] or [3,1,2], but won't be constructed by array [3,2,4] or [2,1,3,4], which are both illegal constructing special string that can't represent the "DI" secret signature.
On the other hand, now your job is to find the lexicographically smallest permutation of [1, 2, ... n] could refer to the given secret signature in the input.
Example 1:
Input: "I"
Output: [1,2]
Explanation: [1,2] is the only legal initial spectial string can construct secret signature "I", where the number 1 and 2 construct an increasing relationship.
Example 2:
Input: "DI"
Output: [2,1,3]
Explanation: Both [2,1,3] and [3,1,2] can construct the secret signature "DI",
but since we want to find the one with the smallest lexicographical permutation, you need to output [2,1,3]
Note:
The input string will only contain the character 'D' and 'I'.
The length of input string is a positive integer and will not exceed 10,000
"""
class Solution(object):
    def findPermutation(self, s):
        """
        :type s: str
        :rtype: List[int]
        """
        i, n = 0, len(s)
        # Start from the identity permutation [1..n+1]; list() keeps the
        # slice assignment below working on both Python 2 and Python 3.
        ans = list(range(1, n + 2))
        while i < n:
            # Find the maximal run of 'D' characters starting at i ...
            j = i
            while j < n and s[j] == "D":
                j += 1
            # ... and reverse the matching slice of the identity permutation;
            # reversing each 'D' run yields the lexicographically smallest
            # permutation that satisfies the signature.
            ans[i:j+1] = ans[i:j+1][::-1]
            i = i + 1 if i == j else j
        return ans
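if __name__ == "__main__":
    # A minimal check against the two examples from the problem statement.
    sol = Solution()
    print(sol.findPermutation("I"))   # -> [1, 2]
    print(sol.findPermutation("DI"))  # -> [2, 1, 3]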
| dichen001/Go4Jobs | JackChen/Google/484. Find Permutation.py | Python | gpl-3.0 | 1,750 | 0.003429 |
from troposphere.constants import NUMBER
from troposphere import FindInMap, GetAtt, Join, Output
from troposphere import Parameter, Ref, Template
from troposphere.awslambda import Function, Code, MEMORY_VALUES
from troposphere.cloudformation import CustomResource
from troposphere.ec2 import Instance
from troposphere.ec2 import SecurityGroup
from troposphere.iam import Role, Policy
t = Template()
t.add_version("2010-09-09")
ExistingVPC = t.add_parameter(Parameter(
"ExistingVPC",
Type="AWS::EC2::VPC::Id",
Description=(
"The VPC ID that includes the security groups in the "
"ExistingSecurityGroups parameter."
),
))
InstanceType = t.add_parameter(Parameter(
"InstanceType",
Default="t2.micro",
Type="String",
AllowedValues=["t2.micro", "m1.small"],
))
ExistingSecurityGroups = t.add_parameter(Parameter(
"ExistingSecurityGroups",
Type="List<AWS::EC2::SecurityGroup::Id>",
))
MemorySize = t.add_parameter(Parameter(
'LambdaMemorySize',
Type=NUMBER,
Description='Amount of memory to allocate to the Lambda Function',
Default='128',
AllowedValues=MEMORY_VALUES
))
Timeout = t.add_parameter(Parameter(
'LambdaTimeout',
Type=NUMBER,
Description='Timeout in seconds for the Lambda function',
Default='60'
))
t.add_mapping("AWSInstanceType2Arch",
{u'm1.small': {u'Arch': u'PV64'},
u't2.micro': {u'Arch': u'HVM64'}}
)
t.add_mapping("AWSRegionArch2AMI",
{u'ap-northeast-1': {u'HVM64': u'ami-cbf90ecb',
u'PV64': u'ami-27f90e27'},
u'ap-southeast-1': {u'HVM64': u'ami-68d8e93a',
u'PV64': u'ami-acd9e8fe'},
u'ap-southeast-2': {u'HVM64': u'ami-fd9cecc7',
u'PV64': u'ami-ff9cecc5'},
u'cn-north-1': {u'HVM64': u'ami-f239abcb',
u'PV64': u'ami-fa39abc3'},
u'eu-central-1': {u'HVM64': u'ami-a8221fb5',
u'PV64': u'ami-ac221fb1'},
u'eu-west-1': {u'HVM64': u'ami-a10897d6',
u'PV64': u'ami-bf0897c8'},
u'sa-east-1': {u'HVM64': u'ami-b52890a8',
u'PV64': u'ami-bb2890a6'},
u'us-east-1': {u'HVM64': u'ami-1ecae776',
u'PV64': u'ami-1ccae774'},
u'us-west-1': {u'HVM64': u'ami-d114f295',
u'PV64': u'ami-d514f291'},
u'us-west-2': {u'HVM64': u'ami-e7527ed7',
u'PV64': u'ami-ff527ecf'}}
)
code = [
"var response = require('cfn-response');",
"exports.handler = function(event, context) {",
" var responseData = {Value: event.ResourceProperties.List};",
" responseData.Value.push(event.ResourceProperties.AppendedItem);",
" response.send(event, context, response.SUCCESS, responseData);",
"};",
]
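# The inline Node.js handler above backs the custom resource below: it copies
# the resource's List property, appends AppendedItem, and returns the result
# through cfn-response so templates can read it via GetAtt(..., "Value").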
AppendItemToListFunction = t.add_resource(Function(
"AppendItemToListFunction",
Code=Code(
ZipFile=Join("", code)
),
Handler="index.handler",
Role=GetAtt("LambdaExecutionRole", "Arn"),
Runtime="nodejs",
MemorySize=Ref(MemorySize),
Timeout=Ref(Timeout)
))
LambdaExecutionRole = t.add_resource(Role(
"LambdaExecutionRole",
Path="/",
Policies=[Policy(
PolicyName="root",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [{
"Action": ["logs:*"],
"Resource": "arn:aws:logs:*:*:*",
"Effect": "Allow"
}]
})],
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {
"Service": ["lambda.amazonaws.com"]
}
}]
},
))
MyEC2Instance = t.add_resource(Instance(
"MyEC2Instance",
SecurityGroupIds=GetAtt("AllSecurityGroups", "Value"),
InstanceType=Ref(InstanceType),
ImageId=FindInMap("AWSRegionArch2AMI", Ref("AWS::Region"),
FindInMap("AWSInstanceType2Arch", Ref(InstanceType),
"Arch")),
))
AllSecurityGroups = t.add_resource(CustomResource(
"AllSecurityGroups",
List=Ref(ExistingSecurityGroups),
AppendedItem=Ref("SecurityGroup"),
ServiceToken=GetAtt(AppendItemToListFunction, "Arn"),
))
SecurityGroup = t.add_resource(SecurityGroup(
"SecurityGroup",
SecurityGroupIngress=[
{"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
"FromPort": "80"}],
VpcId=Ref(ExistingVPC),
GroupDescription="Allow HTTP traffic to the host",
SecurityGroupEgress=[
{"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
"FromPort": "80"}],
))
AllSecurityGroups = t.add_output(Output(
"AllSecurityGroups",
Description="Security Groups that are associated with the EC2 instance",
Value=Join(", ", GetAtt(AllSecurityGroups, "Value")),
))
print(t.to_json())
| 7digital/troposphere | examples/Lambda.py | Python | bsd-2-clause | 5,154 | 0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This goes on top of skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
class GMM(estimator.Estimator, TransformerMixin):
"""GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
batch_size=128,
steps=10,
continue_training=False,
config=None,
verbose=1):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
batch_size: See TensorFlowEstimator
steps: See TensorFlowEstimator
continue_training: See TensorFlowEstimator
config: See TensorFlowEstimator
verbose: See TensorFlowEstimator
"""
super(GMM, self).__init__(
model_dir=model_dir,
config=config)
self.batch_size = batch_size
self.steps = steps
self.continue_training = continue_training
self.verbose = verbose
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
"""Trains a GMM clustering on x.
Note: See TensorFlowEstimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: List of `Monitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(
x, None, self._num_clusters, self.batch_size)
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[GMM.ASSIGNMENTS] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score.
"""
return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[GMM.ALL_SCORES] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def clusters(self):
"""Returns cluster centers."""
clusters = tf.contrib.framework.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return tf.contrib.framework.load_variable(
self.model_dir,
gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat(1, [features[k] for k in sorted(features.keys())])
return features
def _get_train_ops(self, features, _):
(_,
_,
losses,
training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
loss = tf.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
return training_op, loss
def _get_predict_ops(self, features):
(all_scores,
model_predictions,
_,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.SCORES: tf.reduce_sum(losses),
}
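# A minimal usage sketch (hypothetical data; `points` is any float matrix of
# shape [n_samples, n_features]):
#
#     gmm = GMM(num_clusters=3, covariance_type='full', steps=20)
#     gmm.fit(points)
#     assignments = gmm.predict(points)   # cluster id per row
#     centers = gmm.clusters()            # [num_clusters, n_features]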
| neilhan/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | Python | apache-2.0 | 7,521 | 0.002792 |
# coding: utf-8
"""Test that tokenizer prefixes, suffixes and infixes are handled correctly."""
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize('text', ["(can)"])
def test_tokenizer_splits_no_special(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["can't"])
def test_tokenizer_splits_no_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize('text', ["(can't"])
def test_tokenizer_splits_prefix_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["can't)"])
def test_tokenizer_splits_suffix_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["(can't)"])
def test_tokenizer_splits_even_wrap(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize('text', ["(can't?)"])
def test_tokenizer_splits_uneven_wrap(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 5
@pytest.mark.parametrize('text,length', [("U.S.", 1), ("us.", 2), ("(U.S.", 2)])
def test_tokenizer_splits_prefix_interact(en_tokenizer, text, length):
tokens = en_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize('text', ["U.S.)"])
def test_tokenizer_splits_suffix_interact(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize('text', ["(U.S.)"])
def test_tokenizer_splits_even_wrap_interact(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["(U.S.?)"])
def test_tokenizer_splits_uneven_wrap_interact(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize('text', ["best-known"])
def test_tokenizer_splits_hyphens(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_tokenizer_splits_numeric_range(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["best.Known", "Hello.World"])
def test_tokenizer_splits_period_infix(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["Hello,world", "one,two"])
def test_tokenizer_splits_comma_infix(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
assert tokens[0].text == text.split(",")[0]
assert tokens[1].text == ","
assert tokens[2].text == text.split(",")[1]
@pytest.mark.parametrize('text', ["best...Known", "best...known"])
def test_tokenizer_splits_ellipsis_infix(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
def test_tokenizer_splits_double_hyphen_infix(en_tokenizer):
tokens = en_tokenizer("No decent--let alone well-bred--people.")
assert tokens[0].text == "No"
assert tokens[1].text == "decent"
assert tokens[2].text == "--"
assert tokens[3].text == "let"
assert tokens[4].text == "alone"
assert tokens[5].text == "well"
assert tokens[6].text == "-"
assert tokens[7].text == "bred"
assert tokens[8].text == "--"
assert tokens[9].text == "people"
@pytest.mark.xfail
def test_tokenizer_splits_period_abbr(en_tokenizer):
text = "Today is Tuesday.Mr."
tokens = en_tokenizer(text)
assert len(tokens) == 5
assert tokens[0].text == "Today"
assert tokens[1].text == "is"
assert tokens[2].text == "Tuesday"
assert tokens[3].text == "."
assert tokens[4].text == "Mr."
@pytest.mark.xfail
def test_tokenizer_splits_em_dash_infix(en_tokenizer):
# Re Issue #225
tokens = en_tokenizer("""Will this road take me to Puddleton?\u2014No, """
"""you'll have to walk there.\u2014Ariel.""")
assert tokens[6].text == "Puddleton"
assert tokens[7].text == "?"
assert tokens[8].text == "\u2014"
| aikramer2/spaCy | spacy/tests/lang/en/test_prefix_suffix_infix.py | Python | mit | 4,124 | 0.000242 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
A package for handling multi-dimensional data and associated metadata.
.. note ::
The Iris documentation has further usage information, including
a :ref:`user guide <user_guide_index>` which should be the first port of
call for new users.
The functions in this module provide the main way to load and/or save
your data.
The :func:`load` function provides a simple way to explore data from
the interactive Python prompt. It will convert the source data into
:class:`Cubes <iris.cube.Cube>`, and combine those cubes into
higher-dimensional cubes where possible.
The :func:`load_cube` and :func:`load_cubes` functions are similar to
:func:`load`, but they raise an exception if the number of cubes is not
what was expected. They are more useful in scripts, where they can
provide an early sanity check on incoming data.
The :func:`load_raw` function is provided for those occasions where the
automatic combination of cubes into higher-dimensional cubes is
undesirable. However, it is intended as a tool of last resort! If you
experience a problem with the automatic combination process then please
raise an issue with the Iris developers.
To persist a cube to the file-system, use the :func:`save` function.
All the load functions share very similar arguments:
* uris:
Either a single filename/URI expressed as a string or
:class:`pathlib.PurePath`, or an iterable of filenames/URIs.
Filenames can contain `~` or `~user` abbreviations, and/or
Unix shell-style wildcards (e.g. `*` and `?`). See the
standard library function :func:`os.path.expanduser` and
module :mod:`fnmatch` for more details.
.. warning::
If supplying a URL, only OPeNDAP Data Sources are supported.
* constraints:
Either a single constraint, or an iterable of constraints.
Each constraint can be either a string, an instance of
:class:`iris.Constraint`, or an instance of
:class:`iris.AttributeConstraint`. If the constraint is a string
it will be used to match against cube.name().
.. _constraint_egs:
For example::
# Load air temperature data.
load_cube(uri, 'air_temperature')
# Load data with a specific model level number.
load_cube(uri, iris.Constraint(model_level_number=1))
# Load data with a specific STASH code.
load_cube(uri, iris.AttributeConstraint(STASH='m01s00i004'))
* callback:
A function to add metadata from the originating field and/or URI which
obeys the following rules:
1. Function signature must be: ``(cube, field, filename)``.
2. Modifies the given cube inplace, unless a new cube is
returned by the function.
3. If the cube is to be rejected the callback must raise
an :class:`iris.exceptions.IgnoreCubeException`.
For example::
def callback(cube, field, filename):
# Extract ID from filenames given as: <prefix>__<exp_id>
experiment_id = filename.split('__')[1]
experiment_coord = iris.coords.AuxCoord(
experiment_id, long_name='experiment_id')
cube.add_aux_coord(experiment_coord)
"""
import contextlib
import glob
import itertools
import os.path
import pathlib
import threading
import iris._constraints
from iris._deprecation import IrisDeprecation, warn_deprecated
import iris.config
import iris.io
try:
import iris_sample_data
except ImportError:
iris_sample_data = None
# Iris revision.
__version__ = "3.3.dev0"
# Restrict the names imported when using "from iris import *"
__all__ = [
"AttributeConstraint",
"Constraint",
"FUTURE",
"Future",
"IrisDeprecation",
"NameConstraint",
"load",
"load_cube",
"load_cubes",
"load_raw",
"sample_data_path",
"save",
"site_configuration",
]
Constraint = iris._constraints.Constraint
AttributeConstraint = iris._constraints.AttributeConstraint
NameConstraint = iris._constraints.NameConstraint
class Future(threading.local):
"""Run-time configuration controller."""
def __init__(self):
"""
A container for run-time options controls.
To adjust the values simply update the relevant attribute from
within your code. For example::
iris.FUTURE.example_future_flag = False
If Iris code is executed with multiple threads, note the values of
these options are thread-specific.
.. note::
iris.FUTURE.example_future_flag does not exist. It is provided
as an example because there are currently no flags in
iris.Future.
"""
# The flag 'example_future_flag' is provided as a future reference
# for the structure of this class.
#
# self.__dict__['example_future_flag'] = example_future_flag
pass
def __repr__(self):
# msg = ('Future(example_future_flag={})')
# return msg.format(self.example_future_flag)
msg = "Future()"
return msg.format()
# deprecated_options = {'example_future_flag': 'warning',}
deprecated_options = {}
def __setattr__(self, name, value):
if name in self.deprecated_options:
level = self.deprecated_options[name]
if level == "error" and not value:
emsg = (
"setting the 'Future' property {prop!r} has been "
"deprecated to be removed in a future release, and "
"deprecated {prop!r} behaviour has been removed. "
"Please remove code that sets this property."
)
raise AttributeError(emsg.format(prop=name))
else:
msg = (
"setting the 'Future' property {!r} is deprecated "
"and will be removed in a future release. "
"Please remove code that sets this property."
)
warn_deprecated(msg.format(name))
if name not in self.__dict__:
msg = "'Future' object has no attribute {!r}".format(name)
raise AttributeError(msg)
self.__dict__[name] = value
@contextlib.contextmanager
def context(self, **kwargs):
"""
Return a context manager which allows temporary modification of
the option values for the active thread.
On entry to the `with` statement, all keyword arguments are
applied to the Future object. On exit from the `with`
statement, the previous state is restored.
For example::
with iris.FUTURE.context(example_future_flag=False):
# ... code that expects some past behaviour
.. note::
iris.FUTURE.example_future_flag does not exist and is
provided only as an example since there are currently no
flags in Future.
"""
# Save the current context
current_state = self.__dict__.copy()
# Update the state
for name, value in kwargs.items():
setattr(self, name, value)
try:
yield
finally:
# Return the state
self.__dict__.clear()
self.__dict__.update(current_state)
#: Object containing all the Iris run-time options.
FUTURE = Future()
# Initialise the site configuration dictionary.
#: Iris site configuration dictionary.
site_configuration = {}
try:
from iris.site_config import update as _update
except ImportError:
pass
else:
_update(site_configuration)
def _generate_cubes(uris, callback, constraints):
"""Returns a generator of cubes given the URIs and a callback."""
if isinstance(uris, (str, pathlib.PurePath)):
uris = [uris]
# Group collections of uris by their iris handler
# Create list of tuples relating schemes to part names
uri_tuples = sorted(iris.io.decode_uri(uri) for uri in uris)
for scheme, groups in itertools.groupby(uri_tuples, key=lambda x: x[0]):
# Call each scheme handler with the appropriate URIs
if scheme == "file":
part_names = [x[1] for x in groups]
for cube in iris.io.load_files(part_names, callback, constraints):
yield cube
elif scheme in ["http", "https"]:
urls = [":".join(x) for x in groups]
for cube in iris.io.load_http(urls, callback):
yield cube
else:
raise ValueError("Iris cannot handle the URI scheme: %s" % scheme)
def _load_collection(uris, constraints=None, callback=None):
from iris.cube import _CubeFilterCollection
try:
cubes = _generate_cubes(uris, callback, constraints)
result = _CubeFilterCollection.from_cubes(cubes, constraints)
except EOFError as e:
raise iris.exceptions.TranslationError(
"The file appears empty or incomplete: {!r}".format(str(e))
)
return result
def load(uris, constraints=None, callback=None):
"""
Loads any number of Cubes for each constraint.
For a full description of the arguments, please see the module
documentation for :mod:`iris`.
Args:
* uris:
One or more filenames/URIs, as a string or :class:`pathlib.PurePath`.
If supplying a URL, only OPeNDAP Data Sources are supported.
Kwargs:
* constraints:
One or more constraints.
* callback:
A modifier/filter function.
Returns:
An :class:`iris.cube.CubeList`. Note that there is no inherent order
to this :class:`iris.cube.CubeList` and it should be treated as if it
were random.
"""
return _load_collection(uris, constraints, callback).merged().cubes()
def load_cube(uris, constraint=None, callback=None):
"""
Loads a single cube.
For a full description of the arguments, please see the module
documentation for :mod:`iris`.
Args:
* uris:
One or more filenames/URIs, as a string or :class:`pathlib.PurePath`.
If supplying a URL, only OPeNDAP Data Sources are supported.
Kwargs:
* constraints:
A constraint.
* callback:
A modifier/filter function.
Returns:
An :class:`iris.cube.Cube`.
"""
constraints = iris._constraints.list_of_constraints(constraint)
if len(constraints) != 1:
raise ValueError("only a single constraint is allowed")
cubes = _load_collection(uris, constraints, callback).cubes()
try:
cube = cubes.merge_cube()
except iris.exceptions.MergeError as e:
raise iris.exceptions.ConstraintMismatchError(str(e))
except ValueError:
raise iris.exceptions.ConstraintMismatchError("no cubes found")
return cube
def load_cubes(uris, constraints=None, callback=None):
"""
Loads exactly one Cube for each constraint.
For a full description of the arguments, please see the module
documentation for :mod:`iris`.
Args:
* uris:
One or more filenames/URIs, as a string or :class:`pathlib.PurePath`.
If supplying a URL, only OPeNDAP Data Sources are supported.
Kwargs:
* constraints:
One or more constraints.
* callback:
A modifier/filter function.
Returns:
An :class:`iris.cube.CubeList`. Note that there is no inherent order
to this :class:`iris.cube.CubeList` and it should be treated as if it
were random.
"""
# Merge the incoming cubes
collection = _load_collection(uris, constraints, callback).merged()
# Make sure we have exactly one merged cube per constraint
bad_pairs = [pair for pair in collection.pairs if len(pair) != 1]
if bad_pairs:
fmt = " {} -> {} cubes"
bits = [fmt.format(pair.constraint, len(pair)) for pair in bad_pairs]
msg = "\n" + "\n".join(bits)
raise iris.exceptions.ConstraintMismatchError(msg)
return collection.cubes()
def load_raw(uris, constraints=None, callback=None):
"""
Loads non-merged cubes.
This function is provided for those occasions where the automatic
combination of cubes into higher-dimensional cubes is undesirable.
However, it is intended as a tool of last resort! If you experience
a problem with the automatic combination process then please raise
an issue with the Iris developers.
For a full description of the arguments, please see the module
documentation for :mod:`iris`.
Args:
* uris:
One or more filenames/URIs, as a string or :class:`pathlib.PurePath`.
If supplying a URL, only OPeNDAP Data Sources are supported.
Kwargs:
* constraints:
One or more constraints.
* callback:
A modifier/filter function.
Returns:
An :class:`iris.cube.CubeList`.
"""
from iris.fileformats.um._fast_load import _raw_structured_loading
with _raw_structured_loading():
return _load_collection(uris, constraints, callback).cubes()
save = iris.io.save
def sample_data_path(*path_to_join):
"""
Given the sample data resource, returns the full path to the file.
.. note::
This function is only for locating files in the iris sample data
collection (installed separately from iris). It is not needed or
appropriate for general file access.
"""
target = os.path.join(*path_to_join)
if os.path.isabs(target):
raise ValueError(
"Absolute paths, such as {!r}, are not supported.\n"
"NB. This function is only for locating files in the "
"iris sample data collection. It is not needed or "
"appropriate for general file access.".format(target)
)
if iris_sample_data is not None:
target = os.path.join(iris_sample_data.path, target)
else:
raise ImportError(
"Please install the 'iris-sample-data' package to "
"access sample data."
)
if not glob.glob(target):
raise ValueError(
"Sample data file(s) at {!r} not found.\n"
"NB. This function is only for locating files in the "
"iris sample data collection. It is not needed or "
"appropriate for general file access.".format(target)
)
return target
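# A minimal usage sketch (hypothetical file name; requires the optional
# iris-sample-data package to be installed):
#
#     fname = sample_data_path('air_temp.pp')
#     cube = load_cube(fname)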
| SciTools/iris | lib/iris/__init__.py | Python | lgpl-3.0 | 14,621 | 0 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'recipe_engine/buildbucket',
'recipe_engine/context',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
'git',
]
def RunSteps(api):
url = 'https://chromium.googlesource.com/chromium/src.git'
# git.checkout can optionally dump GIT_CURL_VERBOSE traces to a log file,
# useful for debugging git access issues that are reproducible only on bots.
curl_trace_file = None
if api.properties.get('use_curl_trace'):
curl_trace_file = api.path['start_dir'].join('curl_trace.log')
submodule_update_force = api.properties.get('submodule_update_force', False)
submodule_update_recursive = api.properties.get('submodule_update_recursive',
True)
# You can use api.git.checkout to perform all the steps of a safe checkout.
revision = (api.buildbucket.gitiles_commit.ref or
api.buildbucket.gitiles_commit.id)
retVal = api.git.checkout(
url,
ref=revision,
recursive=True,
submodule_update_force=submodule_update_force,
set_got_revision=api.properties.get('set_got_revision'),
curl_trace_file=curl_trace_file,
remote_name=api.properties.get('remote_name'),
display_fetch_size=api.properties.get('display_fetch_size'),
file_name=api.properties.get('checkout_file_name'),
submodule_update_recursive=submodule_update_recursive,
use_git_cache=api.properties.get('use_git_cache'),
tags=api.properties.get('tags'))
assert retVal == "deadbeef", (
"expected retVal to be %r but was %r" % ("deadbeef", retVal))
# count_objects shows number and size of objects in .git dir.
api.git.count_objects(
name='count-objects',
can_fail_build=api.properties.get('count_objects_can_fail_build'),
git_config_options={'foo': 'bar'})
# Get the remote URL.
api.git.get_remote_url(
step_test_data=lambda: api.raw_io.test_api.stream_output('foo'))
api.git.get_timestamp(test_data='foo')
# You can use api.git.fetch_tags to fetch all tags from the remote
api.git.fetch_tags(api.properties.get('remote_name'))
# If you need to run more arbitrary git commands, you can use api.git itself,
# which behaves like api.step(), but automatically sets the name of the step.
with api.context(cwd=api.path['checkout']):
api.git('status')
api.git('status', name='git status can_fail_build',
can_fail_build=True)
api.git('status', name='git status cannot_fail_build',
can_fail_build=False)
# You should run git new-branch before you upload something with git cl.
api.git.new_branch('refactor') # Upstream is origin/master by default.
# And use upstream kwarg to set up different upstream for tracking.
api.git.new_branch('feature', upstream='refactor')
# You can use api.git.rebase to rebase the current branch onto another one
api.git.rebase(name_prefix='my repo', branch='origin/master',
dir_path=api.path['checkout'],
remote_name=api.properties.get('remote_name'))
if api.properties.get('cat_file', None):
step_result = api.git.cat_file_at_commit(api.properties['cat_file'],
revision,
stdout=api.raw_io.output())
if 'TestOutput' in step_result.stdout:
pass # Success!
# Bundle the repository.
api.git.bundle_create(
api.path['start_dir'].join('all.bundle'))
def GenTests(api):
yield api.test('basic')
yield api.test('basic_tags') + api.properties(tags=True)
yield api.test('basic_ref') + api.buildbucket.ci_build(git_ref='refs/foo/bar')
yield api.test('basic_branch') + api.buildbucket.ci_build(
git_ref='refs/heads/testing')
yield api.test('basic_hash') + api.buildbucket.ci_build(
revision='abcdef0123456789abcdef0123456789abcdef01', git_ref=None)
yield api.test('basic_file_name') + api.properties(checkout_file_name='DEPS')
yield api.test('basic_submodule_update_force') + api.properties(
submodule_update_force=True)
yield api.test('platform_win') + api.platform.name('win')
yield (
api.test('curl_trace_file') +
api.properties(use_curl_trace=True) +
api.buildbucket.ci_build(git_ref='refs/foo/bar')
)
yield (
api.test('can_fail_build') +
api.step_data('git status can_fail_build', retcode=1)
)
yield (
api.test('cannot_fail_build') +
api.step_data('git status cannot_fail_build', retcode=1)
)
yield (
api.test('set_got_revision') +
api.properties(set_got_revision=True)
)
yield (
api.test('rebase_failed') +
api.step_data('my repo rebase', retcode=1)
)
yield api.test('remote_not_origin') + api.properties(remote_name='not_origin')
yield (
api.test('count-objects_delta') +
api.properties(display_fetch_size=True))
yield (
api.test('count-objects_failed') +
api.step_data('count-objects', retcode=1))
yield (
api.test('count-objects_with_bad_output') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))))
yield (
api.test('count-objects_with_bad_output_fails_build') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))) +
api.properties(count_objects_can_fail_build=True))
yield (
api.test('cat-file_test') +
api.step_data('git cat-file abcdef12345:TestFile',
stdout=api.raw_io.output('TestOutput')) +
api.buildbucket.ci_build(revision='abcdef12345', git_ref=None) +
api.properties(cat_file='TestFile'))
yield (
api.test('git-cache-checkout') +
api.properties(use_git_cache=True))
| endlessm/chromium-browser | third_party/depot_tools/recipes/recipe_modules/git/examples/full.py | Python | bsd-3-clause | 5,942 | 0.009424 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
VERSION = '1.0.0'
README = open('README.rst').read()
setup(
name='mach',
description='Generic command line command dispatching framework.',
long_description=README,
license='MPL 2.0',
author='Gregory Szorc',
author_email='gregory.szorc@gmail.com',
url='https://developer.mozilla.org/en-US/docs/Developer_Guide/mach',
packages=['mach', 'mach.mixin'],
version=VERSION,
classifiers=[
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
install_requires=[
'blessings',
'mozfile',
'mozprocess',
'six',
],
tests_require=['mock'],
)
| CYBAI/servo | python/mach/setup.py | Python | mpl-2.0 | 1,204 | 0 |
from django.contrib.auth import views as auth_views
from django.conf.urls import url
from . import views
app_name = "accounts"
urlpatterns = [
url(r'^login/$', auth_views.login, {'template_name': 'accounts/signin.html'}, name='signin'),
url(r'^signup/', views.SignUpView.as_view(), name="signup"),
url(r'^logout/', auth_views.logout, name="logout"),
]
| rodriguesrl/reddit-clone-udemy | accounts/urls.py | Python | mit | 366 | 0.002732 |