text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
TIPOS_MENSAJES = {
'entrada': "entrada",
'salida': "salida",
'pago_ticket': "pago de ticket",
}
MENSAJES = {
'entrada': "Se ha reportado en el sistema una entrada ",
'salida': "salida",
'pago_ticket': "pago de ticket",
}
def
| ac-seguridad/ac-seguridad | project/manejador/mensajes.py | Python | apache-2.0 | 256 | 0.003906 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Motor INA219 hardware monitor configuration."""
from makani.avionics.firmware.drivers import ina219_types
from makani.avionics.firmware.serial import motor_serial_params as rev
ina219_default = {
'name': '',
'address': 0x0,
'shunt_resistor': 0.01,
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range40mv,
'bus_adc': ina219_types.kIna219Adc128Samples,
'shunt_adc': ina219_types.kIna219Adc128Samples,
'mode': ina219_types.kIna219ModeShuntAndBusContinuous,
'current_max': -1,
'voltage_limits_percent': [95, 105],
}
ina219_16v_40mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range40mv,
})
ina219_16v_80mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range80mv,
})
ina219_32v_40mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage32V,
'range': ina219_types.kIna219Range40mv,
})
ina219_32v_160mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage32V,
'range': ina219_types.kIna219Range160mv,
})
gin_a1 = [
dict(ina219_32v_40mv, name='12v', address=0x40, shunt_resistor=0.012),
dict(ina219_16v_40mv, name='1v2', address=0x42, shunt_resistor=0.02),
dict(ina219_16v_40mv, name='3v3', address=0x45, shunt_resistor=0.02),
]
gin_a2 = gin_a1
gin_a3 = [
dict(ina219_32v_160mv, name='12v', address=0x41, shunt_resistor=0.05),
dict(ina219_16v_80mv, name='1v2', address=0x42, shunt_resistor=0.05),
dict(ina219_16v_80mv, name='3v3', address=0x45, shunt_resistor=0.05),
]
ina219_config = (rev.MotorHardware, {
rev.MotorHardware.GIN_A1: gin_a1,
rev.MotorHardware.GIN_A2: gin_a2,
rev.MotorHardware.GIN_A3: gin_a3,
rev.MotorHardware.GIN_A4_CLK16: gin_a3,
rev.MotorHardware.GIN_A4_CLK8: gin_a3,
rev.MotorHardware.OZONE_A1: gin_a3,
})
| google/makani | avionics/motor/monitors/motor_ina219.py | Python | apache-2.0 | 2,535 | 0 |
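The Makani sample above ends by pairing the rev.MotorHardware enum with a per-revision dictionary of INA219 monitor settings. The sketch below is not part of the Makani source: it mirrors that (enum, mapping) shape with plain strings and dicts, just to show how the monitor list for one hardware revision could be looked up and printed.
# Hypothetical stand-in for ina219_config: revision names map to monitor dicts.
example_config = ('MotorHardware', {
    'GIN_A1': [
        {'name': '12v', 'address': 0x40, 'shunt_resistor': 0.012},
        {'name': '1v2', 'address': 0x42, 'shunt_resistor': 0.02},
        {'name': '3v3', 'address': 0x45, 'shunt_resistor': 0.02},
    ],
})

def monitors_for_revision(config, revision):
    """Return the INA219 monitor entries configured for one hardware revision."""
    _enum_name, by_revision = config
    return by_revision.get(revision, [])

for monitor in monitors_for_revision(example_config, 'GIN_A1'):
    print('%s: I2C address 0x%02X, shunt %.3f ohm'
          % (monitor['name'], monitor['address'], monitor['shunt_resistor']))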
import csv
import sys
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
def parse(file_name):
"Parses the data sets from the csv file we are given to work with"
try:
file = open(file_name)
except IOError:
print "Failed to open the data file"
sys.exit()
rawFile = csv.reader(file) # Reading the csv file into a raw form
rawData = list(rawFile) # Converting the raw data into list form.
file.close()
return rawData
def toXandY(unorderedData):
"This method converts seperates x and y co-ordinates for plotting"
orderedData = []
orderedData.append([]) # Add a new sublist every time
orderedData.append([]) # Add a new sublist every time
listSize = len(unorderedData)
for x in range(0, listSize):
orderedData[0].append(unorderedData[x][0]) # Separates the x-coords
for y in range(0, listSize):
orderedData[1].append(unorderedData[y][1]) # Separates the y-coords
return orderedData
def main():
newData = []
f_line_x = []
f_line_y = []
file_name = "data.csv"
data = parse(file_name) # Calling the parse function we made
labels = data.pop(0) # Necessary evil
frontier_size = int(data.pop(0)[0])
list_size = len(data)
for i in range(0, list_size): # Converting the string list to float
newData.append([]) # Add a new sublist every time
for j in range(0, 2): # Append converted data to the new list
newData[i].append(float(data[i][j]))
DataXandY = toXandY(newData) # DataXandY -> [[Xs][Ys]]
i = 0
while i < frontier_size:
i+=1
f_line_x.append(DataXandY[0].pop(0))
f_line_y.append(DataXandY[1].pop(0))
plt.xlabel(labels[0])
plt.ylabel(labels[1])
plt.title("Pareto dominance")
plt.plot(DataXandY[0], DataXandY[1], "o", color="g") # Plot all points
plt.plot(f_line_x, f_line_y, "-o", color="r") # Plot frontier line
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
if __name__ == "__main__":
main()
| GZakharov1525/SOFE3770 | Assignment2/plot_lines.py | Python | gpl-3.0 | 2,119 | 0.003303 |
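As a usage note for the plotting script above: parse() and main() expect data.csv to start with a row of axis labels, then a single-cell row giving the frontier size, followed by x,y pairs whose first frontier_size rows are drawn as the red frontier line. The snippet below writes a small file in that layout; the column names and values are invented for illustration.
# Illustrative data.csv for plot_lines.py (made-up values).
example_csv = (
    "cost,benefit\n"  # labels row, consumed by data.pop(0) for xlabel/ylabel
    "3\n"             # frontier size: the first 3 points form the red frontier line
    "1.0,5.0\n"
    "2.0,3.5\n"
    "4.0,1.0\n"
    "2.5,4.5\n"
    "3.5,3.0\n"
)
with open("data.csv", "w") as f:
    f.write(example_csv)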
#!/usr/bin/env python3
"""*.h5 の値の最小・最大などを確認するスクリプト。"""
import argparse
import pathlib
import sys
import h5py
import numpy as np
try:
import pytoolkit as tk
except ImportError:
sys.path.insert(0, str(pathlib.Path(__file__).resolve().parent.parent.parent))
import pytoolkit as tk
logger = tk.log.get(__name__)
def main():
tk.utils.better_exceptions()
tk.log.init(None)
parser = argparse.ArgumentParser(description="Check the minimum/maximum values and other statistics in a *.h5 file.")
parser.add_argument("model_path", type=pathlib.Path, help="path to the target file (*.h5)")
args = parser.parse_args()
logger.info(f"{args.model_path} Loading...")
absmax_list = []
with h5py.File(args.model_path, mode="r") as f:
model_weights = f["model_weights"]
layer_names = model_weights.attrs["layer_names"]
for layer_name in layer_names:
g = model_weights[layer_name]
weight_names = g.attrs["weight_names"]
for weight_name in weight_names:
w = np.asarray(g[weight_name])
key = f"/model_weights/{layer_name}/{weight_name}"
if w.size == 1:
logger.info(f"{key}\t value={np.ravel(w)[0]:.2f}")
else:
logger.info(
f"{key}\t min={w.min():.2f} max={w.max():.2f} mean={w.mean():.2f} std={w.std():.2f}"
)
absmax_list.append((key, np.abs(w).max()))
logger.info("abs Top-10:")
for key, absvalue in list(sorted(absmax_list, key=lambda x: -x[1]))[:10]:
logger.info(f"{absvalue:6.1f}: {key}")
if __name__ == "__main__":
main()
| ak110/pytoolkit | pytoolkit/bin/h5ls.py | Python | mit | 1,745 | 0.001826 |
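For context, the h5ls script above assumes a Keras-style weight file: a model_weights group whose layer_names attribute lists layer groups, each of which carries a weight_names attribute naming its datasets. The sketch below fabricates a minimal file of that shape (layer names and shapes are invented) so the script has something to inspect.
# Build a tiny, made-up *.h5 file in the layout h5ls.py reads.
import h5py
import numpy as np

with h5py.File("tiny_weights.h5", "w") as f:
    g = f.create_group("model_weights")
    g.attrs["layer_names"] = ["dense_1"]
    layer = g.create_group("dense_1")
    layer.attrs["weight_names"] = ["dense_1/kernel:0", "dense_1/bias:0"]
    layer.create_dataset("dense_1/kernel:0", data=np.random.randn(4, 4))
    layer.create_dataset("dense_1/bias:0", data=np.zeros(4))
# The script above could then be pointed at tiny_weights.h5 as its model_path.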
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import mox
import os
import sys
import shutil
import time
import constants
sys.path.insert(0, constants.SOURCE_ROOT)
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.buildbot import remote_try
from chromite.buildbot import repository
from chromite.scripts import cbuildbot
class RemoteTryJobMock(remote_try.RemoteTryJob):
pass
# pylint: disable=W0212,R0904,E1101
class RemoteTryTests(cros_test_lib.MoxTempDirTestCase):
PATCHES = ('5555', '6666')
BOTS = ('x86-generic-paladin', 'arm-generic-paladin')
def setUp(self):
self.parser = cbuildbot._CreateParser()
args = ['-r', '/tmp/test_build1', '-g', '5555', '-g',
'6666', '--remote']
args.extend(self.BOTS)
self.options, args = cbuildbot._ParseCommandLine(self.parser, args)
self.checkout_dir = os.path.join(self.tempdir, 'test_checkout')
self.int_mirror, self.ext_mirror = None, None
def _RunCommandSingleOutput(self, cmd, cwd):
result = cros_build_lib.RunCommandCaptureOutput(cmd, cwd=cwd)
out_lines = result.output.split()
self.assertEqual(len(out_lines), 1)
return out_lines[0]
def _GetNewestFile(self, dirname, basehash):
newhash = git.GetGitRepoRevision(dirname)
self.assertNotEqual(basehash, newhash)
cmd = ['git', 'log', '--format=%H', '%s..' % basehash]
# Make sure we have a single commit.
self._RunCommandSingleOutput(cmd, cwd=dirname)
cmd = ['git', 'diff', '--name-only', 'HEAD^']
# Make sure only one file per commit.
return self._RunCommandSingleOutput(cmd, cwd=dirname)
def _SubmitJob(self, checkout_dir, job, version=None):
"""Returns the path to the tryjob description."""
self.assertTrue(isinstance(job, RemoteTryJobMock))
basehash = git.GetGitRepoRevision(job.ssh_url)
if version is not None:
self._SetMirrorVersion(version)
job.Submit(workdir=checkout_dir, dryrun=True)
# Get the file that was just created.
created_file = self._GetNewestFile(checkout_dir, basehash)
return os.path.join(checkout_dir, created_file)
def _SetupMirrors(self):
mirror = os.path.join(self.tempdir, 'tryjobs_mirror')
os.mkdir(mirror)
url = '%s/%s' % (constants.GIT_HTTP_URL, 'chromiumos/tryjobs')
repository.CloneGitRepo(mirror, url,
bare=True)
self.ext_mirror = mirror
mirror = os.path.join(self.tempdir, 'tryjobs_int_mirror')
os.mkdir(mirror)
repository.CloneGitRepo(mirror, self.ext_mirror, reference=self.ext_mirror,
bare=True)
self.int_mirror = mirror
RemoteTryJobMock.EXT_SSH_URL = self.ext_mirror
RemoteTryJobMock.INT_SSH_URL = self.int_mirror
self._SetMirrorVersion(remote_try.RemoteTryJob.TRYJOB_FORMAT_VERSION, True)
def _SetMirrorVersion(self, version, only_if_missing=False):
for path in (self.ext_mirror, self.int_mirror):
vpath = os.path.join(path, remote_try.RemoteTryJob.TRYJOB_FORMAT_FILE)
if os.path.exists(vpath) and only_if_missing:
continue
# Get ourselves a working dir.
tmp_repo = os.path.join(self.tempdir, 'tmp-repo')
git.RunGit(self.tempdir, ['clone', path, tmp_repo])
vpath = os.path.join(tmp_repo, remote_try.RemoteTryJob.TRYJOB_FORMAT_FILE)
with open(vpath, 'w') as f:
f.write(str(version))
git.RunGit(tmp_repo, ['add', vpath])
git.RunGit(tmp_repo, ['commit', '-m', 'setting version to %s' % version])
git.RunGit(tmp_repo, ['push', path, 'master:master'])
shutil.rmtree(tmp_repo)
def _CreateJob(self, mirror=True):
job_class = remote_try.RemoteTryJob
if mirror:
job_class = RemoteTryJobMock
self._SetupMirrors()
job = job_class(self.options, self.BOTS, [])
return job
def testJobTimestamp(self):
"""Verify jobs have unique names."""
def submit_helper(dirname):
work_dir = os.path.join(self.tempdir, dirname)
return os.path.basename(self._SubmitJob(work_dir, job))
self.mox.StubOutWithMock(repository, 'IsARepoRoot')
repository.IsARepoRoot(mox.IgnoreArg()).AndReturn(False)
self.mox.ReplayAll()
job = self._CreateJob()
file1 = submit_helper('test1')
# Tryjob file names are based on timestamp, so delay one second to avoid two
# jobfiles having the same name.
time.sleep(1)
file2 = submit_helper('test2')
self.assertNotEqual(file1, file2)
def testSimpleTryJob(self, version=None):
"""Test that a tryjob spec file is created and pushed properly."""
self.mox.StubOutWithMock(repository, 'IsARepoRoot')
repository.IsARepoRoot(mox.IgnoreArg()).AndReturn(True)
self.mox.StubOutWithMock(repository, 'IsInternalRepoCheckout')
repository.IsInternalRepoCheckout(mox.IgnoreArg()).AndReturn(False)
self.mox.ReplayAll()
try:
os.environ["GIT_AUTHOR_EMAIL"] = "Elmer Fudd <efudd@google.com>"
os.environ["GIT_COMMITTER_EMAIL"] = "Elmer Fudd <efudd@google.com>"
job = self._CreateJob()
finally:
os.environ.pop("GIT_AUTHOR_EMAIL", None)
os.environ.pop("GIT_COMMITTER_EMAIL", None)
created_file = self._SubmitJob(self.checkout_dir, job, version=version)
with open(created_file, 'rb') as job_desc_file:
values = json.load(job_desc_file)
self.assertTrue('efudd@google.com' in values['email'][0])
for patch in self.PATCHES:
self.assertTrue(patch in values['extra_args'],
msg="expected patch %s in args %s" %
(patch, values['extra_args']))
self.assertTrue(set(self.BOTS).issubset(values['bot']))
remote_url = cros_build_lib.RunCommand(
['git', 'config', 'remote.origin.url'], redirect_stdout=True,
cwd=self.checkout_dir).output.strip()
self.assertEqual(remote_url, self.ext_mirror)
def testClientVersionAwareness(self):
self.assertRaises(
remote_try.ChromiteUpgradeNeeded,
self.testSimpleTryJob,
version=remote_try.RemoteTryJob.TRYJOB_FORMAT_VERSION + 1)
def testInternalTryJob(self):
"""Verify internal tryjobs are pushed properly."""
self.mox.StubOutWithMock(repository, 'IsARepoRoot')
repository.IsARepoRoot(mox.IgnoreArg()).AndReturn(True)
self.mox.StubOutWithMock(repository, 'IsInternalRepoCheckout')
repository.IsInternalRepoCheckout(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
job = self._CreateJob()
self._SubmitJob(self.checkout_dir, job)
remote_url = cros_build_lib.RunCommand(
['git', 'config', 'remote.origin.url'], redirect_stdout=True,
cwd=self.checkout_dir).output.strip()
self.assertEqual(remote_url, self.int_mirror)
def testBareTryJob(self):
"""Verify submitting a tryjob from just a chromite checkout works."""
self.mox.StubOutWithMock(repository, 'IsARepoRoot')
repository.IsARepoRoot(mox.IgnoreArg()).AndReturn(False)
self.mox.StubOutWithMock(repository, 'IsInternalRepoCheckout')
self.mox.ReplayAll()
job = self._CreateJob(mirror=False)
self.assertEqual(job.ssh_url, remote_try.RemoteTryJob.EXT_SSH_URL)
if __name__ == '__main__':
cros_test_lib.main()
| espadrine/opera | chromium/src/third_party/chromite/buildbot/remote_try_unittest.py | Python | bsd-3-clause | 7,349 | 0.006668 |
'''
base tools
'''
# -*- coding: utf-8 -*-
import re
def is_ipv4(ip) :
pattern = r'^(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[0-9]{1,2})){3}$'
matcher = re.match(pattern, ip)
if matcher is not None :
return True
return False
def is_domain(domain) :
pattern = r'[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+\.?'
matcher = re.match(pattern, domain)
if matcher is not None :
return True
return False
| allen1989127/WhereRU | org/sz/tools.py | Python | gpl-3.0 | 508 | 0.015748 |
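A few spot checks of the validators above; the expected results follow directly from the regular expressions, assuming the functions are imported from the tools module.
print(is_ipv4("192.168.0.1"))    # True
print(is_ipv4("256.1.1.1"))      # False: octets above 255 are rejected
print(is_domain("example.com"))  # True
print(is_domain("-bad-.com"))    # False: a label may not start with '-'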
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Set up paths for Fast R-CNN."""
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
# Add caffe to PYTHONPATH
caffe_path = osp.join(this_dir, '..', 'caffe-fast-rcnn', 'python')
add_path(caffe_path)
# Add lib to PYTHONPATH
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
| yxliang/fast-rcnn | tools/_init_paths.py | Python | mit | 637 | 0.00314 |
from asyncio import coroutine
import pytest
from aiohttp import HttpBadRequest, HttpMethodNotAllowed
from fluentmock import create_mock
from aiohttp_rest import RestEndpoint
class CustomEndpoint(RestEndpoint):
def get(self):
pass
def patch(self):
pass
@pytest.fixture
def endpoint():
return RestEndpoint()
@pytest.fixture
def custom_endpoint():
return CustomEndpoint()
def test_exiting_methods_are_registered_during_initialisation(custom_endpoint: CustomEndpoint):
assert len(custom_endpoint.methods) == 2
assert ('GET', custom_endpoint.get) in custom_endpoint.methods.items()
assert ('PATCH', custom_endpoint.patch) in custom_endpoint.methods.items()
def test_register_method(endpoint: RestEndpoint):
def sample_method():
pass
endpoint.register_method('verb', sample_method)
assert ('VERB', sample_method) in endpoint.methods.items()
@pytest.mark.asyncio
async def test_dispatch_uses_correct_handler_for_verb(endpoint: RestEndpoint):
endpoint.register_method('VERB1', coroutine(lambda: 5))
endpoint.register_method('VERB2', coroutine(lambda: 17))
assert await endpoint.dispatch(create_mock(method='VERB1', match_info={})) == 5
assert await endpoint.dispatch(create_mock(method='VERB2', match_info={})) == 17
@pytest.mark.asyncio
async def test_dispatch_passes_request_when_required(endpoint: RestEndpoint):
endpoint.register_method('REQUEST', coroutine(lambda request: request))
request = create_mock(method='REQUEST', match_info={})
assert await endpoint.dispatch(request) == request
@pytest.mark.asyncio
async def test_dispatch_passes_match_info_when_required(endpoint: RestEndpoint):
endpoint.register_method('MATCH_INFO', coroutine(lambda prop1, prop2: (prop2, prop1)))
request = create_mock(method='MATCH_INFO', match_info={'prop1': 1, 'prop2': 2})
assert await endpoint.dispatch(request) == (2, 1)
@pytest.mark.asyncio
async def test_dispatch_raises_bad_request_when_match_info_does_not_exist(endpoint: RestEndpoint):
endpoint.register_method('BAD_MATCH_INFO', coroutine(lambda no_match: no_match))
request = create_mock(method='BAD_MATCH_INFO', match_info={})
with pytest.raises(HttpBadRequest):
await endpoint.dispatch(request)
@pytest.mark.asyncio
async def test_dispatch_raises_method_not_allowed_when_verb_not_matched(endpoint: RestEndpoint):
request = create_mock(method='NO_METHOD')
with pytest.raises(HttpMethodNotAllowed):
await endpoint.dispatch(request)
| atbentley/aiohttp-rest | tests/test_endpoint.py | Python | mit | 2,542 | 0.003541 |
# coding=utf-8
import logging
import time
from adapter import Adapter
DROIDBOT_APP_PACKAGE = "io.github.ylimit.droidbotapp"
IME_SERVICE = DROIDBOT_APP_PACKAGE + "/.DroidBotIME"
class DroidBotImeException(Exception):
"""
Exception in telnet connection
"""
pass
class DroidBotIme(Adapter):
"""
a connection with droidbot ime app.
"""
def __init__(self, device=None):
"""
initiate an emulator console via telnet
:param device: instance of Device
:return:
"""
self.logger = logging.getLogger(self.__class__.__name__)
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.connected = False
def set_up(self):
device = self.device
if DROIDBOT_APP_PACKAGE in device.adb.get_installed_apps():
self.logger.debug("DroidBot app was already installed.")
else:
# install droidbot app
try:
import pkg_resources
droidbot_app_path = pkg_resources.resource_filename("droidbot", "resources/droidbotApp.apk")
install_cmd = "install %s" % droidbot_app_path
self.device.adb.run_cmd(install_cmd)
self.logger.debug("DroidBot app installed.")
except Exception as e:
self.logger.warning(e.message)
self.logger.warning("Failed to install DroidBotApp.")
def tear_down(self):
self.device.uninstall_app(DROIDBOT_APP_PACKAGE)
def connect(self):
r_enable = self.device.adb.shell("ime enable %s" % IME_SERVICE)
if r_enable.endswith("now enabled"):
r_set = self.device.adb.shell("ime set %s" % IME_SERVICE)
if r_set.endswith("selected"):
self.connected = True
return
self.logger.warning("Failed to connect DroidBotIME!")
def check_connectivity(self):
"""
check if droidbot app is connected
:return: True for connected
"""
return self.connected
def disconnect(self):
"""
disconnect telnet
"""
self.connected = False
r_disable = self.device.adb.shell("ime disable %s" % IME_SERVICE)
if r_disable.endswith("now disabled"):
self.connected = False
print "[CONNECTION] %s is disconnected" % self.__class__.__name__
return
self.logger.warning("Failed to disconnect DroidBotIME!")
def input_text(self, text, mode=0):
"""
Input text to target device
:param text: text to input, can be unicode format
:param mode: 0 - set text; 1 - append text.
"""
input_cmd = "am broadcast -a DROIDBOT_INPUT_TEXT --es text \"%s\" --ei mode %d" % (text, mode)
self.device.adb.shell(input_cmd)
if __name__ == "__main__":
droidbot_ime_conn = DroidBotIme()
droidbot_ime_conn.set_up()
droidbot_ime_conn.connect()
droidbot_ime_conn.input_text("hello world!", 0)
droidbot_ime_conn.input_text(u"世界你好!", 1)
time.sleep(2)
droidbot_ime_conn.input_text(u"再见。Bye bye.", 0)
droidbot_ime_conn.disconnect()
droidbot_ime_conn.tear_down()
| nastya/droidbot | droidbot/adapter/droidbot_ime.py | Python | mit | 3,282 | 0.000612 |
# postgresql/json.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import json
import collections
from .base import ischema_names, colspecs
from ... import types as sqltypes
from ...sql import operators
from ...sql import elements
from ... import util
__all__ = ('JSON', 'JSONB')
idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
ASTEXT = operators.custom_op(
"->>", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
JSONPATH_ASTEXT = operators.custom_op(
"#>>", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_KEY = operators.custom_op(
"?", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_ALL = operators.custom_op(
"?&", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_ANY = operators.custom_op(
"?|", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
CONTAINS = operators.custom_op(
"@>", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
CONTAINED_BY = operators.custom_op(
"<@", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
class JSONPathType(sqltypes.JSON.JSONPathType):
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
assert isinstance(value, collections.Sequence)
tokens = [util.text_type(elem) for elem in value]
value = "{%s}" % (", ".join(tokens))
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
assert isinstance(value, collections.Sequence)
tokens = [util.text_type(elem) for elem in value]
value = "{%s}" % (", ".join(tokens))
if super_proc:
value = super_proc(value)
return value
return process
colspecs[sqltypes.JSON.JSONPathType] = JSONPathType
class JSON(sqltypes.JSON):
"""Represent the PostgreSQL JSON type.
This type is a specialization of the Core-level :class:`.types.JSON`
type. Be sure to read the documentation for :class:`.types.JSON` for
important tips regarding treatment of NULL values and ORM use.
.. versionchanged:: 1.1 :class:`.postgresql.JSON` is now a PostgreSQL-
specific specialization of the new :class:`.types.JSON` type.
The operators provided by the PostgreSQL version of :class:`.JSON`
include:
* Index operations (the ``->`` operator)::
data_table.c.data['some key']
data_table.c.data[5]
* Index operations returning text (the ``->>`` operator)::
data_table.c.data['some key'].astext == 'some value'
* Index operations with CAST
(equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
data_table.c.data['some key'].astext.cast(Integer) == 5
* Path index operations (the ``#>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
* Path index operations returning text (the ``#>>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == \
'some value'
.. versionchanged:: 1.1 The :meth:`.ColumnElement.cast` operator on
JSON objects now requires that the :attr:`.JSON.Comparator.astext`
modifier be called explicitly, if the cast works only from a textual
string.
Index operations return an expression object whose type defaults to
:class:`.JSON` by default, so that further JSON-oriented instructions
may be called upon the result type.
Custom serializers and deserializers are specified at the dialect level,
that is using :func:`.create_engine`. The reason for this is that when
using psycopg2, the DBAPI only allows serializers at the per-cursor
or per-connection level. E.g.::
engine = create_engine("postgresql://scott:tiger@localhost/test",
json_serializer=my_serialize_fn,
json_deserializer=my_deserialize_fn
)
When using the psycopg2 dialect, the json_deserializer is registered
against the database using ``psycopg2.extras.register_default_json``.
.. seealso::
:class:`.types.JSON` - Core level JSON type
:class:`.JSONB`
"""
astext_type = sqltypes.Text()
def __init__(self, none_as_null=False, astext_type=None):
"""Construct a :class:`.JSON` type.
:param none_as_null: if True, persist the value ``None`` as a
SQL NULL value, not the JSON encoding of ``null``. Note that
when this flag is False, the :func:`.null` construct can still
be used to persist a NULL value::
from sqlalchemy import null
conn.execute(table.insert(), data=null())
.. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null`
is now supported in order to persist a NULL value.
.. seealso::
:attr:`.JSON.NULL`
:param astext_type: the type to use for the
:attr:`.JSON.Comparator.astext`
accessor on indexed attributes. Defaults to :class:`.types.Text`.
.. versionadded:: 1.1
"""
super(JSON, self).__init__(none_as_null=none_as_null)
if astext_type is not None:
self.astext_type = astext_type
class Comparator(sqltypes.JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
@property
def astext(self):
"""On an indexed expression, use the "astext" (e.g. "->>")
conversion when rendered in SQL.
E.g.::
select([data_table.c.data['some key'].astext])
.. seealso::
:meth:`.ColumnElement.cast`
"""
if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
return self.expr.left.operate(
JSONPATH_ASTEXT,
self.expr.right, result_type=self.type.astext_type)
else:
return self.expr.left.operate(
ASTEXT, self.expr.right, result_type=self.type.astext_type)
comparator_factory = Comparator
colspecs[sqltypes.JSON] = JSON
ischema_names['json'] = JSON
class JSONB(JSON):
"""Represent the PostgreSQL JSONB type.
The :class:`.JSONB` type stores arbitrary JSONB format data, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSONB)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
)
The :class:`.JSONB` type includes all operations provided by
:class:`.JSON`, including the same behaviors for indexing operations.
It also adds additional operators specific to JSONB, including
:meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
:meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
and :meth:`.JSONB.Comparator.contained_by`.
Like the :class:`.JSON` type, the :class:`.JSONB` type does not detect
in-place changes when used with the ORM, unless the
:mod:`sqlalchemy.ext.mutable` extension is used.
Custom serializers and deserializers
are shared with the :class:`.JSON` class, using the ``json_serializer``
and ``json_deserializer`` keyword arguments. These must be specified
at the dialect level using :func:`.create_engine`. When using
psycopg2, the serializers are associated with the jsonb type using
``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
in the same way that ``psycopg2.extras.register_default_json`` is used
to register these handlers with the json type.
.. versionadded:: 0.9.7
.. seealso::
:class:`.JSON`
"""
__visit_name__ = 'JSONB'
class Comparator(JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean)
comparator_factory = Comparator
ischema_names['jsonb'] = JSONB
| fernandog/Medusa | ext/sqlalchemy/dialects/postgresql/json.py | Python | gpl-3.0 | 9,821 | 0.000204 |
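The docstrings above already show each operator in isolation; the short sketch below (with made-up table and column names) strings a couple of them together as 1.x-style Core expressions. It only constructs the expressions, so no database connection is assumed.
from sqlalchemy import Column, Integer, MetaData, Table, select
from sqlalchemy.dialects.postgresql import JSONB

metadata = MetaData()
data_table = Table(
    "data_table", metadata,
    Column("id", Integer, primary_key=True),
    Column("data", JSONB),
)
# "->>" text extraction followed by a CAST, as described for astext above.
by_value = select([data_table.c.id]).where(
    data_table.c.data["some key"].astext.cast(Integer) == 5)
# JSONB "@>" containment via Comparator.contains.
by_containment = select([data_table.c.id]).where(
    data_table.c.data.contains({"key1": "value1"}))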
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
from http import client
from radicale import httputils, types
from radicale.app.base import ApplicationBase
class ApplicationPartOptions(ApplicationBase):
def do_OPTIONS(self, environ: types.WSGIEnviron, base_prefix: str,
path: str, user: str) -> types.WSGIResponse:
"""Manage OPTIONS request."""
headers = {
"Allow": ", ".join(
name[3:] for name in dir(self) if name.startswith("do_")),
"DAV": httputils.DAV_HEADERS}
return client.OK, headers, None
| Kozea/Radicale | radicale/app/options.py | Python | gpl-3.0 | 1,395 | 0 |
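The Allow header above is derived purely by introspection: every method named do_<VERB> advertises that verb. A standalone illustration of the same trick, with a dummy class standing in for the application part:
class DummyApp:
    def do_GET(self, *args): pass
    def do_OPTIONS(self, *args): pass
    def do_PROPFIND(self, *args): pass

allow = ", ".join(
    name[3:] for name in dir(DummyApp()) if name.startswith("do_"))
print(allow)  # GET, OPTIONS, PROPFIND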
try:
import unittest2 as unittest # Python2.6
except ImportError:
import unittest
from tests.functional import test_base
@unittest.skipIf(test_base.get_test_server_api() == 1,
"The tag API didn't work at v1 - see frontend issue #927")
class TestTags(test_base.TestBase):
testcase_name = "tag API"
def test_create_delete(self, tag_id="create_tag"):
"""
Create a tag then delete it.
This test is a little contrived, since the tag create/delete
endpoints are only intended for internal use.
"""
# Create a tag
self.assertTrue(self.client.tag.create(tag_id))
# Check that the tag doesn't exist (It has no photos, so it's invisible)
self.assertNotIn(tag_id, [t.id for t in self.client.tags.list()])
# Create a tag on one of the photos
self.photos[0].update(tagsAdd=tag_id)
# Check that the tag now exists
self.assertIn(tag_id, [t.id for t in self.client.tags.list()])
# Delete the tag
self.assertTrue(self.client.tag.delete(tag_id))
# Check that the tag is now gone
self.assertNotIn(tag_id, [t.id for t in self.client.tags.list()])
# Also remove the tag from the photo
self.photos[0].update(tagsRemove=tag_id)
# Create the tag again
self.photos[0].update(tagsAdd=tag_id)
self.assertIn(tag_id, [t.id for t in self.client.tags.list()])
# Delete using the tag object directly
tag = [t for t in self.client.tags.list() if t.id == tag_id][0]
self.assertTrue(tag.delete())
# Check that the tag is now gone
self.assertNotIn(tag_id, [t.id for t in self.client.tags.list()])
# Also remove the tag from the photo
self.photos[0].update(tagsRemove=tag_id)
# TODO: Un-skip and update this tests once there are tag fields
# that can be updated (the owner field cannot be updated).
@unittest.skip("Can't test the tag.update endpoint, "
"since there are no fields that can be updated")
def test_update(self):
""" Test that a tag can be updated """
# Update the tag using the Trovebox class, passing in the tag object
owner = "test1@trovebox.com"
ret_val = self.client.tag.update(self.tags[0], owner=owner)
# Check that the tag is updated
self.tags = self.client.tags.list()
self.assertEqual(self.tags[0].owner, owner)
self.assertEqual(ret_val.owner, owner)
# Update the tag using the Trovebox class, passing in the tag id
owner = "test2@trovebox.com"
ret_val = self.client.tag.update(self.TEST_TAG, owner=owner)
# Check that the tag is updated
self.tags = self.client.tags.list()
self.assertEqual(self.tags[0].owner, owner)
self.assertEqual(ret_val.owner, owner)
# Update the tag using the Tag object directly
owner = "test3@trovebox.com"
ret_val = self.tags[0].update(owner=owner)
# Check that the tag is updated
self.tags = self.client.tags.list()
self.assertEqual(self.tags[0].owner, owner)
self.assertEqual(ret_val.owner, owner)
def test_tag_with_spaces(self):
""" Run test_create_delete using a tag containing spaces """
self.test_create_delete("tag with spaces")
def test_tag_with_slashes(self):
""" Run test_create_delete using a tag containing slashes """
self.test_create_delete("tag/with/slashes")
# TODO: Un-skip this test once issue #919 is resolved -
# tags with double-slashes cannot be deleted
@unittest.skip("Tags with double-slashed cannot be deleted")
def test_tag_with_double_slashes(self):
""" Run test_create_delete using a tag containing double-slashes """
self.test_create_delete("tag//with//double//slashes")
| photo/openphoto-python | tests/functional/test_tags.py | Python | apache-2.0 | 3,889 | 0.000771 |
from Tkinter import *
import tkMessageBox
from functools import partial
import os
import sys
import hashlib
import gzip
class niUpdater:
def __init__(self, parent):
self.myParent = parent
self.topContainer = Frame(parent)
self.topContainer.pack(side=TOP, expand=1, fill=X, anchor=NW)
self.btmContainer = Frame(parent)
self.btmContainer.pack(side=BOTTOM, expand=1, fill=X, anchor=NW)
path = StringVar()
ver = StringVar()
path.set(myloc + "\\NordInvasion")
ver.set("0.4.9")
entry1 = Entry(self.topContainer, textvariable=path)
entry1.pack(side=LEFT, expand=1, fill=X)
entry2 = Entry(self.topContainer, textvariable=ver, width =7)
entry2.pack(side=LEFT, expand=0)
#------------------ BUTTON #1 ------------------------------------
button_name = "OK"
# command binding
var = StringVar()
self.button1 = Button(self.topContainer, command=lambda: self.buttonPress(entry1.get(),var,entry2.get()))
# event binding -- passing the event as an argument
self.button1.bind("<Return>",
lambda
event :
self.buttonHandler_a(entry1.get(),var)
)
self.button1.configure(text=button_name, width=5)
self.button1.pack(side=LEFT)
self.button1.focus_force() # Put keyboard focus on button1
self.label = Label(self.btmContainer,textvariable=var, width=55, anchor=W, justify=LEFT)
self.label.pack(side=LEFT, expand=1, fill=X)
var.set("Press OK to start")
def writeOut(self,dir,hashfile,toplevel,var,folder):
""" walks a directory, and executes a callback on each file """
dir = os.path.abspath(dir)
for file in [file for file in os.listdir(dir) if not file in [".",".."]]:
nfile = os.path.join(dir,file)
if os.path.isdir(nfile): # is a directory
hashfile.write("F::"+nfile.replace(toplevel,"") + "\n")
hashfile.write("X::\n")
var.set("Generating... " + "F::"+nfile.replace(toplevel,""))
root.update()
if not os.path.exists(folder + '\\' + nfile.replace(toplevel,"")):
os.mkdir(folder + '\\' + nfile.replace(toplevel,""))
self.writeOut(nfile,hashfile,toplevel,var,folder)
else: # is a file
# Generate the hash and add to hash file
h=(hashlib.sha1(open(nfile, 'rb').read()).hexdigest())
hashfile.write(nfile.replace(toplevel,"") + "\n")
var.set("Generating... " + nfile.replace(toplevel,""))
root.update()
hashfile.write(h + "\n")
# Generate a smaller, gzipped version of the file
with open(nfile, 'rb') as f_in:
with gzip.open(folder + '\\' + nfile.replace(toplevel,"") + '.gz', 'wb') as f_out:
f_out.writelines(f_in)
def buttonPress(self, path, var, versionNumber):
self.button1.configure(state=DISABLED)
import time
timestamp = int(time.time())
folderName = (myloc + '\\ni-mod-' + str(timestamp))
if not os.path.exists(folderName):
os.mkdir(folderName)
file = open(myloc + '\\hash.txt','wt')
file.write("V::1\n")
file.write("W::http://nordinvasion.com/mod/" + str(versionNumber) + "/\n")
self.writeOut(path,file,path+'\\',var,folderName)
file.close()
var.set("File Generated")
tkMessageBox.showinfo("NI Hash Gen", "Hash file generated successfully.")
self.button1.configure(state=NORMAL)
def buttonHandler_a(self, path, var):
self.buttonPress(path, var)
pathname = os.path.dirname(sys.argv[0])
myloc = os.path.abspath(pathname)
root = Tk()
niup = niUpdater(root)
root.wm_title("Nord Invasion Hash Generator")
root.mainloop()
| Naozumi/hashgen | ni_hashGen.py | Python | mit | 3,412 | 0.036928 |
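For reference, buttonPress() and writeOut() above emit hash.txt with a version line, a base-URL line, F::/X:: pairs for directories, and path/SHA-1 pairs for files. The string below shows that layout with invented paths and an invented digest.
example_hash_txt = (
    "V::1\n"                                       # format version header
    "W::http://nordinvasion.com/mod/0.4.9/\n"      # download base URL
    "F::Textures\n"                                # a directory entry...
    "X::\n"                                        # ...and its X:: marker
    "Textures\\banner.dds\n"                       # a file entry: relative path...
    "3f786850e387550fdab836ed7e6dc881de23001b\n"   # ...then its SHA-1 hex digest
)
print(example_hash_txt)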
import copy
import resource
import sys
import traceback
import unittest
import mock
import numpy as np
import sklearn.datasets
import sklearn.decomposition
import sklearn.ensemble
import sklearn.svm
from sklearn.utils.testing import assert_array_almost_equal
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from autosklearn.pipeline.regression import SimpleRegressionPipeline
from autosklearn.pipeline.components.base import \
AutoSklearnPreprocessingAlgorithm, AutoSklearnRegressionAlgorithm
import autosklearn.pipeline.components.regression as regression_components
import autosklearn.pipeline.components.feature_preprocessing as preprocessing_components
from autosklearn.pipeline.util import get_dataset
from autosklearn.pipeline.constants import *
class SimpleRegressionPipelineTest(unittest.TestCase):
_multiprocess_can_split_ = True
def test_io_dict(self):
regressors = regression_components._regressors
for r in regressors:
if regressors[r] == regression_components.RegressorChoice:
continue
props = regressors[r].get_properties()
self.assertIn('input', props)
self.assertIn('output', props)
inp = props['input']
output = props['output']
self.assertIsInstance(inp, tuple)
self.assertIsInstance(output, tuple)
for i in inp:
self.assertIn(i, (SPARSE, DENSE, SIGNED_DATA, UNSIGNED_DATA))
self.assertEqual(output, (PREDICTIONS,))
self.assertIn('handles_regression', props)
self.assertTrue(props['handles_regression'])
self.assertIn('handles_classification', props)
self.assertIn('handles_multiclass', props)
self.assertIn('handles_multilabel', props)
self.assertFalse(props['handles_classification'])
self.assertFalse(props['handles_multiclass'])
self.assertFalse(props['handles_multilabel'])
def test_find_regressors(self):
regressors = regression_components._regressors
self.assertGreaterEqual(len(regressors), 1)
for key in regressors:
if hasattr(regressors[key], 'get_components'):
continue
self.assertIn(AutoSklearnRegressionAlgorithm,
regressors[key].__bases__)
def test_find_preprocessors(self):
preprocessors = preprocessing_components._preprocessors
self.assertGreaterEqual(len(preprocessors), 1)
for key in preprocessors:
if hasattr(preprocessors[key], 'get_components'):
continue
self.assertIn(AutoSklearnPreprocessingAlgorithm,
preprocessors[key].__bases__)
def test_configurations(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = SimpleRegressionPipeline.get_hyperparameter_search_space()
print(cs)
cs.seed(1)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['regressor:sgd:n_iter'] is not None:
config._values['regressor:sgd:n_iter'] = 5
X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston')
cls = SimpleRegressionPipeline(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
predicted_probabiliets = cls.predict(X_test_)
self.assertIsInstance(predicted_probabiliets, np.ndarray)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0]:
continue
elif "all features are discarded" in e.args[0]:
continue
elif "removed all features" in e.args[0]:
continue
elif "Bug in scikit-learn:" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations_signed_data(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
dataset_properties={'signed': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if 'classifier:passive_aggressive:n_iter' in config and \
config[
'classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if 'classifier:sgd:n_iter' in config and \
config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston')
cls = SimpleRegressionPipeline(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
predicted_probabiliets = cls.predict(X_test_)
self.assertIsInstance(predicted_probabiliets, np.ndarray)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0] or \
"Bug in scikit-learn" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations_sparse(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if 'classifier:passive_aggressive:n_iter' in config and \
config[
'classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if 'classifier:sgd:n_iter' in config and \
config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
print(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston',
make_sparse=True)
cls = SimpleRegressionPipeline(config, random_state=1)
try:
cls.fit(X_train, Y_train)
predictions = cls.predict(X_test)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
traceback.print_tb(sys.exc_info()[2])
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
raise e
def test_default_configuration(self):
for i in range(2):
cs = SimpleRegressionPipeline.get_hyperparameter_search_space()
default = cs.get_default_configuration()
X_train, Y_train, X_test, Y_test = get_dataset(dataset='diabetes')
auto = SimpleRegressionPipeline(default)
auto = auto.fit(X_train, Y_train)
predictions = auto.predict(copy.deepcopy(X_test))
# The lower the worse
r2_score = sklearn.metrics.r2_score(Y_test, predictions)
self.assertAlmostEqual(0.41626416529791199, r2_score)
model_score = auto.score(copy.deepcopy(X_test), Y_test)
self.assertEqual(model_score, r2_score)
def test_repr(self):
cs = SimpleRegressionPipeline.get_hyperparameter_search_space()
default = cs.get_default_configuration()
representation = repr(SimpleRegressionPipeline(default))
cls = eval(representation)
self.assertIsInstance(cls, SimpleRegressionPipeline)
def test_get_hyperparameter_search_space(self):
cs = SimpleRegressionPipeline.get_hyperparameter_search_space()
self.assertIsInstance(cs, ConfigurationSpace)
conditions = cs.get_conditions()
hyperparameters = cs.get_hyperparameters()
self.assertEqual(130, len(hyperparameters))
self.assertEqual(len(hyperparameters) - 5, len(conditions))
def test_get_hyperparameter_search_space_include_exclude_models(self):
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
include={'regressor': ['random_forest']})
self.assertEqual(cs.get_hyperparameter('regressor:__choice__'),
CategoricalHyperparameter('regressor:__choice__', ['random_forest']))
# TODO add this test when more than one regressor is present
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
exclude={'regressor': ['random_forest']})
self.assertNotIn('random_forest', str(cs))
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
include={'preprocessor': ['pca']})
self.assertEqual(cs.get_hyperparameter('preprocessor:__choice__'),
CategoricalHyperparameter('preprocessor:__choice__', ['pca']))
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
exclude={'preprocessor': ['no_preprocessing']})
self.assertNotIn('no_preprocessing', str(cs))
def test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier(
self):
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
include={'preprocessor': ['densifier']},
dataset_properties={'sparse': True})
self.assertEqual(cs.get_hyperparameter('regressor:__choice__').default,
'gradient_boosting')
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
include={'preprocessor': ['nystroem_sampler']})
self.assertEqual(cs.get_hyperparameter('regressor:__choice__').default,
'sgd')
def test_get_hyperparameter_search_space_only_forbidden_combinations(self):
self.assertRaisesRegexp(ValueError, "Cannot find a legal default "
"configuration.",
SimpleRegressionPipeline.get_hyperparameter_search_space,
include={'regressor': ['random_forest'],
'preprocessor': ['kitchen_sinks']})
# It must also be checked that no classifiers which can handle sparse
# data are located behind the densifier
self.assertRaisesRegexp(ValueError, "Cannot find a legal default "
"configuration",
SimpleRegressionPipeline.get_hyperparameter_search_space,
include={'regressor': ['ridge_regression'],
'preprocessor': ['densifier']},
dataset_properties={'sparse': True})
@unittest.skip("test_get_hyperparameter_search_space_dataset_properties" +
" Not yet Implemented")
def test_get_hyperparameter_search_space_dataset_properties(self):
# TODO: We do not have any dataset properties for regression, so this
# test is somewhat stupid
pass
"""
full_cs = SimpleRegressionPipeline.get_hyperparameter_search_space()
cs_mc = SimpleRegressionPipeline.get_hyperparameter_search_space()
self.assertEqual(full_cs, cs_mc)
cs_ml = SimpleRegressionPipeline.get_hyperparameter_search_space()
self.assertNotIn('k_nearest_neighbors', str(cs_ml))
self.assertNotIn('liblinear', str(cs_ml))
self.assertNotIn('libsvm_svc', str(cs_ml))
self.assertNotIn('sgd', str(cs_ml))
cs_sp = SimpleRegressionPipeline.get_hyperparameter_search_space(
sparse=True)
self.assertNotIn('extra_trees', str(cs_sp))
self.assertNotIn('gradient_boosting', str(cs_sp))
self.assertNotIn('random_forest', str(cs_sp))
cs_mc_ml = SimpleRegressionPipeline.get_hyperparameter_search_space()
self.assertEqual(cs_ml, cs_mc_ml)
self.assertRaisesRegexp(ValueError,
"No regressor to build a configuration space "
"for...", SimpleRegressionPipeline.
get_hyperparameter_search_space,
multiclass=True, multilabel=True, sparse=True)
"""
def test_predict_batched(self):
cs = SimpleRegressionPipeline.get_hyperparameter_search_space()
default = cs.get_default_configuration()
cls = SimpleRegressionPipeline(default)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston')
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((356,), prediction.shape)
self.assertEqual(18, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_batched_sparse(self):
cs = SimpleRegressionPipeline.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
default = cs.get_default_configuration()
cls = SimpleRegressionPipeline(default)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston',
make_sparse=True)
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((356,), prediction.shape)
self.assertEqual(18, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
@unittest.skip("test_check_random_state Not yet Implemented")
def test_check_random_state(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_X Not yet Implemented")
def test_validate_input_X(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_Y Not yet Implemented")
def test_validate_input_Y(self):
raise NotImplementedError()
def test_set_params(self):
pass
def test_get_params(self):
pass
| hmendozap/auto-sklearn | test/test_pipeline/test_regression.py | Python | bsd-3-clause | 18,360 | 0.000926 |
import os
import ycm_core
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-pedantic',
'-std=c++1y',
#'-stdlib=libc++',
'-x',
'c++',
'-Iinclude',
'-Itest/include',
'-Ilib/jest/include',
'-isystem',
'../BoostParts',
'-isystem',
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| jeaye/jeayeson | .ycm_extra_conf.py | Python | bsd-3-clause | 3,051 | 0.041298 |
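A quick check of the flag-rewriting helper above: relative include paths are rooted at the supplied working directory, while absolute paths pass through unchanged (POSIX-style paths assumed).
flags_in = ['-I', 'include', '-isystem', '/usr/include']
print(MakeRelativePathsInFlagsAbsolute(flags_in, '/home/user/project'))
# ['-I', '/home/user/project/include', '-isystem', '/usr/include']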
from __future__ import absolute_import
import numpy as np
import conf
from time import sleep
from functools import reduce
from hardware.robot.modules.motor_math import get_triangular_direction_vector
from hardware.robot.modules.com import send_encoder_steps_and_speed
##
## This is the code that instructs how to get from A to B
## using continual approximation and gradient descent
##
## Motors that have requested to move to the next step
## Only executes when all are true
## Motors have been assigned IDs which will then be
## Used to index motors_requested
## instructions has the following format per line:
## [CAN_NUMBER, X, Y]
instructions = np.genfromtxt('hardware/robot/image.tsv', delimiter='\t')
motors_requested = [True, True]
last_instruction_index = -1
current_instruction_index = 1
last_instruction = [1, 0, 0]
current_instruction = [1, 0, 0]
def check_all_requested():
## Returns true if all motors_requested values are true
return reduce(lambda x, y: x and y, motors_requested)
## TODO: Integrate sensor data to complete this function
def position_is_close_enough_to_goal():
return True
def gen_next_instruction():
global current_instruction_index
global current_instruction
global last_instruction_index
global last_instruction
last_instruction, last_instruction_index = current_instruction, current_instruction_index
current_instruction_index = current_instruction_index + 1
current_instruction = instructions[current_instruction_index]
while current_instruction[1] == -1:
current_instruction_index = current_instruction_index + 1
current_instruction = instructions[current_instruction_index]
return current_instruction, current_instruction_index, last_instruction, last_instruction_index
##TODO: CHECK FOR LABEL CHANGES
def request_step(motor_id):
global current_instruction_index
global current_instruction
global last_instruction_index
global last_instruction
if motor_id < len(motors_requested):
motors_requested[motor_id] = True
if check_all_requested() and position_is_close_enough_to_goal():
# print("pass" + str(current_instruction_index))
gen_next_instruction()
print("INSTRUCTION NUMBER: " + str(current_instruction_index))
from_x, from_y = last_instruction[1], last_instruction[2]
goal_x, goal_y = current_instruction[1], current_instruction[2]
turn_steps = get_triangular_direction_vector(
from_x,
from_y,
goal_x,
goal_y,
)
print("MOVEMENT VECTORS")
print((from_x, from_y), (goal_x, goal_y))
print("STEPS TO TURN (LEFT, RIGHT) MOTORS")
print(turn_steps)
left_steps = turn_steps[0]
right_steps = turn_steps[1]
max_steps = max(abs(left_steps), abs(right_steps))
##TODO: Turn into an async send_turn_ratio if problems arise
if conf.LMOTOR_IP != '0.0.0.0':
send_encoder_steps_and_speed(conf.LMOTOR_IP, left_steps, left_steps/max_steps)
if conf.RMOTOR_IP != '0.0.0.0':
send_encoder_steps_and_speed(conf.RMOTOR_IP, right_steps, right_steps/max_steps)
| ut-ras/robotticelli | src/hardware/robot/run-real.py | Python | lgpl-3.0 | 3,189 | 0.007839 |
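For reference, np.genfromtxt above loads hardware/robot/image.tsv as rows of [CAN_NUMBER, X, Y], and gen_next_instruction() skips any row whose X is -1. The snippet below writes a small file in that layout purely for illustration; every value is invented.
example_tsv = (
    "1\t0.0\t0.0\n"
    "1\t10.0\t5.0\n"
    "1\t-1\t-1\n"     # X == -1: skipped by gen_next_instruction()
    "1\t20.0\t5.0\n"
)
with open("image.tsv", "w") as f:
    f.write(example_tsv)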
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster
class CyberlockerCh(DeadHoster):
__name__ = "CyberlockerCh"
__type__ = "hoster"
__version__ = "0.06"
__status__ = "stable"
__pattern__ = r'http://(?:www\.)?cyberlocker\.ch/\w+'
__config__ = [] #@TODO: Remove in 0.4.10
__description__ = """Cyberlocker.ch hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it")]
| manuelm/pyload | module/plugins/hoster/CyberlockerCh.py | Python | gpl-3.0 | 485 | 0.014433 |
'''
A secret file with data that we can use in unit tests without needing to
clutter up that file with a bunch of raw data structures.
'''
from datetime import datetime
SEARCH_TEST_DATA = [
{
"created" : datetime(2015, 10, 1),
"published": datetime(2015, 10, 1),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "First Post",
"slug": "",
"text": "a bunch of words #foo #bar",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": False,
"title": "Second Post",
"status": "published",
"slug": "",
"text": "a bunch more words #foo #baz",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 3),
"published": datetime(2015, 10, 3),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "tslothrop",
"public": True,
"title": "Third Post",
"status": "published",
"slug": "",
"text": "a bunch #baz more #bar words",
"tags": [],
"type": "Post"
}
]
|
bgporter/wastebook
|
testData/postTestData.py
|
Python
|
mit
| 1,496 | 0.004679 |
# Projection 1D2D
# Project triangles from one meshed face to another mesh on the same box
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
# Prepare geometry
# Create a box
box = geompy.MakeBoxDXDYDZ(100, 100, 100)
# Get geom faces to mesh with triangles in the 1ts and 2nd meshes
faces = geompy.SubShapeAll(box, geompy.ShapeType["FACE"])
# 2 adjacent faces of the box
Face_1 = faces[2]
Face_2 = faces[0]
geompy.addToStudy( box, 'box' )
geompy.addToStudyInFather( box, Face_1, 'Face_1' )
geompy.addToStudyInFather( box, Face_2, 'Face_2' )
# Make the source mesh with Netgem2D
src_mesh = smesh.Mesh(Face_1, "Source mesh")
src_mesh.Segment().NumberOfSegments(15)
src_mesh.Triangle()
src_mesh.Compute()
# Mesh the target mesh using the algoritm Projection1D2D
tgt_mesh = smesh.Mesh(Face_2, "Target mesh")
tgt_mesh.Projection1D2D().SourceFace(Face_1,src_mesh)
tgt_mesh.Compute()
|
FedoraScientific/salome-smesh
|
doc/salome/examples/defining_hypotheses_ex11.py
|
Python
|
lgpl-2.1
| 1,063 | 0.01223 |
# From CPython 2.5.1
import sys
import os
import unittest
from array import array
from weakref import proxy
from test.test_support import TESTFN, findfile, is_jython, run_unittest
from UserList import UserList
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write('teststring')
self.assertEquals(self.f.tell(), p.tell())
self.f.close()
self.f = None
if is_jython:
from test_weakref import extra_collect
extra_collect()
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
softspace = f.softspace
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
# verify softspace is writable
f.softspace = softspace # merely shouldn't blow up
# verify the others aren't
for attr in 'name', 'mode', 'closed':
self.assertRaises((AttributeError, TypeError), setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write('12')
self.f.close()
a = array('c', 'x'*10)
self.f = open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEquals('12', a.tostring()[:n])
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList(['1', '2'])
self.f.writelines(l)
self.f.close()
self.f = open(TESTFN, 'rb')
buf = self.f.read()
self.assertEquals(buf, '12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testRepr(self):
# verify repr works
self.assert_(repr(self.f).startswith("<open file '" + TESTFN))
def testErrors(self):
f = self.f
self.assertEquals(f.name, TESTFN)
# XXX: Jython doesn't support isatty
#self.assert_(not f.isatty())
self.assert_(not f.closed)
self.assertRaises(TypeError, f.readinto, "")
f.close()
self.assert_(f.closed)
def testMethods(self):
# XXX: Jython file methods require valid arguments: closed file
# checks are done before parsing the arguments in CPython
#methods = ['next', 'read', 'readinto',
# 'readline', 'readlines', 'seek', 'tell', 'truncate',
# 'write', 'xreadlines', '__iter__']
noarg = object()
# XXX: Jython doesn't support isatty
#methods = dict(fileno=noarg, flush=noarg, isatty=noarg, next=noarg,
methods = dict(fileno=noarg, flush=noarg, next=noarg,
read=-1, readinto=array('c', 'x'), readline=-1,
readlines=noarg, seek=0, tell=noarg, truncate=0,
write='x', xreadlines=noarg, __iter__=noarg)
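        # `noarg` marks methods that take no argument; the rest are called with the
        # sample argument listed, and every call must raise ValueError on a closed file.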
if sys.platform.startswith('atheos'):
            del methods['truncate']  # methods is a dict here, not a list
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assert_(self.f.closed)
for methodname, arg in methods.iteritems():
method = getattr(self.f, methodname)
# should raise on closed file
if arg is noarg:
self.assertRaises(ValueError, method)
else:
self.assertRaises(ValueError, method, arg)
self.assertRaises(ValueError, self.f.writelines, [])
# file is closed, __exit__ shouldn't do anything
self.assertEquals(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1/0
except:
self.assertEquals(self.f.__exit__(*sys.exc_info()), None)
class OtherFileTests(unittest.TestCase):
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises(IOError, sys.stdin.seek, -1)
else:
print >>sys.__stdout__, (
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.')
self.assertRaises(IOError, sys.stdin.truncate)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = open(unicode(TESTFN), "w")
self.assert_(repr(f).startswith("<open file u'" + TESTFN))
f.close()
os.unlink(TESTFN)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = open(TESTFN, bad_mode)
except ValueError, msg:
if msg[0] != 0:
s = str(msg)
if s.find(TESTFN) != -1 or s.find(bad_mode) == -1:
self.fail("bad error message for invalid mode: %s" % s)
# if msg[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = open(TESTFN, 'w', s)
f.write(str(s))
f.close()
f.close()
f = open(TESTFN, 'r', s)
d = int(f.read())
f.close()
f.close()
except IOError, msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEquals(d, s)
def testTruncateOnWindows(self):
os.unlink(TESTFN)
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = open(TESTFN, 'wb')
f.write('12345678901') # 11 bytes
f.close()
f = open(TESTFN,'rb+')
data = f.read(5)
if data != '12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods. Ostensibly, the mixture could just be tested
# to work when it should work according to the Python language,
# instead of fail when it should fail according to the current CPython
# implementation. People don't always program Python the way they
        # should, though, and the implementation might change in subtle ways,
# so we explicitly test for errors, too; the test will just have to
# be updated when the implementation changes.
dataoffset = 16384
filler = "ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
"spam, spam and eggs\n",
"eggs, spam, ham and spam\n",
"saussages, spam, spam and eggs\n",
"spam, ham, spam and eggs\n",
"spam, spam, spam, spam, spam, ham, spam\n",
"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("c", " "*100),))]
try:
# Prepare the testfile
bag = open(TESTFN, "w")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = open(TESTFN)
if f.next() != filler:
                    self.fail("Broken testfile")
meth = getattr(f, methodname)
try:
meth(*args)
except ValueError:
pass
else:
self.fail("%s%r after next() didn't raise ValueError" %
(methodname, args))
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = open(TESTFN)
for i in range(nchunks):
f.next()
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("c", "\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tostring()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
# Reading after iteration hit EOF shouldn't hurt either
f = open(TESTFN)
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
def test_main():
if is_jython:
# Jython's stdin can't seek, it's not backed by a
# RandomAccessFile
del OtherFileTests.testStdin
# Jython allows mixing reads with iteration
del OtherFileTests.testIteration
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
babble/babble
|
include/jython/Lib/test/test_file.py
|
Python
|
apache-2.0
| 13,201 | 0.000985 |
import os
import logging
from pdb import pm
from elfesteem import pe
from miasm2.analysis.sandbox import Sandbox_Win_x86_32
from miasm2.core import asmbloc
filename = os.environ.get('PYTHONSTARTUP')
if filename and os.path.isfile(filename):
execfile(filename)
# User defined methods
def kernel32_GetProcAddress(jitter):
ret_ad, args = jitter.func_args_stdcall(["libbase", "fname"])
dst_ad = jitter.cpu.EBX
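    # The UPX loader stub keeps the address of the import slot currently being filled
    # in EBX, so the resolved address is written back there (assumption based on how
    # this unpacker drives GetProcAddress).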
logging.info('EBX ' + hex(dst_ad))
fname = (args.fname if args.fname < 0x10000
else jitter.get_str_ansi(args.fname))
logging.info(fname)
ad = sb.libs.lib_get_add_func(args.libbase, fname, dst_ad)
jitter.func_ret_stdcall(ret_ad, ad)
parser = Sandbox_Win_x86_32.parser(description="Generic UPX unpacker")
parser.add_argument("filename", help="PE Filename")
parser.add_argument('-v', "--verbose",
help="verbose mode", action="store_true")
parser.add_argument("--graph",
help="Export the CFG graph in graph.txt",
action="store_true")
options = parser.parse_args()
sb = Sandbox_Win_x86_32(options.filename, options, globals())
if options.verbose is True:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
if options.verbose is True:
print sb.jitter.vm
ep = sb.entry_point
# Ensure there is one and only one leave (for OEP discovering)
mdis = sb.machine.dis_engine(sb.jitter.bs)
mdis.dont_dis_nulstart_bloc = True
ab = mdis.dis_multibloc(ep)
bb = asmbloc.basicblocs(ab)
leaves = bb.get_bad_dst()
assert(len(leaves) == 1)
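# For a typical UPX stub the only control-flow edge leaving the disassembled blocks is
# the final jump to the original entry point (OEP), hence exactly one "bad" leaf here.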
l = leaves.pop()
logging.info(l)
end_label = l.label.offset
logging.info('final label')
logging.info(end_label)
# Export CFG graph (dot format)
if options.graph is True:
g = asmbloc.bloc2graph(ab)
open("graph.txt", "w").write(g)
if options.verbose is True:
print sb.jitter.vm
def update_binary(jitter):
sb.pe.Opthdr.AddressOfEntryPoint = sb.pe.virt2rva(jitter.pc)
logging.info('updating binary')
for s in sb.pe.SHList:
sdata = sb.jitter.vm.get_mem(sb.pe.rva2virt(s.addr), s.rawsize)
sb.pe.virt[sb.pe.rva2virt(s.addr)] = sdata
# Set callbacks
sb.jitter.add_breakpoint(end_label, update_binary)
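# Once execution reaches the detected OEP, the breakpoint above copies the unpacked
# section contents back into the PE object before the import table is rebuilt below.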
# Run
sb.run()
# Rebuild PE
new_dll = []
sb.pe.SHList.align_sections(0x1000, 0x1000)
logging.info(repr(sb.pe.SHList))
sb.pe.DirRes = pe.DirRes(sb.pe)
sb.pe.DirImport.impdesc = None
logging.info(repr(sb.pe.DirImport.impdesc))
new_dll = sb.libs.gen_new_lib(sb.pe)
logging.info(new_dll)
sb.pe.DirImport.impdesc = []
sb.pe.DirImport.add_dlldesc(new_dll)
s_myimp = sb.pe.SHList.add_section(name="myimp", rawsize=len(sb.pe.DirImport))
logging.info(repr(sb.pe.SHList))
sb.pe.DirImport.set_rva(s_myimp.addr)
# XXXX TODO
sb.pe.NThdr.optentries[pe.DIRECTORY_ENTRY_DELAY_IMPORT].rva = 0
sb.pe.Opthdr.AddressOfEntryPoint = sb.pe.virt2rva(end_label)
bname, fname = os.path.split(options.filename)
fname = os.path.join(bname, fname.replace('.', '_'))
open(fname + '_unupx.bin', 'w').write(str(sb.pe))
|
amohanta/miasm
|
example/jitter/unpack_upx.py
|
Python
|
gpl-2.0
| 3,028 | 0.000661 |
"""
Extensible permission system for pybbm
"""
from django.db.models import Q
from pybb import defaults
from pybb.models import Topic, PollAnswerUser
from pybb.permissions import DefaultPermissionHandler
class CustomPermissionHandler(DefaultPermissionHandler):
"""
Custom Permission handler for PyBB.
Inherits from DefaultPermissionHandler.
Methods starting with `may` are expected to return `True` or `False`,
whereas methods starting with `filter_*` should filter the queryset they
receive, and return a new queryset containing only the objects the user is
allowed to see.
Activated by setting `settings.PYBB_PERMISSION_HANDLER`.
"""
#
# permission checks on categories
#
def filter_categories(self, user, qs):
""" return a queryset with categories `user` is allowed to see """
        if not user.is_authenticated:
            return qs.none()
return qs.filter(hidden=False) if not user.is_staff else qs
def may_view_category(self, user, category):
""" return True if `user` may view this category, False if not """
return user.is_staff or (user.is_authenticated and not category.hidden)
#
# permission checks on forums
#
def filter_forums(self, user, qs):
""" return a queryset with forums `user` is allowed to see """
return qs.filter(Q(hidden=False) & Q(category__hidden=False))\
if not user.is_staff else qs
def may_view_forum(self, user, forum):
""" return True if user may view this forum, False if not """
return (user.is_staff or
(user.is_authenticated and forum.hidden is False and
forum.category.hidden is False))
def may_create_topic(self, user, forum):
""" return True if `user` is allowed to create a new topic in `forum`
"""
return user.is_authenticated and user.has_perm('pybb.add_post')
#
# permission checks on topics
#
def filter_topics(self, user, qs):
""" return a queryset with topics `user` is allowed to see """
if not user.is_staff:
qs = qs.filter(
Q(forum__hidden=False) & Q(forum__category__hidden=False))
if not user.is_superuser:
if user.is_authenticated:
qs = qs.filter(
Q(forum__moderators=user) | Q(user=user) |
Q(on_moderation=False)
).distinct()
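                # distinct() is required because the join on forum__moderators can
                # yield the same topic once per matching OR branch.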
else:
qs = qs.filter(on_moderation=False)
return qs
def may_view_topic(self, user, topic):
""" return True if user may view this topic, False otherwise """
if not user.is_authenticated:
return False
if user.is_superuser:
return True
if not user.is_staff and (
topic.forum.hidden or topic.forum.category.hidden):
return False # only staff may see hidden forum / category
if topic.on_moderation:
return user.is_authenticated and (
user == topic.user or user in topic.forum.moderators)
return True
def may_vote_in_topic(self, user, topic):
""" return True if `user` may vote in `topic` """
return (
user.is_authenticated and
topic.poll_type != Topic.POLL_TYPE_NONE and
not topic.closed and
not PollAnswerUser.objects.filter(
poll_answer__topic=topic, user=user).exists()
)
def may_create_post(self, user, topic):
"""Return True if `user` is allowed to create a new post in `topic`"""
if topic.forum.hidden and (not user.is_staff):
# if topic is hidden, only staff may post
return False
if topic.closed and (not user.is_superuser):
# if topic is closed, only staff may post
return False
# only user which have 'pybb.add_post' permission may post
return (defaults.PYBB_ENABLE_ANONYMOUS_POST or
(user.is_authenticated and user.has_perm('pybb.add_post')))
def may_post_as_admin(self, user):
""" return True if `user` may post as admin """
return user.is_superuser
#
# permission checks on posts
#
def filter_posts(self, user, qs):
""" return a queryset with posts `user` is allowed to see """
# first filter by topic availability
if not user.is_superuser:
qs = qs.filter(
Q(topic__forum__hidden=False) &
Q(topic__forum__category__hidden=False))
if not defaults.PYBB_PREMODERATION or user.is_superuser:
# superuser may see all posts, also if premoderation is turned off
# moderation flag is ignored
return qs
elif user.is_authenticated:
# post is visible if user is author, post is not on moderation, or
# user is moderator for this forum
qs = qs.filter(
Q(user=user) | Q(on_moderation=False) |
Q(topic__forum__moderators=user))
else:
# anonymous user may not see posts which are on moderation
qs = qs.filter(on_moderation=False)
return qs
def may_view_post(self, user, post):
""" return True if `user` may view `post`, False otherwise """
if not user.is_authenticated:
return False
if user.is_superuser:
return True
if post.on_moderation:
return post.user == user or\
user in post.topic.forum.moderators.all()
return True
def may_edit_post(self, user, post):
""" return True if `user` may edit `post` """
return user.is_superuser or\
post.user == user or self.may_moderate_topic(user, post.topic)
def may_delete_post(self, user, post):
""" return True if `user` may delete `post` """
return self.may_moderate_topic(user, post.topic)
#
# permission checks on users
#
def may_block_user(self, user, user_to_block):
""" return True if `user` may block `user_to_block` """
return user.has_perm('pybb.block_users')
def may_attach_files(self, user):
"""
return True if `user` may attach files to posts, False otherwise.
By default controlled by PYBB_ATTACHMENT_ENABLE setting
"""
return defaults.PYBB_ATTACHMENT_ENABLE and user.is_authenticated
def may_create_poll(self, user):
"""
return True if `user` may attach files to posts, False otherwise.
By default always True
"""
return user.is_authenticated
|
ugoertz/django-familio
|
accounts/permissions.py
|
Python
|
bsd-3-clause
| 6,670 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# feeluown documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 2 20:55:54 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'feeluown'
copyright = '2015, cosven'
author = 'cosven'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'cn'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'feeluowndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'feeluown.tex', 'feeluown Documentation',
'cosven', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'feeluown', 'feeluown Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'feeluown', 'feeluown Documentation',
author, 'feeluown', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
JanlizWorldlet/FeelUOwn
|
sphinx_doc/source/conf.py
|
Python
|
mit
| 9,220 | 0.005965 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-04-11 10:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hkm', '0018_auto_20170411_1301'),
]
operations = [
migrations.AddField(
model_name='productorder',
name='total_price_with_postage',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Total price with postage'),
),
]
|
andersinno/kuvaselaamo
|
hkm/migrations/0019_productorder_total_price_with_postage.py
|
Python
|
mit
| 554 | 0.001805 |
# -*- coding: utf-8 -*-
"""
flaskbb.forum.forms
~~~~~~~~~~~~~~~~~~~
It provides the forms that are needed for the forum views.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from flask_wtf import Form
from wtforms import (TextAreaField, StringField, SelectMultipleField,
BooleanField, SubmitField)
from wtforms.validators import DataRequired, Optional, Length
from flask_babelplus import lazy_gettext as _
from flaskbb.forum.models import Topic, Post, Report, Forum
from flaskbb.user.models import User
class QuickreplyForm(Form):
content = TextAreaField(_("Quick reply"), validators=[
DataRequired(message=_("You cannot post a reply without content."))])
submit = SubmitField(_("Reply"))
def save(self, user, topic):
post = Post(content=self.content.data)
return post.save(user=user, topic=topic)
class ReplyForm(Form):
content = TextAreaField(_("Content"), validators=[
DataRequired(message=_("You cannot post a reply without content."))])
track_topic = BooleanField(_("Track this topic"), default=False,
validators=[Optional()])
submit = SubmitField(_("Reply"))
preview = SubmitField(_("Preview"))
def save(self, user, topic):
post = Post(content=self.content.data)
if self.track_topic.data:
user.track_topic(topic)
return post.save(user=user, topic=topic)
class NewTopicForm(ReplyForm):
title = StringField(_("Topic title"), validators=[
DataRequired(message=_("Please choose a title for your topic."))])
content = TextAreaField(_("Content"), validators=[
DataRequired(message=_("You cannot post a reply without content."))])
track_topic = BooleanField(_("Track this topic"), default=False,
validators=[Optional()])
submit = SubmitField(_("Post Topic"))
preview = SubmitField(_("Preview"))
def save(self, user, forum):
topic = Topic(title=self.title.data)
post = Post(content=self.content.data)
if self.track_topic.data:
user.track_topic(topic)
return topic.save(user=user, forum=forum, post=post)
class ReportForm(Form):
reason = TextAreaField(_("Reason"), validators=[
DataRequired(message=_("What is the reason for reporting this post?"))
])
submit = SubmitField(_("Report post"))
def save(self, user, post):
report = Report(reason=self.reason.data)
return report.save(post=post, user=user)
class UserSearchForm(Form):
search_query = StringField(_("Search"), validators=[
Optional(), Length(min=3, max=50)
])
submit = SubmitField(_("Search"))
def get_results(self):
query = self.search_query.data
return User.query.whooshee_search(query)
class SearchPageForm(Form):
search_query = StringField(_("Criteria"), validators=[
DataRequired(), Length(min=3, max=50)])
search_types = SelectMultipleField(_("Content"), validators=[
DataRequired()], choices=[('post', _('Post')), ('topic', _('Topic')),
('forum', _('Forum')), ('user', _('Users'))])
submit = SubmitField(_("Search"))
def get_results(self):
# Because the DB is not yet initialized when this form is loaded,
# the query objects cannot be instantiated in the class itself
search_actions = {
'post': Post.query.whooshee_search,
'topic': Topic.query.whooshee_search,
'forum': Forum.query.whooshee_search,
'user': User.query.whooshee_search
}
query = self.search_query.data
types = self.search_types.data
results = {}
for search_type in search_actions.keys():
if search_type in types:
results[search_type] = search_actions[search_type](query)
return results
|
realityone/flaskbb
|
flaskbb/forum/forms.py
|
Python
|
bsd-3-clause
| 3,976 | 0 |
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView, DayArchiveView, CreateView,\
TemplateView
from datetime import date
from antxetamedia.agenda.forms import HappeningForm
from antxetamedia.agenda.models import Town, Happening
class BaseHappeningList(ListView):
allow_empty=True
def get_context_data(self, **kwargs):
c = super(BaseHappeningList, self).get_context_data(**kwargs)
c['towns'] = Town.objects.all()
return c
class FutureHappeningList(BaseHappeningList):
def get_queryset(self):
return Happening.objects.future()
class TownHappeningList(BaseHappeningList):
def get_queryset(self):
self.town = get_object_or_404(Town, slug=self.kwargs['slug'])
return Happening.objects.filter(town=self.town)
def get_context_data(self, **kwargs):
c = super(TownHappeningList, self).get_context_data(**kwargs)
c['reason'] = self.town
return c
class OtherTownHappeningList(TownHappeningList):
def get_queryset(self):
self.town = _('Other')
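        # Happenings with no Town foreign key but a free-text location are grouped
        # under the pseudo-town label "Other" assigned above.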
return Happening.objects.filter(town__isnull=True,
other_town__isnull=False)
class DayHappeningList(DayArchiveView):
model = Happening
date_field = 'date'
template_name = 'agenda/happening_list.html'
allow_future = True
month_format = '%m'
def get_context_data(self, **kwargs):
c = super(DayHappeningList, self).get_context_data(**kwargs)
c['towns'] = Town.objects.all()
return c
class HappeningDetail(DetailView):
model = Happening
slug_field = 'slug'
def get_context_data(self, **kwargs):
c = super(HappeningDetail, self).get_context_data(**kwargs)
c['towns'] = Town.objects.all()
return c
class CreateHappening(CreateView):
model = Happening
form_class = HappeningForm
def get_context_data(self, **kwargs):
c = super(CreateHappening, self).get_context_data(**kwargs)
c['towns'] = Town.objects.all()
return c
class SuccessfulCreate(TemplateView):
template_name = 'agenda/successful.html'
|
GISAElkartea/antxetamedia
|
antxetamedia/agenda/views.py
|
Python
|
agpl-3.0
| 2,216 | 0.002256 |
import pygame, sys
from pygame.locals import *
# --- Functions ---
def distance(speed, time):
distance = time * speed
return distance
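# distance() converts an elapsed frame time in milliseconds into pixels travelled at
# `speed` pixels per millisecond (assumption based on how it is called with frameTime).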
# --- Classes ---
class Character(object):
def __init__(self, position, direction, sprite):
self.position = position
self.direction = direction
self.sprite = sprite
self.nextPosition = self.position
def move(self, direction):
if self.direction == 0:
self.nextPosition[1] = self.nextPosition[1] - 64
elif self.direction == 1:
self.nextPosition[0] = self.nextPosition[0] - 64
elif self.direction == 2:
self.nextPosition[1] = self.nextPosition[1] + 64
elif self.direction == 3:
self.nextPosition[0] = self.nextPosition[0] + 64
    def updateAnimation(self):
        pass  # animation update not implemented; placeholder keeps the class definition valid
# --- Set up ---
pygame.init()
windowSurface = pygame.display.set_mode((1216, 768), 0, 32) #always 0 and 32
pygame.display.set_caption('Lanceloet van Denemerken')
basicFont = pygame.font.SysFont(None, 23)
mainClock = pygame.time.Clock()
# --- Other variables ---
showDebug = True
loopTrack = 0
playerPos = [0, 0]
nextPlayerPos = [0, 0]
lastPress = 0
# --- Constants ---
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
# --- Objects ---
# --- Image & music Loading ---
directionList = [pygame.image.load('up.png'),
pygame.image.load('left.png'),
pygame.image.load('down.png'),
pygame.image.load('right.png'),
pygame.image.load('upl.png'),
pygame.image.load('leftl.png'),
pygame.image.load('downl.png'),
pygame.image.load('rightl.png'),
pygame.image.load('upr.png'),
pygame.image.load('leftr.png'),
pygame.image.load('downr.png'),
pygame.image.load('rightr.png')]
picScaleTrack = 0
for picture in directionList:
directionList[picScaleTrack] = pygame.transform.scale(picture, (64, 64))
picScaleTrack += 1
player = directionList[5]
tile = pygame.image.load('tile.png')
# --- Main loop ---
while True:
# --- Variables outside gamestate ---
frameTime = mainClock.tick(1000)
FPS = mainClock.get_fps()
currentTime = pygame.time.get_ticks()
mousePosition = pygame.mouse.get_pos()
loopTrack = loopTrack + 1
# --- first blit/fill ---
windowSurface.fill(WHITE)
windowSurface.blit(player, (playerPos[0], playerPos[1]))
# --- Movement ---
if pygame.key.get_pressed()[119] and pygame.time.get_ticks() - lastPress >= 100:
player = directionList[0]
nextPlayerPos[1] -= 64
elif pygame.key.get_pressed()[97] and pygame.time.get_ticks() - lastPress >= 100:
player = directionList[1]
nextPlayerPos[0] -= 64
elif pygame.key.get_pressed()[115] and pygame.time.get_ticks() - lastPress >= 100:
player = directionList[2]
nextPlayerPos[1] += 64
elif pygame.key.get_pressed()[100] and pygame.time.get_ticks() - lastPress >= 100:
player = directionList[3]
nextPlayerPos[0] += 64
if (pygame.key.get_pressed()[119] or pygame.key.get_pressed()[97] or pygame.key.get_pressed()[115] or pygame.key.get_pressed()[100]) and pygame.time.get_ticks() - lastPress >= 100:
lastPress = pygame.time.get_ticks()
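        # Remember when this key press was handled so further moves are ignored for
        # 100 ms, a simple debounce while the 64-pixel step animation catches up.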
# --- Animation ---
if playerPos[0] != nextPlayerPos[0]:
if playerPos[0] < nextPlayerPos[0]:
playerPos[0] += distance(1, frameTime)
elif playerPos[0] > nextPlayerPos[0]:
playerPos[0] -= distance(1, frameTime)
if playerPos[1] != nextPlayerPos[1]:
if playerPos[1] < nextPlayerPos[1]:
playerPos[1] += distance(1, frameTime)
elif playerPos[1] > nextPlayerPos[1]:
playerPos[1] -= distance(1, frameTime)
# --- Tile visualisation ---
for y in range(-1, 12):
for x in range(-1, 19):
windowSurface.blit(tile, (x * 64, y * 64))
# --- Debug ---
if showDebug == True:
debug = nextPlayerPos
debugText = basicFont.render(str(debug), True, RED) #text | antialiasing | color
windowSurface.blit(debugText, (1, 1))
# --- Events ---
pygame.display.update()
for event in pygame.event.get():
if event.type == KEYUP:
if event.key == 284:
showDebug = not showDebug
if event.type == QUIT:
pygame.quit()
sys.exit()
|
Pietdagamer/Lanseloet
|
Old/2.py
|
Python
|
mit
| 4,586 | 0.006542 |
# -*- coding: utf-8 -*-
#
# PyOmicron documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 26 09:12:21 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
'sphinxcontrib.programoutput',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyOmicron'
copyright = u'2016, Duncan Macleod'
author = u'Duncan Macleod'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'PyOmicron v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyOmicrondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyOmicron.tex', u'PyOmicron Documentation',
u'Duncan Macleod', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyomicron', u'PyOmicron Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyOmicron', u'PyOmicron Documentation',
author, 'PyOmicron', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None),
'gwpy': ('http://gwpy.github.io/docs/latest/', None),
}
|
ligovirgo/pyomicron
|
docs/conf.py
|
Python
|
gpl-3.0
| 9,982 | 0.00561 |
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '{{ taiga_database_username }}',
'USER': '{{ taiga_database_username }}',
'PASSWORD': '{{ taiga_database_password }}',
'HOST': '{{ taiga_database_host }}',
'PORT': '{{ taiga_database_port }}',
}
}
HOST="http://{{ taiga_hostname }}:{{ taiga_port }}"
#MEDIA_ROOT = '/home/taiga/media'
#STATIC_ROOT = '/home/taiga/static'
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_USE_TLS = True
EMAIL_HOST = "{{ taiga_email_host }}"
EMAIL_HOST_USER = "{{ taiga_aws_access_key_id }}"
EMAIL_HOST_PASSWORD = "{{ taiga_aws_secret_access_key }}"
EMAIL_PORT = "{{ taiga_email_port }}"
DEFAULT_FROM_EMAIL = "{{ taiga_from_email_address }}"
# THROTTLING
#REST_FRAMEWORK["DEFAULT_THROTTLE_RATES"] = {
# "anon": "20/min",
# "user": "200/min",
# "import-mode": "20/sec"
#}
# GITHUB SETTINGS
#GITHUB_URL = "https://github.com/"
#GITHUB_API_URL = "https://api.github.com/"
#GITHUB_API_CLIENT_ID = "yourgithubclientid"
#GITHUB_API_CLIENT_SECRET = "yourgithubclientsecret"
|
JScott/ansible-taiga
|
templates/opt/taiga/back/settings/local.py
|
Python
|
mit
| 1,982 | 0.005561 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of Spartacus project
Copyright (C) 2016 CSE
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from CapuaEnvironment.Instruction.OperationDescription import operationDescription
__author__ = "CSE"
__copyright__ = "Copyright 2015, CSE"
__credits__ = ["CSE"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "CSE"
__status__ = "Dev"
class Instruction:
"""
This class is only used for instruction encapsulation after they are fetched from memory.
An instance of this class is built by the InstructionFetchUnit and is passed to the
ExecutionUnit for execution. This class is simply there to help lower the number of
binary parsing required in the execution of a given instruction. It is also used
by the Assembler in order to help build the binary code associated with a specific
instruction.
"""
instructionCode = None
sourceRegister = None
destinationRegister = None
sourceImmediate = None
destinationImmediate = None
width = None
flags = None
operationMnemonic = None
instructionLength = None
def __init__(self, binaryInstruction=0b000000, form=None, skipValidation=False):
"""
This allow for initialisation of the instruction by parsing the binary instruction code
:param binaryInstruction: A big number representing the instruction
:param form: The form representing the instruction as shown in FormDescription.py
:return: An instruction! Warning, this instruction could be invalid!
"""
if not skipValidation:
self.instructionLength = form["length"]
# Parse the description so we can initiate the instruction
for descriptionElement in form["description"]:
mask = form["description"][descriptionElement]
extractedBinary = self._extractValueFromBinaryField(mask, binaryInstruction)
# There is no validation here for performance reason
# WE RELY ON THE FACT that the form description is correct
# and does not contain any typo!!!
setattr(self, descriptionElement, extractedBinary)
# This will simply get the instruction mnemonic from a list of possible mnemonics
for instructionMnemonic in operationDescription:
if self.instructionCode in operationDescription[instructionMnemonic]:
self.operationMnemonic = instructionMnemonic
break
if self.operationMnemonic is None:
raise ValueError("Invalid instruction detected")
return
def _extractValueFromBinaryField(self, mask, field):
"""
This method takes a value and a binary mask. It will extract the part of the value that
is covered by the mask and return it to the user.
Example:
value = 0b11111111
mask = 0b00110000
result = 0b00110000
returned value = 0b11
:param mask: Binary mask showing what need to be extracted
:param field: The field from which we want to do the extract
:return: int, the extracted value
"""
# First, we get the mask at the right. Looking for mask % 2 will, when = 1
# tell us that the mask is now fully aligned at the right
if mask > 0:
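            # Shift mask and field together until the mask's lowest set bit reaches
            # position 0, right-aligning the covered bits of the field.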
while (mask % 2) != 1:
mask >>= 1
field >>= 1
field &= mask # The result is the "parsed" value
return field
|
CommunicationsSecurityEstablishment/spartacus
|
CapuaEnvironment/Instruction/Instruction.py
|
Python
|
gpl-2.0
| 4,253 | 0.003292 |
#!/usr/local/bin/python3
# coding: utf-8
try:
import json, requests, urllib
from geopy.geocoders import Nominatim
from geopy.distance import vincenty
except:
print("Error importing modules, exiting.")
exit()
api_url = "http://opendata.iprpraha.cz/CUR/FSV/FSV_VerejnaWC_b/WGS_84/FSV_VerejnaWC_b.json"
def find(address):
# convert address to latlong
me = locate(address+", Prague")
if me == None:
return None
toilets = getToilets(api_url)
# Get closest toilet
wcID = getClosestID(me, toilets)
wc = toilets[wcID-1]
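    # OBJECTID values in the source data appear to start at 1 and follow list order,
    # so the matching feature is assumed to sit at index OBJECTID - 1.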
data = []
try:
address = wc['properties']['ADRESA']
except:
address = "Address not available."
try:
typ = wc['properties']['TYP']
except:
typ = ""
r = "Closest public toilet is {} meters away.\n{}".format(getDist(me, wc), address)
return [r , getCoords(wc)]
def getClosestID(me, toilets):
a = {}
for toilet in toilets:
ID = toilet['properties']['OBJECTID']
a[ID] = getDist(me, toilet)
closest = min(a,key=a.get) # list offset
print("ID {} is {} meters away.".format(closest,a[closest]))
return closest
def getDist( coords, toilet):
loc = toilet['geometry']['coordinates']
loc = (loc[1],loc[0]) # Switch coords position
dist = round(vincenty(coords, loc).meters)
return dist
def getCoords(toilet):
loc = toilet['geometry']['coordinates']
return (loc[1],loc[0])
def locate(address):
geolocator = Nominatim()
location = geolocator.geocode(address)
if location:
coords = (location.latitude, location.longitude)
return coords
return None
def getToilets(url):
# outputs list of dicts
response = requests.get(url)
content = response.content.decode('utf-8')
js = json.loads(content)
return js['features']
# x = find()
# print(x)
|
vkotek/kotek_bot
|
features/toilet_finder.py
|
Python
|
unlicense
| 1,885 | 0.009549 |
#!/usr/bin/env python
import sys
import os
from treestore import Treestore
try: taxonomy = sys.argv[1]
except: taxonomy = None
t = Treestore()
treebase_uri = 'http://purl.org/phylo/treebase/phylows/tree/%s'
tree_files = [x for x in os.listdir('trees') if x.endswith('.nex')]
base_uri = 'http://www.phylocommons.org/trees/%s'
tree_list = set(t.list_trees())
for tree_uri in tree_list:
if not 'TB2_' in tree_uri: continue
tree_id = t.id_from_uri(tree_uri)
tb_uri = treebase_uri % (tree_id.replace('_', ':'))
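    # e.g. a local tree id "TB2_12345" maps to the TreeBASE PhyloWS URI
    # http://purl.org/phylo/treebase/phylows/tree/TB2:12345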
print tree_id, tb_uri
t.annotate(tree_uri, annotations='?tree bibo:cites <%s> .' % tb_uri)
|
NESCent/phylocommons
|
tools/treebase_scraper/annotate_trees.py
|
Python
|
mit
| 622 | 0.008039 |
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Author: Cyril Jaquier
#
# $Revision$
__author__ = "Cyril Jaquier"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import unittest
from server.datedetector import DateDetector
from server.datetemplate import DateTemplate
class DateDetectorTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.__datedetector = DateDetector()
self.__datedetector.addDefaultTemplate()
def tearDown(self):
"""Call after every test case."""
def testGetEpochTime(self):
log = "1138049999 [sshd] error: PAM: Authentication failure"
date = [2006, 1, 23, 21, 59, 59, 0, 23, 0]
dateUnix = 1138049999.0
self.assertEqual(self.__datedetector.getTime(log), date)
self.assertEqual(self.__datedetector.getUnixTime(log), dateUnix)
def testGetTime(self):
log = "Jan 23 21:59:59 [sshd] error: PAM: Authentication failure"
date = [2005, 1, 23, 21, 59, 59, 1, 23, -1]
dateUnix = 1106513999.0
self.assertEqual(self.__datedetector.getTime(log), date)
self.assertEqual(self.__datedetector.getUnixTime(log), dateUnix)
# def testDefaultTempate(self):
# self.__datedetector.setDefaultRegex("^\S{3}\s{1,2}\d{1,2} \d{2}:\d{2}:\d{2}")
# self.__datedetector.setDefaultPattern("%b %d %H:%M:%S")
#
# log = "Jan 23 21:59:59 [sshd] error: PAM: Authentication failure"
# date = [2005, 1, 23, 21, 59, 59, 1, 23, -1]
# dateUnix = 1106513999.0
#
# self.assertEqual(self.__datedetector.getTime(log), date)
# self.assertEqual(self.__datedetector.getUnixTime(log), dateUnix)
|
yarikoptic/Fail2Ban-Old-SVNGIT
|
testcases/datedetectortestcase.py
|
Python
|
gpl-2.0
| 2,300 | 0.01913 |
# Unix SMB/CIFS implementation.
# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This provides a wrapper around the cmd interface so that tests can
# easily be built on top of it and have minimal code to run basic tests
# of the commands. A list of the environmental variables can be found in
# ~/selftest/selftest.pl
#
# These can all be accessed via os.environ["VARIABLENAME"] when needed
import random
import string
from samba.auth import system_session
from samba.samdb import SamDB
from cStringIO import StringIO
from samba.netcmd.main import cmd_sambatool
import samba.tests
class SambaToolCmdTest(samba.tests.TestCaseInTempDir):
def getSamDB(self, *argv):
"""a convenience function to get a samdb instance so that we can query it"""
# We build a fake command to get the options created the same
# way the command classes do it. It would be better if the command
# classes had a way to more cleanly do this, but this lets us write
# tests for now
cmd = cmd_sambatool.subcommands["user"].subcommands["setexpiry"]
parser, optiongroups = cmd._create_parser("user")
opts, args = parser.parse_args(list(argv))
# Filter out options from option groups
args = args[1:]
kwargs = dict(opts.__dict__)
for option_group in parser.option_groups:
for option in option_group.option_list:
if option.dest is not None:
del kwargs[option.dest]
kwargs.update(optiongroups)
H = kwargs.get("H", None)
sambaopts = kwargs.get("sambaopts", None)
credopts = kwargs.get("credopts", None)
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
return samdb
def runcmd(self, name, *args):
"""run a single level command"""
cmd = cmd_sambatool.subcommands[name]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def runsubcmd(self, name, sub, *args):
"""run a command with sub commands"""
# The reason we need this function separate from runcmd is
# that the .outf StringIO assignment is overriden if we use
# runcmd, so we can't capture stdout and stderr
cmd = cmd_sambatool.subcommands[name].subcommands[sub]
cmd.outf = StringIO()
cmd.errf = StringIO()
result = cmd._run(name, *args)
return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
def assertCmdSuccess(self, val, msg=""):
self.assertIsNone(val, msg)
def assertCmdFail(self, val, msg=""):
self.assertIsNotNone(val, msg)
def assertMatch(self, base, string, msg=""):
self.assertTrue(string in base, msg)
def randomName(self, count=8):
"""Create a random name, cap letters and numbers, and always starting with a letter"""
name = random.choice(string.ascii_uppercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 1))
return name
def randomPass(self, count=16):
name = random.choice(string.ascii_uppercase)
name += random.choice(string.digits)
name += random.choice(string.ascii_lowercase)
name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 3))
return name
def randomXid(self):
# pick some hopefully unused, high UID/GID range to avoid interference
# from the system the test runs on
xid = random.randint(4711000, 4799000)
return xid
def assertWithin(self, val1, val2, delta, msg=""):
"""Assert that val1 is within delta of val2, useful for time computations"""
self.assertTrue(((val1 + delta) > val2) and ((val1 - delta) < val2), msg)
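# A minimal sketch of a test built on the helpers above (hypothetical example,
# not part of the samba test suite): create a user with a random name and
# password via ``samba-tool user create`` and check that the command succeeds.
class UserCreateExampleTest(SambaToolCmdTest):
    def test_create_user_example(self):
        username = self.randomName()
        password = self.randomPass()
        (result, out, err) = self.runsubcmd("user", "create",
                                            username, password)
        self.assertCmdSuccess(result, "user create failed: %s" % err)
        self.assertMatch(out, username,
                         "expected the new username in the command output")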
|
yasoob/PythonRSSReader
|
venv/lib/python2.7/dist-packages/samba/tests/samba_tool/base.py
|
Python
|
mit
| 4,702 | 0.002127 |
from pywin.mfc import dialog
import win32api
import win32con
import win32ui
import copy
import string
from . import scintillacon
# Used to indicate that style should use default color
from win32con import CLR_INVALID
######################################################
# Property Page for syntax formatting options
# The standard 16 color VGA palette should always be possible
paletteVGA = (
("Black", win32api.RGB(0,0,0)),
("Navy", win32api.RGB(0,0,128)),
("Green", win32api.RGB(0,128,0)),
("Cyan", win32api.RGB(0,128,128)),
("Maroon", win32api.RGB(128,0,0)),
("Purple", win32api.RGB(128,0,128)),
("Olive", win32api.RGB(128,128,0)),
("Gray", win32api.RGB(128,128,128)),
("Silver", win32api.RGB(192,192,192)),
("Blue", win32api.RGB(0,0,255)),
("Lime", win32api.RGB(0,255,0)),
("Aqua", win32api.RGB(0,255,255)),
("Red", win32api.RGB(255,0,0)),
("Fuchsia", win32api.RGB(255,0,255)),
("Yellow", win32api.RGB(255,255,0)),
("White", win32api.RGB(255,255,255)),
# and a few others will generally be possible.
("DarkGrey", win32api.RGB(64,64,64)),
("PurpleBlue", win32api.RGB(64,64,192)),
("DarkGreen", win32api.RGB(0,96,0)),
("DarkOlive", win32api.RGB(128,128,64)),
("MediumBlue", win32api.RGB(0,0,192)),
("DarkNavy", win32api.RGB(0,0,96)),
("Magenta", win32api.RGB(96,0,96)),
("OffWhite", win32api.RGB(255,255,220)),
("LightPurple", win32api.RGB(220,220,255)),
("<Default>", win32con.CLR_INVALID)
)
class ScintillaFormatPropertyPage(dialog.PropertyPage):
def __init__(self, scintillaClass = None, caption = 0):
self.scintillaClass = scintillaClass
dialog.PropertyPage.__init__(self, win32ui.IDD_PP_FORMAT, caption=caption)
def OnInitDialog(self):
try:
if self.scintillaClass is None:
from . import control
sc = control.CScintillaEdit
else:
sc = self.scintillaClass
self.scintilla = sc()
style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.ES_MULTILINE
# Convert the rect size
rect = self.MapDialogRect( (5, 5, 120, 75))
self.scintilla.CreateWindow(style, rect, self, 111)
self.HookNotify(self.OnBraceMatch, scintillacon.SCN_CHECKBRACE)
self.scintilla.HookKeyStroke(self.OnEsc, 27)
self.scintilla.SCISetViewWS(1)
self.pos_bstart = self.pos_bend = self.pos_bbad = 0
colorizer = self.scintilla._GetColorizer()
text = colorizer.GetSampleText()
items = text.split('|', 2)
pos = len(items[0])
self.scintilla.SCIAddText(''.join(items))
self.scintilla.SetSel(pos, pos)
self.scintilla.ApplyFormattingStyles()
self.styles = self.scintilla._GetColorizer().styles
self.cbo = self.GetDlgItem(win32ui.IDC_COMBO1)
for c in paletteVGA:
self.cbo.AddString(c[0])
self.cboBoldItalic = self.GetDlgItem(win32ui.IDC_COMBO2)
for item in ["Bold Italic", "Bold", "Italic", "Regular"]:
self.cboBoldItalic.InsertString(0, item)
self.butIsDefault = self.GetDlgItem(win32ui.IDC_CHECK1)
self.butIsDefaultBackground = self.GetDlgItem(win32ui.IDC_CHECK2)
self.listbox = self.GetDlgItem(win32ui.IDC_LIST1)
self.HookCommand(self.OnListCommand, win32ui.IDC_LIST1)
names = list(self.styles.keys())
names.sort()
for name in names:
if self.styles[name].aliased is None:
self.listbox.AddString(name)
self.listbox.SetCurSel(0)
idc = win32ui.IDC_RADIO1
if not self.scintilla._GetColorizer().bUseFixed: idc = win32ui.IDC_RADIO2
self.GetDlgItem(idc).SetCheck(1)
self.UpdateUIForStyle(self.styles[names[0]])
self.scintilla.HookFormatter(self)
self.HookCommand(self.OnButDefaultFixedFont, win32ui.IDC_BUTTON1)
self.HookCommand(self.OnButDefaultPropFont, win32ui.IDC_BUTTON2)
self.HookCommand(self.OnButThisFont, win32ui.IDC_BUTTON3)
self.HookCommand(self.OnButUseDefaultFont, win32ui.IDC_CHECK1)
self.HookCommand(self.OnButThisBackground, win32ui.IDC_BUTTON4)
self.HookCommand(self.OnButUseDefaultBackground, win32ui.IDC_CHECK2)
self.HookCommand(self.OnStyleUIChanged, win32ui.IDC_COMBO1)
self.HookCommand(self.OnStyleUIChanged, win32ui.IDC_COMBO2)
self.HookCommand(self.OnButFixedOrDefault, win32ui.IDC_RADIO1)
self.HookCommand(self.OnButFixedOrDefault, win32ui.IDC_RADIO2)
except:
import traceback
traceback.print_exc()
def OnEsc(self, ch):
self.GetParent().EndDialog(win32con.IDCANCEL)
def OnBraceMatch(self, std, extra):
import pywin.scintilla.view
pywin.scintilla.view.DoBraceMatch(self.scintilla)
def GetSelectedStyle(self):
return self.styles[self.listbox.GetText(self.listbox.GetCurSel())]
def _DoButDefaultFont(self, extra_flags, attr):
baseFormat = getattr(self.scintilla._GetColorizer(), attr)
flags = extra_flags | win32con.CF_SCREENFONTS | win32con.CF_EFFECTS | win32con.CF_FORCEFONTEXIST
d=win32ui.CreateFontDialog(baseFormat, flags, None, self)
if d.DoModal()==win32con.IDOK:
setattr(self.scintilla._GetColorizer(), attr, d.GetCharFormat())
self.OnStyleUIChanged(0, win32con.BN_CLICKED)
def OnButDefaultFixedFont(self, id, code):
if code==win32con.BN_CLICKED:
self._DoButDefaultFont(win32con.CF_FIXEDPITCHONLY, "baseFormatFixed")
return 1
def OnButDefaultPropFont(self, id, code):
if code==win32con.BN_CLICKED:
self._DoButDefaultFont(win32con.CF_SCALABLEONLY, "baseFormatProp")
return 1
def OnButFixedOrDefault(self, id, code):
if code==win32con.BN_CLICKED:
bUseFixed = id == win32ui.IDC_RADIO1
self.GetDlgItem(win32ui.IDC_RADIO1).GetCheck() != 0
self.scintilla._GetColorizer().bUseFixed = bUseFixed
self.scintilla.ApplyFormattingStyles(0)
return 1
def OnButThisFont(self, id, code):
if code==win32con.BN_CLICKED:
flags = win32con.CF_SCREENFONTS | win32con.CF_EFFECTS | win32con.CF_FORCEFONTEXIST
style = self.GetSelectedStyle()
# If the selected style is based on the default, we need to apply
# the default to it.
def_format = self.scintilla._GetColorizer().GetDefaultFormat()
format = style.GetCompleteFormat(def_format)
d=win32ui.CreateFontDialog(format, flags, None, self)
if d.DoModal()==win32con.IDOK:
style.format = d.GetCharFormat()
self.scintilla.ApplyFormattingStyles(0)
return 1
def OnButUseDefaultFont(self, id, code):
if code == win32con.BN_CLICKED:
isDef = self.butIsDefault.GetCheck()
self.GetDlgItem(win32ui.IDC_BUTTON3).EnableWindow(not isDef)
if isDef: # Being reset to the default font.
style = self.GetSelectedStyle()
style.ForceAgainstDefault()
self.UpdateUIForStyle(style)
self.scintilla.ApplyFormattingStyles(0)
else:
# User wants to override default -
# do nothing!
pass
def OnButThisBackground(self, id, code):
if code==win32con.BN_CLICKED:
style = self.GetSelectedStyle()
bg = win32api.RGB(0xff, 0xff, 0xff)
if style.background != CLR_INVALID:
bg = style.background
d=win32ui.CreateColorDialog(bg, 0, self)
if d.DoModal()==win32con.IDOK:
style.background = d.GetColor()
self.scintilla.ApplyFormattingStyles(0)
return 1
def OnButUseDefaultBackground(self, id, code):
if code == win32con.BN_CLICKED:
isDef = self.butIsDefaultBackground.GetCheck()
self.GetDlgItem(win32ui.IDC_BUTTON4).EnableWindow(not isDef)
if isDef: # Being reset to the default color
style = self.GetSelectedStyle()
style.background = style.default_background
self.UpdateUIForStyle(style)
self.scintilla.ApplyFormattingStyles(0)
else:
# User wants to override default -
# do nothing!
pass
def OnListCommand(self, id, code):
if code==win32con.LBN_SELCHANGE:
style = self.GetSelectedStyle()
self.UpdateUIForStyle(style)
return 1
def UpdateUIForStyle(self, style ):
format = style.format
sel = 0
for c in paletteVGA:
if format[4] == c[1]:
# print "Style", style.name, "is", c[0]
break
sel = sel + 1
else:
sel = -1
self.cbo.SetCurSel(sel)
self.butIsDefault.SetCheck(style.IsBasedOnDefault())
self.GetDlgItem(win32ui.IDC_BUTTON3).EnableWindow(not style.IsBasedOnDefault())
self.butIsDefaultBackground.SetCheck(style.background == style.default_background)
self.GetDlgItem(win32ui.IDC_BUTTON4).EnableWindow(style.background != style.default_background)
bold = format[1] & win32con.CFE_BOLD != 0; italic = format[1] & win32con.CFE_ITALIC != 0
self.cboBoldItalic.SetCurSel( bold*2 + italic )
def OnStyleUIChanged(self, id, code):
if code in [win32con.BN_CLICKED, win32con.CBN_SELCHANGE]:
style = self.GetSelectedStyle()
self.ApplyUIFormatToStyle(style)
self.scintilla.ApplyFormattingStyles(0)
return 0
return 1
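	# Note on the ``format`` tuples handled below: they are the CHARFORMAT
	# tuples returned by GetCharFormat(). From the way this file uses them,
	# index 0 holds the mask flags, index 1 the character effects
	# (bold/italic), and index 4 the text colour; the remaining positions are
	# passed through unchanged.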
def ApplyUIFormatToStyle(self, style):
format = style.format
color = paletteVGA[self.cbo.GetCurSel()]
effect = 0
sel = self.cboBoldItalic.GetCurSel()
if sel==0:
effect = 0
elif sel==1:
effect = win32con.CFE_ITALIC
elif sel==2:
effect = win32con.CFE_BOLD
else:
effect = win32con.CFE_BOLD | win32con.CFE_ITALIC
maskFlags=format[0]|win32con.CFM_COLOR|win32con.CFM_BOLD|win32con.CFM_ITALIC
style.format = (maskFlags, effect, style.format[2], style.format[3], color[1]) + style.format[5:]
def OnOK(self):
self.scintilla._GetColorizer().SavePreferences()
return 1
def test():
	page = ScintillaFormatPropertyPage()
	sheet = dialog.PropertySheet("Test")
	sheet.AddPage(page)
	sheet.CreateWindow()
|
sserrot/champion_relationships
|
venv/Lib/site-packages/pythonwin/pywin/scintilla/configui.py
|
Python
|
mit
| 9,244 | 0.034401 |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class RegisterForm(UserCreationForm):
invitation = forms.CharField(max_length=8)
class Meta:
model = User
fields = ('username', 'password1', 'password2', 'invitation')
class SettingsForm(forms.Form):
username = forms.CharField(min_length=1)
ppic = forms.ImageField(required=False)
class Meta:
model = User
fields = ('username', 'first_name','last_name','ppic',)
|
AtenrevCode/scChat
|
users/forms.py
|
Python
|
mit
| 542 | 0.00738 |
#!/usr/bin/python3 -B
exec(open("../index.py").read())
from waitress import serve
serve(application, host='0.0.0.0', port=8080, threads=1, channel_timeout=1)
|
shark555/websnake_demo
|
scripts/serve.py
|
Python
|
mit
| 161 | 0.018634 |
"""
.. math.py
Simple math movers.
"""
## Inheritance
import base
## Needed for the Moving{Max,Min} queues below (path as hinted by the
## originally commented-out import)
import library.movers.pushqueue as pq
## Infinity definition
inf = float("inf")
#########################################
## ----- Special data containers ----- ##
#########################################
class MovingMax(base.Mover):
""" Counts the current maximum of a moving data window of length *n*
(which is infinite by default).
Example:
>>> mmax = MovingMax(3)
>>> data = [6, 9, 7, 6, 6, 3, 4, 4, 6, 2]
>>> [mmax(x) for x in data]
[6, 9, 9, 9, 7, 6, 6, 4, 6, 6]
"""
def __init__(self, n=inf, **kwargs):
self.n = n
kwargs.update(patient=False)
super(MovingMax, self).__init__(**kwargs)
def _eat(self, value):
self._queue.push(value)
return self._queue.max
def _zero(self):
self._queue = pq.MaxQueue(maxlen=self.n)
@property
def max(self):
try:
return self._queue.max
except KeyError:
return None
class MovingMin(base.Mover):
""" Counts the current minimum of a moving data window of length *n*
(which is infinite by default).
Example:
    >>> mmin = MovingMin(3)
>>> data = [6, 9, 7, 6, 6, 3, 4, 4, 6, 2]
>>> [mmin(x) for x in data]
[6, 6, 6, 6, 6, 3, 3, 3, 4, 2]
"""
def __init__(self, n=inf, **kwargs):
self.n = n
kwargs.update(patient=False)
super(MovingMin, self).__init__(**kwargs)
def _eat(self, value):
self._queue.push(value)
return self._queue.min
def _zero(self):
self._queue = pq.MinQueue(maxlen=self.n)
@property
def min(self):
try:
return self._queue.min
except KeyError:
return None
class MovingRatio(base.Mover):
""" A mover which return the ratio between the current value and the
last value. """
def __init__(self, n=1, **kwargs):
""" *n* is the delay factor. """
self.n = n
super(MovingRatio, self).__init__(**kwargs)
def _eat(self, value):
out = self._deque.push(value)
try:
ratio = value / out
except TypeError:
ratio = 1.0
return ratio
def _zero(self):
self._deque = self._get_deque(self.n)
class MovingSum(base.Mover):
""" Counts the accumulating sum of a moving data window of length *n*
(which is infinite by default).
Examples:
>>> msum = MovingSum()
>>> [msum(x) for x in xrange(10)]
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45]
>>> msum = MovingSum(3)
>>> [msum(x) for x in xrange(10)]
[0, 1, 3, 6, 9, 12, 15, 18, 21, 24]
"""
def __init__(self, n=inf):
self.n = n
super(MovingSum, self).__init__()
def _eat(self, value):
## Push eaten value and catch fallen value
out = self._deque.push(value)
## Increase-decrease sum
try:
self._sum += value - out
## Fallen value was None, so increase-only is made
except TypeError:
self._sum += value
## Return sum
return self._sum
def _zero(self):
self._deque = base.Deque((), maxlen=self.n)
self._sum = 0
def sgn(x):
""" Return the sign of *x*. """
return 1 if x.real > 0 else -1 if x.real < 0 else 0
class SignTracker(base.Mover):
""" Counts length of successing similar-signed values, where a 0 value
does not change trend, and ``None`` zeros the trend. By default, the
sign of a value is determined by :func:`sgn`, but the a different
sign function may be assigned to the *sgn* parameter, assuming it's a
function whose image is partial to the set ``{-1, 0, 1}``.
Basic example:
>>> tracker = SignTracker()
>>> data = [2, 3, -3, None, -1, 0, -1, 2]
>>> [tracker(value) for value in data]
[1, 2, -1, 0, -1, -2, -3, 1]
More complex example:
>>> my_sgn = lambda x: 1 if x >= 1.0 else -1 if x <= -1 else 0
>>> tracker = SignTracker(sgn=my_sgn)
>>> data = [2, 3, -0.5, None, -1, 0, -1, 1]
>>> [tracker(value) for value in data]
[1, 2, 3, 0, -1, -2, -3, 1]
"""
def __init__(self, sgn=sgn):
self.sgn = sgn
super(SignTracker, self).__init__()
def _eat(self, value):
## Get current+new signs
cur_sgn = sgn(self._count)
new_sgn = self.sgn(value)
## With trend
if cur_sgn * new_sgn >= 0:
self._count += sgn(cur_sgn + new_sgn)
return self._count
## Against trend
self._count = new_sgn
return self._count
def _zero(self):
self._count = 0
return 0
def _dffsgn(old, new):
return sgn(new-old)
class ToneTracker(base.Mover):
""" Tracks current "tone", which is defined (by default) to be the
sign of the substracting result of the value *gap* values earlier from
the current value. This may be overridden by a different *toner*,
assuming it's a function taking "old" and "new" as parameters.
Basic example:
>>> tracker = ToneTracker(gap=4)
>>> data = range(6) + range(5)[::-1]
>>> [tracker(value) for value in data]
[None, None, None, None, 1, 1, 1, 0, -1, -1, -1]
More complex example:
>>> my_toner = lambda old, new: sgn(len(new)-len(old))
>>> tracker = ToneTracker(toner=my_toner)
>>> data = [[1, 2], (3,), {4: 5, 6: 7}, range(8)]
>>> [tracker(value) for value in data]
[None, -1, 1, 1]
"""
def __init__(self, gap=1, toner=_dffsgn):
self.gap = gap
self.toner = toner
super(ToneTracker, self).__init__()
def _eat(self, value):
## Get old value
out = self._deque.push(value)
## Return relevant tone
if out is not self._deque.none:
return self.toner(out, value)
def _zero(self):
## Reset a deque
self._deque = base.Deque((), maxlen=self.gap)
class LocalExtrema(base.Mover):
""" Tracks local extremas, where "extrema" in this sense is a value which is
higher (lower) than its pre-defined neighbourhood. The neighbourhood is
defined by the parameters *left* and *right* (how many values to the left,
how many values to the right), whether this is max (1) or min (-1) is
determined by the *direction* parameter, and finally the *strict* boolean
parameter decides whether the comparison should be strict or not, when
comparing to the left.
Examples:
>>> data = [5, 6, 9, 6, 6, 8, 8, 8, 9]
>>> lmax11 = LocalExtrema(1, 1, 1, False)
>>> [lmax11(x) for x in data] ## We don't expect more than one max
[False, False, False, True, False, False, False, False, False]
>>> lmax11s = LocalExtrema(1, 1, 1, True)
>>> [lmax11s(x) for x in data]
[False, False, False, True, False, False, True, False, False]
>>> lmin11s = LocalExtrema(-1, 1, 1, True)
>>> [lmin11s(x) for x in data]
[False, False, False, False, True, False, False, False, False]
>>> lmin21 = LocalExtrema(-1, 2, 1, False)
>>> [lmin21(x) for x in data]
[False, False, False, False, False, True, False, False, True]
"""
def __init__(self, direction=1, left=1, right=1, strict=True):
self.direction = direction
self._ext = max if self.direction == 1 else min
self.left = left
self.right = right
self.strict = strict
super(LocalExtrema, self).__init__()
def _cmp(self, a, b):
return sgn(a - b) * self.direction >= int(self.strict)
def _eat(self, value):
## There is no candidate
if self._c is None:
## The left deque is full
if self._ld.isfull():
## The new value is a candidate
if self._cmp(value, self._ext(self._ld)):
self._c = value
## Push and return
self._ld.append(value)
return False
## Push
self._ld.append(value)
## We replace current candidate
if self._cmp(value, self._c):
self._empty_rd()
self._c = value
return False
## We continue with the current candidate
self._rd.append(value)
## Candidate has not yet won
if not self._rd.isfull():
return False
## Candidate has won
self._empty_rd()
self._del_c()
return True
def _empty_ld(self):
self._ld = base.Deque((), maxlen=self.left)
def _empty_rd(self):
self._rd = base.Deque((), maxlen=self.right)
def _del_c(self):
self._c = None
def _zero(self):
## Reset the deques and the candidate
self._empty_ld()
self._empty_rd()
self._del_c()
## Naive version, until I find a better one
class SignModCounter(base.Mover):
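    """ Counts the number of sign changes seen in a moving window of the last
    *n* values (infinite by default); zeros neither count as a change nor
    break a run.

    Illustration (assumed behaviour, consistent with the other movers here):

    >>> counter = SignModCounter()
    >>> [counter(x) for x in [1, -1, 1, 0, 1, -1]]
    [0, 1, 2, 2, 2, 3]
    """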
def __init__(self, n=inf, **kwargs):
self.n = n
kwargs.update(patient=False)
super(SignModCounter, self).__init__(**kwargs)
def _eat(self, value):
self._deque.push(value)
state = 0
mods = 0
for e in self._deque:
if state * e == -1:
mods += 1
if e:
state = e
return mods
def _zero(self):
self._deque = base.Deque(maxlen=self.n)
return 0
|
pelegm/movers
|
math.py
|
Python
|
unlicense
| 9,322 | 0.002789 |
"""
taskmaster.controller
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import cPickle as pickle
import gevent
import sys
from gevent_zeromq import zmq
from gevent.queue import Queue, Empty
from os import path, unlink, rename
from taskmaster.util import import_target
class Server(object):
def __init__(self, address, size=None):
self.daemon = True
self.started = False
self.size = size
self.queue = Queue(maxsize=size)
self.address = address
self.context = zmq.Context(1)
self.server = None
def send(self, cmd, data=''):
self.server.send_multipart([cmd, data])
def recv(self):
reply = self.server.recv_multipart()
assert len(reply) == 2
return reply
def bind(self):
if self.server:
self.server.close()
print "Taskmaster binding to %r" % self.address
self.server = self.context.socket(zmq.REP)
self.server.bind(self.address)
def start(self):
self.started = True
self.bind()
while self.started:
gevent.sleep(0)
cmd, data = self.recv()
if cmd == 'GET':
if not self.has_work():
self.send('QUIT')
continue
try:
job = self.queue.get_nowait()
except Empty:
self.send('WAIT')
continue
self.send('OK', pickle.dumps(job))
elif cmd == 'DONE':
self.queue.task_done()
if self.has_work():
self.send('OK')
else:
self.send('QUIT')
else:
self.send('ERROR', 'Unrecognized command')
self.shutdown()
def put_job(self, job):
return self.queue.put(job)
def first_job(self):
return self.queue.queue[0]
def get_current_size(self):
return self.queue.qsize()
def get_max_size(self):
return self.size
def has_work(self):
return not self.queue.empty()
def is_alive(self):
return self.started
def shutdown(self):
if not self.started:
return
self.server.close()
self.context.term()
self.started = False
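# A minimal sketch of the consumer side of the GET/DONE protocol served above
# (hypothetical example -- the real taskmaster client is not part of this
# file): a REQ socket repeatedly asks for work, unpickles (job_id, job) tuples
# from OK replies, acknowledges with DONE, and stops once the server says QUIT.
def _example_consumer(address):
    context = zmq.Context(1)
    client = context.socket(zmq.REQ)
    client.connect(address)
    try:
        while True:
            client.send_multipart(['GET', ''])
            cmd, data = client.recv_multipart()
            if cmd == 'QUIT':
                break
            if cmd == 'WAIT':
                gevent.sleep(0.1)
                continue
            job_id, job = pickle.loads(data)
            # ... process the job here ...
            client.send_multipart(['DONE', ''])
            cmd, _ = client.recv_multipart()
            if cmd == 'QUIT':
                break
    finally:
        client.close()
        context.term()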
class Controller(object):
def __init__(self, server, target, state_file=None, progressbar=True):
if isinstance(target, basestring):
target = import_target(target, 'get_jobs')
if not state_file:
target_file = sys.modules[target.__module__].__file__
state_file = path.join(path.dirname(target_file),
'%s.state' % (path.basename(target_file),))
self.server = server
self.target = target
self.state_file = state_file
if progressbar:
self.pbar = self.get_progressbar()
else:
self.pbar = None
def get_progressbar(self):
from taskmaster.progressbar import Counter, Speed, Timer, ProgressBar, UnknownLength, Value
sizelen = len(str(self.server.size))
format = 'In-Queue: %%-%ds / %%-%ds' % (sizelen, sizelen)
queue_size = Value(callback=lambda x: format % (self.server.get_current_size(), self.server.get_max_size()))
widgets = ['Completed Tasks: ', Counter(), ' | ', queue_size, ' | ', Speed(), ' | ', Timer()]
pbar = ProgressBar(widgets=widgets, maxval=UnknownLength)
return pbar
def read_state(self):
if path.exists(self.state_file):
print "Reading previous state from %r" % self.state_file
with open(self.state_file, 'r') as fp:
try:
return pickle.load(fp)
except EOFError:
pass
except Exception, e:
print "There was an error reading from state file. Ignoring and continuing without."
import traceback
traceback.print_exc()
print e
return {}
def update_state(self, job_id, job, fp=None):
last_job_id = getattr(self, '_last_job_id', None)
if self.pbar:
self.pbar.update(job_id)
if job_id == last_job_id:
return
if not job:
return
last_job_id = job_id
data = {
'job': job,
'job_id': job_id,
}
with open(self.state_file + '.tmp', 'w') as fp:
pickle.dump(data, fp)
rename(self.state_file + '.tmp', self.state_file)
def state_writer(self):
while self.server.is_alive():
gevent.sleep(0)
try:
job_id, job = self.server.first_job()
except IndexError:
self.update_state(None, None)
continue
self.update_state(job_id, job)
def reset(self):
if path.exists(self.state_file):
unlink(self.state_file)
def start(self):
kwargs = {}
last_job = self.read_state()
if last_job:
kwargs['last'] = last_job['job']
start_id = last_job['job_id']
else:
start_id = 0
gevent.spawn(self.server.start)
gevent.sleep(0)
if self.pbar:
self.pbar.start()
self.pbar.update(start_id)
state_writer = gevent.spawn(self.state_writer)
job_id, job = (None, None)
for job_id, job in enumerate(self.target(**kwargs), start_id):
self.server.put_job((job_id, job))
gevent.sleep(0)
while self.server.has_work():
gevent.sleep(0)
# Give clients a few seconds to receive a DONE message
gevent.sleep(3)
self.server.shutdown()
state_writer.join(1)
self.update_state(job_id, job)
if self.pbar:
self.pbar.finish()
|
alex/taskmaster
|
src/taskmaster/server.py
|
Python
|
apache-2.0
| 6,026 | 0.00083 |
#!/usr/bin/env python
import os.path
import re
from setuptools import Command, find_packages, setup
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys,subprocess
errno = subprocess.call([sys.executable, "runtests.py"])
raise SystemExit(errno)
def get_version():
"""build_version is replaced with the current build number
as part of the jenkins build job"""
build_version = 1
return build_version
def parse_requirements(file_name):
"""Taken from http://cburgmer.posterous.com/pip-requirementstxt-and-setuppy"""
requirements = []
for line in open(os.path.join(os.path.dirname(__file__), "config", file_name), "r"):
line = line.strip()
# comments and blank lines
if re.match(r"(^#)|(^$)", line):
continue
requirements.append(line)
return requirements
setup(
name="pettingzoo",
version="0.3.0" % get_version(),
url = "https://wiki.knewton.net/index.php/Tech",
author="Devon Jones",
author_email="devon@knewton.com",
license = "Apache",
packages=find_packages(),
scripts = ["bin/zncreate", "bin/zndelete"],
cmdclass = {"test": PyTest},
package_data = {"config": ["requirements*.txt"]},
install_requires=parse_requirements("requirements.txt"),
tests_require=parse_requirements("requirements.testing.txt"),
description = "Python edition of the PettingZoo framework.",
long_description = "\n" + open("README").read(),
)
|
Knewton/pettingzoo-python
|
setup.py
|
Python
|
apache-2.0
| 1,447 | 0.041465 |
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from pcl_helper import *
def rgb_to_hsv(rgb_list):
rgb_normalized = [1.0*rgb_list[0]/255, 1.0*rgb_list[1]/255, 1.0*rgb_list[2]/255]
hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]
return hsv_normalized
def compute_color_histograms(cloud, using_hsv=False):
# Compute histograms for the clusters
point_colors_list = []
# Step through each point in the point cloud
for point in pc2.read_points(cloud, skip_nans=True):
rgb_list = float_to_rgb(point[3])
if using_hsv:
point_colors_list.append(rgb_to_hsv(rgb_list) * 255)
else:
point_colors_list.append(rgb_list)
# Populate lists with color values
channel_1_vals = []
channel_2_vals = []
channel_3_vals = []
for color in point_colors_list:
channel_1_vals.append(color[0])
channel_2_vals.append(color[1])
channel_3_vals.append(color[2])
# Compute histograms
# Take histograms in R, G, and B or similar channels (HSV etc.)
r_hist = np.histogram(channel_1_vals, bins=32, range=(0, 256))
g_hist = np.histogram(channel_2_vals, bins=32, range=(0, 256))
b_hist = np.histogram(channel_3_vals, bins=32, range=(0, 256))
# Concatenate and normalize the histograms
hist_features = np.concatenate((r_hist[0], g_hist[0], b_hist[0])).astype(np.float64)
normed_features = hist_features / np.sum(hist_features)
return normed_features
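# Note: the colour histogram above yields 3 channels x 32 bins = 96 normalised
# features. In the usual perception pipeline (an assumption -- the calling code
# is not part of this file) these are concatenated with the normal histograms
# below into a single feature vector for the classifier.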
def compute_normal_histograms(normal_cloud):
norm_x_vals = []
norm_y_vals = []
norm_z_vals = []
for norm_component in pc2.read_points(normal_cloud,
field_names = ('normal_x', 'normal_y', 'normal_z'),
skip_nans=True):
norm_x_vals.append(norm_component[0])
norm_y_vals.append(norm_component[1])
norm_z_vals.append(norm_component[2])
    # Compute histograms of normal values (just like with color); surface
    # normal components are unit-vector components, so they lie in [-1, 1]
    x_hist = np.histogram(norm_x_vals, bins=32, range=(-1, 1))
    y_hist = np.histogram(norm_y_vals, bins=32, range=(-1, 1))
    z_hist = np.histogram(norm_z_vals, bins=32, range=(-1, 1))
# Concatenate and normalize the histograms
hist_features = np.concatenate((x_hist[0], y_hist[0], z_hist[0])).astype(np.float64)
normed_features = hist_features / np.sum(hist_features)
return normed_features
|
squared9/Robotics
|
Robotic_PR2_3D_Perception_Pick_And_Place/sensor_stick/src/sensor_stick/features.py
|
Python
|
mit
| 2,473 | 0.002831 |
axes = az.plot_forest(non_centered_data,
kind='ridgeplot',
var_names=['theta'],
combined=True,
ridgeplot_overlap=3,
colors='white',
figsize=(9, 7))
axes[0].set_title('Estimated theta for 8 schools model')
|
mcmcplotlib/mcmcplotlib
|
api/generated/arviz-plot_forest-3.py
|
Python
|
apache-2.0
| 367 | 0.016349 |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas import Index, MultiIndex
@pytest.fixture
def idx():
    # a MultiIndex used to test the general functionality of this object
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index_names = ['first', 'second']
mi = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=index_names, verify_integrity=False)
return mi
@pytest.fixture
def idx_dup():
# compare tests/indexes/multi/conftest.py
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 0, 1, 1])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index_names = ['first', 'second']
mi = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=index_names, verify_integrity=False)
return mi
@pytest.fixture
def index_names():
# names that match those in the idx fixture for testing equality of
# names assigned to the idx
return ['first', 'second']
@pytest.fixture
def holder():
# the MultiIndex constructor used to base compatibility with pickle
return MultiIndex
@pytest.fixture
def compat_props():
# a MultiIndex must have these properties associated with it
return ['shape', 'ndim', 'size']
|
cython-testbed/pandas
|
pandas/tests/indexes/multi/conftest.py
|
Python
|
bsd-3-clause
| 1,577 | 0 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Martine Lenders <mail@martine-lenders.eu>
#
# Distributed under terms of the MIT license.
from __future__ import print_function
import argparse
import os, sys
import random
import pexpect
import subprocess
import time
import types
DEFAULT_TIMEOUT = 5
class Strategy(object):
def __init__(self, func=None):
if func != None:
if sys.version_info < (3,):
self.__class__.execute = types.MethodType(func, self, self.__class__)
else:
self.__class__.execute = types.MethodType(func, self)
def execute(self, *args, **kwargs):
raise NotImplementedError()
class ApplicationStrategy(Strategy):
def __init__(self, app_dir=os.getcwd(), func=None):
super(ApplicationStrategy, self).__init__(func)
self.app_dir = app_dir
class BoardStrategy(Strategy):
def __init__(self, board, func=None):
super(BoardStrategy, self).__init__(func)
self.board = board
    def _run_make(self, application, make_targets, env=None):
        # Build the environment for make: caller overrides on top of
        # os.environ, plus the board-specific variables (BOARD, PORT, SERIAL).
        make_env = os.environ.copy()
        if env is not None:
            make_env.update(env)
        make_env.update(self.board.to_env())
        cmd = ("make", "-C", application) + make_targets
        print(' '.join(cmd))
        print(subprocess.check_output(cmd, env=make_env))
    def execute(self, application):
        super(BoardStrategy, self).execute(application)
class CleanStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("-B", "clean"), env)
class BuildStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)
class FlashStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)
class ResetStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("reset",), env)
class Board(object):
def __init__(self, name, port=None, serial=None, clean=None,
build=None, flash=None,
reset=None, term=None):
def _reset_native_execute(obj, application, env=None, *args, **kwargs):
pass
if (name == "native") and (reset == None):
reset = _reset_native_execute
self.name = name
self.port = port
self.serial = serial
self.clean_strategy = CleanStrategy(self, clean)
self.build_strategy = BuildStrategy(self, build)
self.flash_strategy = FlashStrategy(self, flash)
self.reset_strategy = ResetStrategy(self, reset)
def __len__(self):
return 1
def __iter__(self):
return self
def next(self):
raise StopIteration()
def __repr__(self):
return ("<Board %s,port=%s,serial=%s>" %
(repr(self.name), repr(self.port), repr(self.serial)))
def to_env(self):
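        # Illustration (hypothetical values): Board("samr21-xpro",
        # port="/dev/ttyACM0").to_env() returns
        # {'BOARD': 'samr21-xpro', 'PORT': '/dev/ttyACM0'}.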
env = {}
if self.name:
env['BOARD'] = self.name
if self.port:
env['PORT'] = self.port
if self.serial:
env['SERIAL'] = self.serial
return env
def clean(self, application=os.getcwd(), env=None):
        self.clean_strategy.execute(application, env)
def build(self, application=os.getcwd(), env=None):
self.build_strategy.execute(application, env)
def flash(self, application=os.getcwd(), env=None):
self.flash_strategy.execute(application, env)
def reset(self, application=os.getcwd(), env=None):
self.reset_strategy.execute(application, env)
class BoardGroup(object):
def __init__(self, boards):
self.boards = boards
def __len__(self):
return len(self.boards)
def __iter__(self):
return iter(self.boards)
def __repr__(self):
return str(self.boards)
def clean(self, application=os.getcwd(), env=None):
for board in self.boards:
board.clean(application, env)
def build(self, application=os.getcwd(), env=None):
for board in self.boards:
board.build(application, env)
def flash(self, application=os.getcwd(), env=None):
for board in self.boards:
board.flash(application, env)
def reset(self, application=os.getcwd(), env=None):
for board in self.boards:
board.reset(application, env)
def default_test_case(board_group, application, env=None):
for board in board_group:
        run_env = os.environ.copy()
        if env is not None:
            run_env.update(env)
        run_env.update(board.to_env())
        with pexpect.spawn("make", ["-C", application, "term"], env=run_env,
                           timeout=DEFAULT_TIMEOUT,
                           logfile=sys.stdout) as spawn:
spawn.expect("TEST: SUCCESS")
class TestStrategy(ApplicationStrategy):
def execute(self, board_groups, test_cases=[default_test_case],
timeout=DEFAULT_TIMEOUT, env=None):
for board_group in board_groups:
print("Testing for %s: " % board_group)
for test_case in test_cases:
board_group.reset()
test_case(board_group, self.app_dir, env=None)
sys.stdout.write('.')
sys.stdout.flush()
print()
def get_ipv6_address(spawn):
spawn.sendline(u"ifconfig")
spawn.expect(u"[A-Za-z0-9]{2}[0-9]+: inet6 (fe80::[0-9a-f:]+)")
return spawn.match.group(1)
def test_ipv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env != None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env != None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawn("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawn("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"ip server start %d" % ipprot)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"ip send %s %d 01:23:45:67:89:ab:cd:ef" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 8 byte to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"000000 60 00 00 00 00 08 %s ff fe 80 00 00 00 00 00 00" % hex(ipprot)[2:])
receiver.expect(u"000010( [0-9a-f]{2}){8} fe 80 00 00 00 00 00 00")
receiver.expect(u"000020( [0-9a-f]{2}){8} 01 23 45 67 89 ab cd ef")
def test_udpv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env != None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env != None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawn("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawn("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
port = random.randint(0x0000, 0xffff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"udp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d ab:cd:ef" % (receiver_ip, port))
sender.expect_exact(u"Success: send 3 byte to [%s]:%d" %
(receiver_ip, port))
receiver.expect(u"000000 ab cd ef")
def test_dual_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env != None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env != None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawn("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawn("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
port = random.randint(0x0000, 0xffff)
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"ip server start %d" % ipprot)
receiver.sendline(u"udp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d 01:23" % (receiver_ip, port))
sender.expect_exact(u"Success: send 2 byte to [%s]:%d" %
(receiver_ip, port))
receiver.expect(u"000000 01 23")
sender.sendline(u"ip send %s %d 01:02:03:04" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 4 byte to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"000000 60 00 00 00 00 04 %s ff fe 80 00 00 00 00 00 00" % hex(ipprot)[2:])
receiver.expect(u"000010( [0-9a-f]{2}){8} fe 80 00 00 00 00 00 00")
receiver.expect(u"000020( [0-9a-f]{2}){8} 01 02 03 04")
if __name__ == "__main__":
del os.environ['TERMFLAGS']
TestStrategy().execute([BoardGroup((Board("native", "tap0"), \
Board("native", "tap1")))], \
[test_ipv6_send, test_udpv6_send, test_dual_send])
|
alignan/RIOT
|
tests/lwip/tests/01-run.py
|
Python
|
lgpl-2.1
| 9,890 | 0.003539 |
from ij import IJ
from ij.gui import NonBlockingGenericDialog
from ij import WindowManager
from ij.gui import WaitForUserDialog
from ij import ImageStack
from ij import ImagePlus
from ij.plugin import ChannelSplitter
theImage = IJ.getImage()
sourceImages = []
if theImage.getNChannels() == 1:
IJ.run("8-bit")
sourceImages.append(theImage)
else:
sourceImages = ChannelSplitter.split(theImage)
sourceNames = []
for im in sourceImages:
im.show()
sourceNames.append(im.getTitle())
gd0 = NonBlockingGenericDialog("Select source image...")
gd0.addChoice("Source image",sourceNames,sourceNames[0])
gd0.showDialog()
if (gd0.wasOKed()):
chosenImage = gd0.getNextChoice()
theImage = WindowManager.getImage(chosenImage)
IJ.selectWindow(chosenImage)
else:
theImage = sourceImages[0]
IJ.selectWindow(sourceNames[0])
gd = NonBlockingGenericDialog("Set slice params...")
gd.addNumericField("Slice start:",1,0)
gd.addNumericField("Slice end:",theImage.getNSlices(),0)
gd.showDialog()
if (gd.wasOKed()):
## Selecting the ROI over the stack
startSlice = int(gd.getNextNumber())
	endSlice = int(gd.getNextNumber())
width = theImage.getWidth()
height = theImage.getHeight()
roiArray = []
for i in range(startSlice,endSlice+1):
theImage.setSlice(i)
bp = theImage.getProcessor().duplicate()
bp.setColor(0)
doStaySlice = True
while doStaySlice:
waiter = WaitForUserDialog("Draw ROI","Draw ROI, then hit OK")
waiter.show()
roi = theImage.getRoi()
if roi is None:
doStaySlice = True
else:
doStaySlice = False
roiArray.append(roi)
## Applying the ROI to each channel
newStacks = []
castImages = []
for procImage in sourceImages:
newStacks.append(ImageStack(width,height))
ns = newStacks[-1]
for i in range(startSlice,endSlice+1):
procImage.setSliceWithoutUpdate(i)
bp = procImage.getProcessor().duplicate()
bp.fillOutside(roiArray[i-startSlice])
ns.addSlice(bp)
castImages.append(ImagePlus(procImage.getShortTitle()+"_cast",ns))
## Displays the output
for castImage in castImages:
castImage.show()
## Cleans up the windows
for sourceImage in sourceImages:
sourceImage.close()
|
stalepig/deep-mucosal-imaging
|
dmi_0.3/Isolate_stack_ROI2.py
|
Python
|
gpl-2.0
| 2,120 | 0.034906 |
"""The tests for the Script component."""
# pylint: disable=too-many-public-methods,protected-access
from datetime import timedelta
from unittest import mock
import unittest
# Otherwise can't test just this file (import order issue)
import homeassistant.components # noqa
import homeassistant.util.dt as dt_util
from homeassistant.helpers import script, config_validation as cv
from tests.common import fire_time_changed, get_test_home_assistant
ENTITY_ID = 'script.test'
class TestScriptHelper(unittest.TestCase):
"""Test the Script component."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_firing_event(self):
"""Test the firing of events."""
event = 'test_event'
calls = []
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen(event, record_event)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA({
'event': event,
'event_data': {
'hello': 'world'
}
}))
script_obj.run()
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data.get('hello') == 'world'
assert not script_obj.can_cancel
def test_calling_service(self):
"""Test the calling of a service."""
calls = []
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register('test', 'script', record_call)
script.call_from_config(self.hass, {
'service': 'test.script',
'data': {
'hello': 'world'
}
})
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data.get('hello') == 'world'
def test_calling_service_template(self):
"""Test the calling of a service."""
calls = []
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register('test', 'script', record_call)
script.call_from_config(self.hass, {
'service_template': """
{% if True %}
test.script
{% else %}
test.not_script
{% endif %}""",
'data_template': {
'hello': """
{% if True %}
world
{% else %}
Not world
{% endif %}
"""
}
})
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data.get('hello') == 'world'
def test_delay(self):
"""Test the delay."""
event = 'test_event'
events = []
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{'delay': {'seconds': 5}},
{'event': event}]))
script_obj.run()
self.hass.block_till_done()
assert script_obj.is_running
assert script_obj.can_cancel
assert script_obj.last_action == event
assert len(events) == 1
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert not script_obj.is_running
assert len(events) == 2
def test_delay_template(self):
"""Test the delay as a template."""
        event = 'test_event'
events = []
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{'delay': '00:00:{{ 5 }}'},
{'event': event}]))
script_obj.run()
self.hass.block_till_done()
assert script_obj.is_running
assert script_obj.can_cancel
assert script_obj.last_action == event
assert len(events) == 1
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert not script_obj.is_running
assert len(events) == 2
def test_cancel_while_delay(self):
"""Test the cancelling while the delay is present."""
event = 'test_event'
events = []
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'delay': {'seconds': 5}},
{'event': event}]))
script_obj.run()
self.hass.block_till_done()
assert script_obj.is_running
assert len(events) == 0
script_obj.stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert not script_obj.is_running
assert len(events) == 0
def test_passing_variables_to_script(self):
"""Test if we can pass variables to script."""
calls = []
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register('test', 'script', record_call)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{
'service': 'test.script',
'data_template': {
'hello': '{{ greeting }}',
},
},
{'delay': {'seconds': 5}},
{
'service': 'test.script',
'data_template': {
'hello': '{{ greeting2 }}',
},
}]))
script_obj.run({
'greeting': 'world',
'greeting2': 'universe',
})
self.hass.block_till_done()
assert script_obj.is_running
assert len(calls) == 1
assert calls[-1].data['hello'] == 'world'
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert not script_obj.is_running
assert len(calls) == 2
assert calls[-1].data['hello'] == 'universe'
def test_condition(self):
"""Test if we can use conditions in a script."""
event = 'test_event'
events = []
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
self.hass.states.set('test.entity', 'hello')
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{
'condition': 'template',
'value_template': '{{ states.test.entity.state == "hello" }}',
},
{'event': event},
]))
script_obj.run()
self.hass.block_till_done()
assert len(events) == 2
self.hass.states.set('test.entity', 'goodbye')
script_obj.run()
self.hass.block_till_done()
assert len(events) == 3
@mock.patch('homeassistant.helpers.script.condition.async_from_config')
def test_condition_created_once(self, async_from_config):
"""Test that the conditions do not get created multiple times."""
event = 'test_event'
events = []
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
self.hass.states.set('test.entity', 'hello')
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{
'condition': 'template',
'value_template': '{{ states.test.entity.state == "hello" }}',
},
{'event': event},
]))
script_obj.run()
script_obj.run()
self.hass.block_till_done()
assert async_from_config.call_count == 1
assert len(script_obj._config_cache) == 1
def test_all_conditions_cached(self):
"""Test that multiple conditions get cached."""
event = 'test_event'
events = []
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
self.hass.states.set('test.entity', 'hello')
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{
'condition': 'template',
'value_template': '{{ states.test.entity.state == "hello" }}',
},
{
'condition': 'template',
'value_template': '{{ states.test.entity.state != "hello" }}',
},
{'event': event},
]))
script_obj.run()
self.hass.block_till_done()
assert len(script_obj._config_cache) == 2
|
Smart-Torvy/torvy-home-assistant
|
tests/helpers/test_script.py
|
Python
|
mit
| 9,704 | 0 |
# This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <aleksandar.erkalovic@sourcefabric.org>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction
from booki.editor import models
from booki.utils import security
def remote_get_status_messages(request, message, groupid):
from booki.statusnet.models import searchMessages
group = models.BookiGroup.objects.get(url_name=groupid)
mess = searchMessages('%%23%s' % group.url_name)
# remove this hard code
messages = ['<a href="http://status.flossmanuals.net/notice/%s">%s: %s</a>' % (m['id'], m['from_user'], m['text']) for m in mess['results']]
return {"list": messages}
def remote_init_group(request, message, groupid):
import sputnik
## get online users
try:
_onlineUsers = sputnik.smembers("sputnik:channel:%s:users" % message["channel"])
except:
_onlineUsers = []
if request.user.username not in _onlineUsers:
try:
sputnik.sadd("sputnik:channel:%s:users" % message["channel"], request.user.username)
except:
pass
return {}
def remote_leave_group(request, message, groupid):
group = models.BookiGroup.objects.get(url_name=groupid)
group.members.remove(request.user)
transaction.commit()
return {"result": True}
def remote_join_group(request, message, groupid):
group = models.BookiGroup.objects.get(url_name=groupid)
group.members.add(request.user)
transaction.commit()
return {"result": True}
|
aerkalov/Booktype
|
lib/booki/channels/group.py
|
Python
|
agpl-3.0
| 2,141 | 0.004671 |
#!/usr/bin/env python
import subprocess
import re
import os
import errno
import collections
import sys
class Platform(object):
pass
sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
def sdkinfo(sdkname):
ret = {}
for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
kv = line.strip().split(': ', 1)
if len(kv) == 2:
k,v = kv
ret[k] = v
return ret
sim_sdk_info = sdkinfo('iphonesimulator')
device_sdk_info = sdkinfo('iphoneos')
def latest_sdks():
latest_sim = None
latest_device = None
for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
match = sdk_re.match(line)
if match:
if 'Simulator' in line:
latest_sim = match.group(1)
elif 'iOS' in line:
latest_device = match.group(1)
return latest_sim, latest_device
sim_sdk, device_sdk = latest_sdks()
class simulator_platform(Platform):
sdk='iphonesimulator'
arch = 'i386'
name = 'simulator'
triple = 'i386-apple-darwin10'
sdkroot = sim_sdk_info['Path']
prefix = "#if !defined(__arm__) && defined(__i386__)\n\n"
suffix = "\n\n#endif"
class device_platform(Platform):
sdk='iphoneos'
name = 'ios'
arch = 'armv7'
triple = 'arm-apple-darwin10'
sdkroot = device_sdk_info['Path']
prefix = "#ifdef __arm__\n\n"
suffix = "\n\n#endif"
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
out_filename = filename
if file_suffix:
split_name = os.path.splitext(filename)
out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
with open(os.path.join(src_dir, filename)) as in_file:
with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
if prefix:
out_file.write(prefix)
out_file.write(in_file.read())
if suffix:
out_file.write(suffix)
headers_seen = collections.defaultdict(set)
def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
for root, dirs, files in os.walk(src_dir, followlinks=True):
relroot = os.path.relpath(root,src_dir)
def move_dir(arch, prefix='', suffix='', files=[]):
for file in files:
file_suffix = None
if file.endswith('.h'):
if dest_include_dir:
file_suffix = arch
if arch:
headers_seen[file].add(arch)
move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
elif dest_dir:
outroot = os.path.join(dest_dir, relroot)
move_file(root, outroot, file, prefix=prefix, suffix=suffix)
if relroot == '.':
move_dir(arch=arch,
files=files,
prefix=prefix,
suffix=suffix)
elif relroot == 'arm':
move_dir(arch='arm',
prefix="#ifdef __arm__\n\n",
suffix="\n\n#endif",
files=files)
elif relroot == 'x86':
move_dir(arch='i386',
prefix="#if !defined(__arm__) && defined(__i386__)\n\n",
suffix="\n\n#endif",
files=files)
def build_target(platform):
def xcrun_cmd(cmd):
return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
build_dir = 'build_' + platform.name
if not os.path.exists(build_dir):
os.makedirs(build_dir)
env = dict(CC=xcrun_cmd('clang'),
LD=xcrun_cmd('ld'),
CFLAGS='-arch %s -isysroot %s -miphoneos-version-min=4.0' % (platform.arch, platform.sdkroot))
working_dir=os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['../configure', '-host', platform.triple], env=env)
move_source_tree('.', None, '../ios/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
move_source_tree('./include', None, '../ios/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
finally:
os.chdir(working_dir)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
def main():
move_source_tree('src', 'ios/src', 'ios/include')
move_source_tree('include', None, 'ios/include')
build_target(simulator_platform)
build_target(device_platform)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
with open(os.path.join('ios/include', header_name), 'w') as header:
for arch in archs:
header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
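# Illustrative note (header and arch names are examples, not taken from an
# actual build): a header named ffi.h that was seen for both builds ends up as
# an umbrella ios/include/ffi.h containing one include per architecture, e.g.
# "#include <ffi_i386.h>" and "#include <ffi_armv7.h>" (or ffi_arm.h for
# headers picked up from an arm/ subtree), while the per-arch copies written
# by move_file() are wrapped in the #ifdef guards defined on the platform
# classes above.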
if __name__ == '__main__':
main()
|
teeple/pns_server
|
work/install/Python-2.7.4/Modules/_ctypes/libffi/generate-ios-source-and-headers.py
|
Python
|
gpl-2.0
| 5,303 | 0.005846 |
from itertools import product, repeat, chain, ifilter, imap
from multiprocessing import Pool, cpu_count
from sklearn.preprocessing import binarize
from utils.profiling import profile
from numpy.random import randint
from functools import partial
from random import sample
import numpy as np
import logging
logger = logging.getLogger(__name__)
def on_edge(mask, x, y):
""" checks if the point defined with coordinates x and y lies on the edge
of a pedestrian on the pedestrian mask
to lie on an edge is defined by having at least one non-pedestrian
pixel in the surrouding of the given pixel (left, right, up or down)
e.g. we're checking if the middle pixel is an edge one
0 1 1
... 0 1 1 ... -> on_edge? True
0 0 1
0 1 1
... 1 1 1 ... -> on_edge? False
0 1 1
mask: array-like
two dimensional array of binarized pixels
x: int
point coordinate on the x-axis
y: int
point coordinate on the y-axis
returns: bool
        a boolean stating whether the point lies on the edge of the
pedestrian
"""
return mask[x, y] and not all([mask[x + dx, y + dy] for dx, dy in
zip([-1, 1, 0, 0], [0, 0, -1, 1])])
def window(image, d, x, y):
""" extracts a window of size d pixels in each direction from the point
defined by x and y coordinates
total number of pixels in the window is (2 * d + 1)^2
window is binarized if not already binary and reshaped to a vector
e.g. d = 2, P(x, y)
0 0 0 0 0 0 0 0 0 0
-----------
0 | 0 0 0 0 0 | 0 0 1 1
0 | 0 0 0 0 0 | 1 1 1 1
0 | 0 0 P 0 1 | 1 1 1 1
0 | 0 0 0 0 1 | 1 1 1 1
0 | 0 0 0 1 1 | 1 1 1 1
-----------
0 0 0 1 1 1 1 1 1 1
0 0 0 0 1 1 1 1 1 1
0 0 0 1 1 1 1 1 1 1
image: array-like
two dimensional array
d: int
number of pixels to take in each direction from the center
x: int
x coordinate of the window center
y: int
        y coordinate of the window center
returns: array
a binary array with (2 * d + 1)^2 elements
"""
b, g, r = image
w = lambda img: img[(x - d):(x + d + 1), (y - d):(y + d + 1)].reshape(-1)
return binarize(np.hstack([w(b), w(g), w(r)]), 0.5)
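# Minimal shape check for window() (hypothetical data, not part of the
# original module; assumes an sklearn version that, like the call above,
# accepts 1-D input to binarize): with d = 2 the slice spans 2*d + 1 = 5
# pixels per axis, so the stacked b/g/r vector has 3 * 5 * 5 = 75 entries.
#
#   chan = np.linspace(0.0, 1.0, 100).reshape(10, 10)
#   vec = window((chan, chan, chan), 2, 5, 5)
#   assert vec.size == 75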
def samples(s, d):
""" generates classifier input samples from a dataset sample
s: object
compatible with the sample class interface
d: int
number of pixels to take in each direction from the center when
generating an input sample via the moving window
return: list of tuples
tuples contain two elements, an input vector and a target vector
extracted from a given sample
"""
shape = s.proc[0].shape
# generate a cartesian product of all possible point coordinates given
# image shape and offset d
# filter out all points not representing pedestrian mask edges
# compute input vectors for all remaining points from their respective
# windows
positive = imap(lambda xy: (window(s.proc, d, *xy), 1),
ifilter(lambda xy: on_edge(s.mask, *xy),
product(*map(lambda x: xrange(d, x - d),
shape))))
# create an infinite uniform random sampling list of point coordinates
# inside the given image
# filter out all points representing positive examples to get an infinite
# list of point coordinates representing negative examples
# compute input vectors for all points from their respective windows
negative = imap(lambda xy: (window(s.proc, d, *xy), 0),
ifilter(lambda xy: not s.mask.item(*xy),
imap(lambda o: map(lambda x: randint(d, x - d), o),
repeat(shape))))
# zip a finite list of positive examples and an infinite list of negative
    # examples to get an equal number of positive and negative examples; the
    # zipped result has a length of len(positive)
# chain all the zipped elements to get a flattened list of examples
# containing both positive and negative examples in one list
return list(chain(*zip(positive, negative)))
def generate(dataset, w, threaded=True):
""" generate a list of classifier data samples from all dataset samples
with a parallel implementation using a thread pool
dataset: object
object containing samples list compatible with the sample class
interface
w: int
size of the window used to extract features from an image
must be an odd number
returns: iterator
iterator contains all the positive and negative data samples
generated from the dataset
"""
if not threaded:
logger.info('extracting samples using 1 thread')
return chain(*map(partial(samples, d=(w - 1) / 2), dataset.samples))
logger.info('extracting samples using {0} threads'.format(cpu_count()))
return chain(*pool.map(partial(samples, d=(w - 1) / 2), dataset.samples))
@profile
def extract(dataset, w=11, N=25000, threaded=True):
""" extracts the training inputs and targets from the dataset
dataset: object
object containing samples list compatible with the sample class
interface
w: int
size of the window used to extract features from an image
must be an odd number
N: int
the number of samples to extract from the dataset. samples are
extracted randomly from the list of all possible samples
must be positive
returns: tuple of numpy arrays
the tuple contains two numpy arrays, one represents an input two
dimensional array and the other one represents a target vector
"""
assert(w % 2 == 1)
assert(N > 0)
# generates a list of data samples used in the model training
#
# randomly samples the list of samples and returns a maximum of
# N data samples as tuples of (input, target) vectors
#
# zips the sample tuples to divide input vectors in a separate tuple and
# target vectors in a separate tuple
ins, ts = zip(*sample(list(generate(dataset, w, threaded=threaded)), N))
# vertically concatenates list of numpy arrays and concatenates a list
# of target vectors to a numpy array
return (np.vstack(ins), np.array(ts))
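# Hedged usage sketch (assumes a dataset object exposing .samples as described
# in the docstrings above):
#
#   X, y = extract(dataset, w=11, N=25000)
#   # X: (25000, 3 * 11 * 11) binary window features, y: (25000,) 0/1 targets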
# process pool for concurrent sample generation
pool = Pool(cpu_count())
|
dominiktomicevic/pedestrian
|
classifier/extractor.py
|
Python
|
mit
| 6,723 | 0.000149 |
#!/bin/env python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests that verify that the various test_types (e.g., simplified_diff)
are working."""
import difflib
import os
import unittest
from test_types import simplified_text_diff
class SimplifiedDiffUnittest(unittest.TestCase):
def testSimplifiedDiff(self):
"""Compares actual output with expected output for some test cases. The
simplified diff of these test cases should be the same."""
test_names = [
'null-offset-parent',
'textAreaLineHeight',
'form-element-geometry',
]
differ = simplified_text_diff.SimplifiedTextDiff(None)
for prefix in test_names:
output_filename = os.path.join(self.GetTestDataDir(),
prefix + "-actual-win.txt")
expected_filename = os.path.join(self.GetTestDataDir(),
prefix + "-expected.txt")
output = differ._SimplifyText(open(output_filename).read())
expected = differ._SimplifyText(open(expected_filename).read())
if output != expected:
lst = difflib.unified_diff(expected.splitlines(True),
output.splitlines(True),
'expected',
'actual')
for line in lst:
print line.rstrip()
self.failUnlessEqual(output, expected)
def GetTestDataDir(self):
return os.path.join(os.path.abspath('testdata'), 'difftests')
if '__main__' == __name__:
unittest.main()
|
amyvmiwei/chromium
|
webkit/tools/layout_tests/layout_package/test_types_unittest.py
|
Python
|
bsd-3-clause
| 1,667 | 0.007798 |
from erukar.system.engine import Enemy, BasicAI
from ..templates.Undead import Undead
from erukar.content.inventory import Shortsword, Buckler
from erukar.content.modifiers import Steel, Oak
class Skeleton(Undead):
ClassName = 'Skeleton'
ClassLevel = 1
BaseMitigations = {
'bludgeoning': (-0.25, 0),
'piercing': (0.2, 0),
'slashing': (0.15, 0)
}
BriefDescription = "a skeleton holding a {describe|right} and a {describe|left}."
def init_stats(self):
self.strength = 5
self.dexterity = 4
self.vitality = -1
self.acuity = -2
self.sense = -2
def init_personality(self):
self.ai_module = BasicAI(self)
self.str_ratio = 0.4
self.dex_ratio = 0.3
self.vit_ratio = 0.2
self.acu_ratio = 0.0
self.sen_ratio = 0.0
self.res_ratio = 0.1
self.stat_points = 8
def init_inventory(self):
self.left = Buckler(modifiers=[Oak])
self.right = Shortsword(modifiers=[Steel])
self.inventory = [self.left, self.right]
|
etkirsch/legends-of-erukar
|
erukar/content/enemies/undead/Skeleton.py
|
Python
|
agpl-3.0
| 1,103 | 0.012693 |
from django.contrib import sitemaps
from django.core.urlresolvers import reverse
from .models import Artist, Song, User
class StaticViewSitemap(sitemaps.Sitemap):
changefreq = "weekly"
priority = 0.5
def items(self):
return ['index', 'popular', 'recently_added', 'search', 'contact']
def location(self, item):
return reverse('chords:' + item)
class SongSitemap(sitemaps.Sitemap):
changefreq = "weekly"
priority = 0.5
def items(self):
return Song.objects.filter(published=True)
def lastmod(self, obj):
return obj.mod_date
class ArtistSitemap(sitemaps.Sitemap):
changefreq = "weekly"
priority = 0.5
def items(self):
return Artist.objects.all()
class UserSitemap(sitemaps.Sitemap):
changefreq = "monthly"
priority = 0.5
def items(self):
return User.objects.all()
def location(self, user):
return reverse('chords:user', args=(user.get_username(),))
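# Hedged wiring sketch (URL pattern and dict keys are assumptions, not part of
# this module): these classes are typically collected into a dict and passed
# to django.contrib.sitemaps.views.sitemap from the project urls.py, e.g.
#
#   from django.conf.urls import url
#   from django.contrib.sitemaps.views import sitemap
#   sitemaps = {'static': StaticViewSitemap, 'songs': SongSitemap,
#               'artists': ArtistSitemap, 'users': UserSitemap}
#   url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
#       name='django.contrib.sitemaps.views.sitemap')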
|
Ilias95/guitarchords
|
chords/sitemaps.py
|
Python
|
mit
| 977 | 0 |
# -*- coding: utf-8 -*-
#
# SRL 5 documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 16 15:51:55 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.pngmath', 'sphinx.ext.jsmath']
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SRL 5'
copyright = u'2010, SRL Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'SRL5doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SRL5.tex', u'SRL 5 Documentation',
u'SRL Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
SRL/SRL-5
|
doc/sphinx/conf.py
|
Python
|
gpl-3.0
| 6,360 | 0.006918 |
#
# ImageViewAgg.py -- a backend for Ginga using the aggdraw library
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import numpy
from io import BytesIO
import aggdraw as agg
from . import AggHelp
from ginga import ImageView
from ginga.aggw.CanvasRenderAgg import CanvasRenderer
try:
import PIL.Image as PILimage
have_PIL = True
except ImportError:
have_PIL = False
class ImageViewAggError(ImageView.ImageViewError):
pass
class ImageViewAgg(ImageView.ImageViewBase):
def __init__(self, logger=None, rgbmap=None, settings=None):
ImageView.ImageViewBase.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings)
self.surface = None
self._rgb_order = 'RGBA'
self.renderer = CanvasRenderer(self)
self.message = None
# cursors
self.cursor = {}
self.t_.setDefaults(show_pan_position=False,
onscreen_ff='Sans Serif')
def get_surface(self):
return self.surface
def render_image(self, rgbobj, dst_x, dst_y):
"""Render the image represented by (rgbobj) at dst_x, dst_y
in the pixel space.
"""
if self.surface is None:
return
canvas = self.surface
self.logger.debug("redraw surface")
# get window contents as a buffer and load it into the AGG surface
rgb_buf = self.getwin_buffer(order=self._rgb_order)
canvas.fromstring(rgb_buf)
cr = AggHelp.AggContext(canvas)
# Draw a cross in the center of the window in debug mode
if self.t_['show_pan_position']:
ctr_x, ctr_y = self.get_center()
pen = cr.get_pen('red')
canvas.line((ctr_x - 10, ctr_y, ctr_x + 10, ctr_y), pen)
canvas.line((ctr_x, ctr_y - 10, ctr_x, ctr_y + 10), pen)
# render self.message
if self.message:
font = cr.get_font(self.t_['onscreen_ff'], 24.0, self.img_fg)
wd, ht = cr.text_extents(self.message, font)
imgwin_wd, imgwin_ht = self.get_window_size()
y = ((imgwin_ht // 3) * 2) - (ht // 2)
x = (imgwin_wd // 2) - (wd // 2)
canvas.text((x, y), self.message, font)
# for debugging
#self.save_rgb_image_as_file('/tmp/temp.png', format='png')
def configure_surface(self, width, height):
# create agg surface the size of the window
self.surface = agg.Draw("RGBA", (width, height), 'black')
# inform the base class about the actual window size
self.configure(width, height)
def get_image_as_array(self):
if self.surface is None:
raise ImageViewAggError("No AGG surface defined")
# TODO: could these have changed between the time that self.surface
# was last updated?
wd, ht = self.get_window_size()
# Get agg surface as a numpy array
surface = self.get_surface()
arr8 = numpy.fromstring(surface.tostring(), dtype=numpy.uint8)
arr8 = arr8.reshape((ht, wd, 4))
return arr8
def get_image_as_buffer(self, output=None):
if self.surface is None:
raise ImageViewAggError("No AGG surface defined")
obuf = output
if obuf is None:
obuf = BytesIO()
surface = self.get_surface()
obuf.write(surface.tostring())
if not (output is None):
return None
return obuf.getvalue()
def get_rgb_image_as_buffer(self, output=None, format='png', quality=90):
if not have_PIL:
raise ImageViewAggError("Please install PIL to use this method")
if self.surface is None:
raise ImageViewAggError("No AGG surface defined")
obuf = output
if obuf is None:
obuf = BytesIO()
# Get current surface as an array
arr8 = self.get_image_as_array()
# make a PIL image
image = PILimage.fromarray(arr8)
image.save(obuf, format=format, quality=quality)
if not (output is None):
return None
return obuf.getvalue()
def get_rgb_image_as_bytes(self, format='png', quality=90):
buf = self.get_rgb_image_as_buffer(format=format, quality=quality)
return buf
def save_rgb_image_as_file(self, filepath, format='png', quality=90):
if not have_PIL:
raise ImageViewAggError("Please install PIL to use this method")
if self.surface is None:
raise ImageViewAggError("No AGG surface defined")
with open(filepath, 'w') as out_f:
self.get_rgb_image_as_buffer(output=out_f, format=format,
quality=quality)
self.logger.debug("wrote %s file '%s'" % (format, filepath))
def update_image(self):
# subclass implements this method to actually update a widget
# from the agg surface
self.logger.warning("Subclass should override this method")
return False
def set_cursor(self, cursor):
# subclass implements this method to actually set a defined
# cursor on a widget
self.logger.warning("Subclass should override this method")
def reschedule_redraw(self, time_sec):
# subclass implements this method to call delayed_redraw() after
# time_sec
self.delayed_redraw()
def define_cursor(self, ctype, cursor):
self.cursor[ctype] = cursor
def get_cursor(self, ctype):
return self.cursor[ctype]
def switch_cursor(self, ctype):
self.set_cursor(self.cursor[ctype])
def get_rgb_order(self):
return self._rgb_order
def onscreen_message(self, text, delay=None):
# subclass implements this method using a timer
self.logger.warning("Subclass should override this method")
def show_pan_mark(self, tf):
self.t_.set(show_pan_position=tf)
self.redraw(whence=3)
#END
|
rupak0577/ginga
|
ginga/aggw/ImageViewAgg.py
|
Python
|
bsd-3-clause
| 6,082 | 0.000658 |
# -*- coding: utf-8 -*-
import os
import os.path
import tempfile
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.dispatch import receiver
from registration.signals import user_activated
from imagekit.models import ImageSpecField
from imagekit.processors import Resize
from PIL import Image
class Kategorie(models.Model):
title = models.CharField(max_length=50)
slug = models.CharField(max_length=20, unique=True)
intro = models.TextField(blank=True)
details = models.TextField(blank=True)
position = models.IntegerField(default=0)
def __unicode__(self):
return self.title
class WhiteBackground(object):
def process(self, image):
bg = Image.new("RGBA", image.size, (255, 255, 255, 255))
bg.paste(image, (0,0), )
return bg
class Kapitel(models.Model):
    '''A chapter of the "Tutorial".'''
KATEGORIE_CHOICES = [('0', 'Einstieg'),
('1', 'Schleifen'),
('2', 'Funktionen'),
('3', 'Bedingungen'),
('4', 'Für Fortgeschrittene'),
]
titel = models.CharField(max_length=50)
zusammenfassung = models.TextField(max_length=500)
kategorie = models.CharField(choices=KATEGORIE_CHOICES, max_length=2)
platz = models.FloatField()
quelltext = models.TextField(max_length=50000)
doccoHTML = models.TextField(max_length=100000)
bild = models.ImageField(upload_to='tutorial')
docbild = ImageSpecField([WhiteBackground(), Resize(400, 400), ], source='bild', format='JPEG', )
thumbnail = ImageSpecField([WhiteBackground(), Resize(80, 80), ], source='bild', format='JPEG', )
def __unicode__(self):
return u'%s (%s/%d)' % (self.titel, self.kategorie, self.platz)
def save(self, *args, **kwargs):
# fill doccoHTML
sourcefile = tempfile.NamedTemporaryFile(delete=False, dir=settings.TMP_PATH, suffix='.coffee')
sourcefile.write('# ' + self.titel.encode('utf8') + '\n')
sourcefile.write('# ' + '-' * len(self.titel.encode('utf8')) + '\n# \n')
sourcefile.write(self.quelltext.encode('utf8'))
sourcefile.close()
path = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.system('docco -t %s/template.jst -o %s %s' % (settings.DOCCO_TEMPLATE_PATH, path, sourcefile.name))
with open(os.path.join(path, os.path.basename(sourcefile.name)[:-7] + '.html')) as resultfile:
self.doccoHTML = resultfile.read().decode('utf8')
super(Kapitel, self).save(*args, **kwargs)
class Beispiel(models.Model):
    '''An example in the gallery'''
titel = models.CharField(max_length=50)
autor = models.CharField(max_length=200)
quelltext = models.TextField(max_length=50000)
doccoHTML = models.TextField(max_length=100000)
bild = models.ImageField(upload_to='tutorial')
docbild = ImageSpecField([WhiteBackground(), Resize(400, 400), ], source='bild', format='JPEG', )
thumbnail = ImageSpecField([WhiteBackground(), Resize(200, 200), ], source='bild', format='JPEG', )
updated = models.DateTimeField(auto_now=True)
category = models.ForeignKey(Kategorie, null=True, blank=True)
def __unicode__(self):
return u'%s (%s)' % (self.titel, unicode(self.updated))
def save(self, *args, **kwargs):
# fill doccoHTML
sourcefile = tempfile.NamedTemporaryFile(delete=False, dir=settings.TMP_PATH, suffix='.coffee')
sourcefile.write('# ' + self.titel.encode('utf8') + '\n')
sourcefile.write('# ' + '-' * len(self.titel.encode('utf8')) + '\n#\n#\n')
if self.quelltext.startswith('#'):
sourcefile.write('# <div style="margin-top:400px;"></div>\n\n')
else:
sourcefile.write('# <div style="margin-top:400px;"></div>\n\n# .\n')
sourcefile.write(self.quelltext.encode('utf8'))
sourcefile.close()
path = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.system('docco -t %s/template.jst -o %s %s' % (settings.DOCCO_TEMPLATE_PATH, path, sourcefile.name))
with open(os.path.join(path, os.path.basename(sourcefile.name)[:-7] + '.html')) as resultfile:
self.doccoHTML = resultfile.read().decode('utf8')
super(Beispiel, self).save(*args, **kwargs)
class Aufgabe(models.Model):
    '''An exercise in the exercise collection'''
KATEGORIE_CHOICES = [('0', 'Einstieg'),
('1', 'Schleifen'),
('2', 'Funktionen'),
('3', 'Bedingungen'),
('4', 'Für Fortgeschrittene'),
]
titel = models.CharField(max_length=50)
zusammenfassung = models.TextField(max_length=500)
kategorie = models.CharField(choices=KATEGORIE_CHOICES, max_length=2)
platz = models.FloatField()
quelltext = models.TextField(max_length=50000)
doccoHTML = models.TextField(max_length=100000)
bild = models.ImageField(upload_to='tutorial')
docbild = ImageSpecField([WhiteBackground(), Resize(400, 400), ], source='bild', format='JPEG', )
thumbnail = ImageSpecField([WhiteBackground(), Resize(200, 200), ], source='bild', format='JPEG', )
def __unicode__(self):
return u'%s (%s/%d)' % (self.titel, self.kategorie, self.platz)
def save(self, *args, **kwargs):
# fill doccoHTML
sourcefile = tempfile.NamedTemporaryFile(delete=False, dir=settings.TMP_PATH, suffix='.coffee')
sourcefile.write('# ' + self.titel.encode('utf8') + '\n')
sourcefile.write('# ' + '-' * len(self.titel.encode('utf8')) + '\n# \n')
sourcefile.write(self.quelltext.encode('utf8'))
sourcefile.close()
path = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.system('docco -t %s/template.jst -o %s %s' % (settings.DOCCO_TEMPLATE_PATH, path, sourcefile.name))
with open(os.path.join(path, os.path.basename(sourcefile.name)[:-7] + '.html')) as resultfile:
self.doccoHTML = resultfile.read().decode('utf8')
super(Aufgabe, self).save(*args, **kwargs)
class Userdata(models.Model):
user = models.OneToOneField(User)
class Skript(models.Model):
key = models.CharField(max_length=20)
skript = models.TextField(max_length=2000)
ud = models.ForeignKey(Userdata)
bild = models.ImageField(upload_to='userskripte', null=True)
thumbnail = ImageSpecField([WhiteBackground(), Resize(150, 150), ], source='bild', format='png', )
def __unicode__(self):
return '%s: %s (%d)' % (self.ud.user.email, self.key, len(self.skript))
@receiver(user_activated)
def user_activated_callback(sender, user, request, **kwargs):
ud = Userdata(user=user)
ud.save()
|
ugoertz/igelgrafik
|
igelmain/models.py
|
Python
|
bsd-3-clause
| 6,801 | 0.003971 |
import pytest
def test_app_hostname_is_not_none():
from openods import app
value = app.config['APP_HOSTNAME']
assert value is not None
def test_cache_timeout_is_greater_equal_0():
from openods import app
value = app.config['CACHE_TIMEOUT']
assert value >= 0
def test_database_url_is_not_none():
from openods import app
value = app.config['DATABASE_URL']
assert value is not None
|
open-ods/open-ods
|
tests/test_openods_api_config.py
|
Python
|
gpl-3.0
| 421 | 0 |
"""Unit tests for PyGraphviz interface."""
import os
import tempfile
import pytest
pygraphviz = pytest.importorskip('pygraphviz')
from networkx.testing import assert_edges_equal, assert_nodes_equal, \
assert_graphs_equal
import networkx as nx
class TestAGraph(object):
def build_graph(self, G):
edges = [('A', 'B'), ('A', 'C'), ('A', 'C'), ('B', 'C'), ('A', 'D')]
G.add_edges_from(edges)
G.add_node('E')
G.graph['metal'] = 'bronze'
return G
def assert_equal(self, G1, G2):
assert_nodes_equal(G1.nodes(), G2.nodes())
assert_edges_equal(G1.edges(), G2.edges())
assert G1.graph['metal'] == G2.graph['metal']
def agraph_checks(self, G):
G = self.build_graph(G)
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
self.assert_equal(G, H)
fname = tempfile.mktemp()
nx.drawing.nx_agraph.write_dot(H, fname)
Hin = nx.nx_agraph.read_dot(fname)
os.unlink(fname)
self.assert_equal(H, Hin)
(fd, fname) = tempfile.mkstemp()
with open(fname, 'w') as fh:
nx.drawing.nx_agraph.write_dot(H, fh)
with open(fname, 'r') as fh:
Hin = nx.nx_agraph.read_dot(fh)
os.unlink(fname)
self.assert_equal(H, Hin)
def test_from_agraph_name(self):
G = nx.Graph(name='test')
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
assert G.name == 'test'
def test_undirected(self):
self.agraph_checks(nx.Graph())
def test_directed(self):
self.agraph_checks(nx.DiGraph())
def test_multi_undirected(self):
self.agraph_checks(nx.MultiGraph())
def test_multi_directed(self):
self.agraph_checks(nx.MultiDiGraph())
def test_view_pygraphviz(self):
G = nx.Graph() # "An empty graph cannot be drawn."
pytest.raises(nx.NetworkXException, nx.nx_agraph.view_pygraphviz, G)
G = nx.barbell_graph(4, 6)
nx.nx_agraph.view_pygraphviz(G)
def test_view_pygraphviz_edgelable(self):
G = nx.Graph()
G.add_edge(1, 2, weight=7)
G.add_edge(2, 3, weight=8)
nx.nx_agraph.view_pygraphviz(G, edgelabel='weight')
def test_graph_with_reserved_keywords(self):
# test attribute/keyword clash case for #1582
# node: n
# edges: u,v
G = nx.Graph()
G = self.build_graph(G)
G.nodes['E']['n'] = 'keyword'
G.edges[('A', 'B')]['u'] = 'keyword'
G.edges[('A', 'B')]['v'] = 'keyword'
A = nx.nx_agraph.to_agraph(G)
def test_round_trip(self):
G = nx.Graph()
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
#assert_graphs_equal(G, H)
AA = nx.nx_agraph.to_agraph(H)
HH = nx.nx_agraph.from_agraph(AA)
assert_graphs_equal(H, HH)
G.graph['graph'] = {}
G.graph['node'] = {}
G.graph['edge'] = {}
assert_graphs_equal(G, HH)
def test_2d_layout(self):
G = nx.Graph()
G = self.build_graph(G)
G.graph["dimen"] = 2
pos = nx.nx_agraph.pygraphviz_layout(G, prog='neato')
pos = list(pos.values())
assert len(pos) == 5
assert len(pos[0]) == 2
def test_3d_layout(self):
G = nx.Graph()
G = self.build_graph(G)
G.graph["dimen"] = 3
pos = nx.nx_agraph.pygraphviz_layout(G, prog='neato')
pos = list(pos.values())
assert len(pos) == 5
assert len(pos[0]) == 3
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/drawing/tests/test_agraph.py
|
Python
|
mit
| 3,587 | 0.000836 |
import bpy
from ... base_types.node import AnimationNode
class an_EdgesOfPolygonsNode(bpy.types.Node, AnimationNode):
bl_idname = "an_EdgesOfPolygonsNode"
bl_label = "Edges of Polygons"
def create(self):
self.newInput("Polygon Indices List", "Polygons", "polygons")
self.newOutput("Edge Indices List", "Edges", "edges")
def execute(self, polygons):
edges = []
for polygon in polygons:
for i, index in enumerate(polygon):
startIndex = polygon[i - 1]
edge = (startIndex, index) if index > startIndex else (index, startIndex)
edges.append(edge)
return list(set(edges))
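    # Worked example (input values assumed): for polygons = [(0, 1, 2), (1, 2, 3)]
    # each index is paired with its predecessor (wrapping via polygon[i - 1]) and
    # the pair is ordered smallest-first, so the deduplicated result contains
    # (0, 1), (0, 2), (1, 2), (1, 3) and (2, 3), in arbitrary set order.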
|
Thortoise/Super-Snake
|
Blender/animation_nodes-master/nodes/mesh/edges_of_polygons.py
|
Python
|
gpl-3.0
| 685 | 0.00292 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
There are "opportunistic" tests which allows testing against all 3 databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
| sudo -u postgres psql
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
"""
import os
from migrate.versioning import repository
import mock
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as db_utils
import sqlalchemy
from sqlalchemy.engine import reflection
from nova.db import migration
from nova.db.sqlalchemy.api_migrations import migrate_repo
from nova.db.sqlalchemy import api_models
from nova.db.sqlalchemy import migration as sa_migration
from nova import test
from nova.tests import fixtures as nova_fixtures
class NovaAPIModelsSync(test_migrations.ModelsMigrationsSync):
"""Test that the models match the database after migrations are run."""
def db_sync(self, engine):
with mock.patch.object(sa_migration, 'get_engine',
return_value=engine):
sa_migration.db_sync(database='api')
@property
def migrate_engine(self):
return self.engine
def get_engine(self, context=None):
return self.migrate_engine
def get_metadata(self):
return api_models.API_BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table':
# migrate_version is a sqlalchemy-migrate control table and
# isn't included in the model.
if name == 'migrate_version':
return False
return True
def filter_metadata_diff(self, diff):
# Filter out diffs that shouldn't cause a sync failure.
new_diff = []
# Define a whitelist of ForeignKeys that exist on the model but not in
# the database. They will be removed from the model at a later time.
fkey_whitelist = {'build_requests': ['request_spec_id']}
# Define a whitelist of columns that will be removed from the
# DB at a later release and aren't on a model anymore.
column_whitelist = {
'build_requests': ['vm_state', 'instance_metadata',
'display_name', 'access_ip_v6', 'access_ip_v4', 'key_name',
'locked_by', 'image_ref', 'progress', 'request_spec_id',
'info_cache', 'user_id', 'task_state', 'security_groups',
'config_drive']
}
for element in diff:
if isinstance(element, list):
# modify_nullable is a list
new_diff.append(element)
else:
# tuple with action as first element. Different actions have
# different tuple structures.
if element[0] == 'add_fk':
fkey = element[1]
tablename = fkey.table.name
column_keys = fkey.column_keys
if (tablename in fkey_whitelist and
column_keys == fkey_whitelist[tablename]):
continue
elif element[0] == 'remove_column':
tablename = element[2]
column = element[3]
if (tablename in column_whitelist and
column.name in column_whitelist[tablename]):
continue
new_diff.append(element)
return new_diff
class TestNovaAPIMigrationsSQLite(NovaAPIModelsSync,
test_base.DbTestCase,
test.NoDBTestCase):
pass
class TestNovaAPIMigrationsMySQL(NovaAPIModelsSync,
test_base.MySQLOpportunisticTestCase,
test.NoDBTestCase):
pass
class TestNovaAPIMigrationsPostgreSQL(NovaAPIModelsSync,
test_base.PostgreSQLOpportunisticTestCase, test.NoDBTestCase):
pass
class NovaAPIMigrationsWalk(test_migrations.WalkVersionsMixin):
def setUp(self):
# NOTE(sdague): the oslo_db base test case completely
# invalidates our logging setup, we actually have to do that
# before it is called to keep this from vomiting all over our
# test output.
self.useFixture(nova_fixtures.StandardLogging())
super(NovaAPIMigrationsWalk, self).setUp()
@property
def INIT_VERSION(self):
return migration.db_initial_version('api')
@property
def REPOSITORY(self):
return repository.Repository(
os.path.abspath(os.path.dirname(migrate_repo.__file__)))
@property
def migration_api(self):
return sa_migration.versioning_api
@property
def migrate_engine(self):
return self.engine
def _skippable_migrations(self):
mitaka_placeholders = list(range(8, 13))
newton_placeholders = list(range(21, 26))
ocata_placeholders = list(range(31, 41))
special_cases = [
30, # Enforcement migration, no changes to test
]
return (mitaka_placeholders +
newton_placeholders +
ocata_placeholders +
special_cases)
def migrate_up(self, version, with_data=False):
if with_data:
check = getattr(self, '_check_%03d' % version, None)
if version not in self._skippable_migrations():
self.assertIsNotNone(check,
('API DB Migration %i does not have a '
'test. Please add one!') % version)
super(NovaAPIMigrationsWalk, self).migrate_up(version, with_data)
def test_walk_versions(self):
self.walk_versions(snake_walk=False, downgrade=False)
def assertColumnExists(self, engine, table_name, column):
self.assertTrue(db_utils.column_exists(engine, table_name, column),
'Column %s.%s does not exist' % (table_name, column))
def assertIndexExists(self, engine, table_name, index):
self.assertTrue(db_utils.index_exists(engine, table_name, index),
'Index %s on table %s does not exist' %
(index, table_name))
def assertUniqueConstraintExists(self, engine, table_name, columns):
inspector = reflection.Inspector.from_engine(engine)
constrs = inspector.get_unique_constraints(table_name)
constr_columns = [constr['column_names'] for constr in constrs]
self.assertIn(columns, constr_columns)
def assertTableNotExists(self, engine, table_name):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
db_utils.get_table, engine, table_name)
def _check_001(self, engine, data):
for column in ['created_at', 'updated_at', 'id', 'uuid', 'name',
'transport_url', 'database_connection']:
self.assertColumnExists(engine, 'cell_mappings', column)
self.assertIndexExists(engine, 'cell_mappings', 'uuid_idx')
self.assertUniqueConstraintExists(engine, 'cell_mappings',
['uuid'])
def _check_002(self, engine, data):
for column in ['created_at', 'updated_at', 'id', 'instance_uuid',
'cell_id', 'project_id']:
self.assertColumnExists(engine, 'instance_mappings', column)
for index in ['instance_uuid_idx', 'project_id_idx']:
self.assertIndexExists(engine, 'instance_mappings', index)
self.assertUniqueConstraintExists(engine, 'instance_mappings',
['instance_uuid'])
inspector = reflection.Inspector.from_engine(engine)
# There should only be one foreign key here
fk = inspector.get_foreign_keys('instance_mappings')[0]
self.assertEqual('cell_mappings', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
self.assertEqual(['cell_id'], fk['constrained_columns'])
def _check_003(self, engine, data):
for column in ['created_at', 'updated_at', 'id',
'cell_id', 'host']:
self.assertColumnExists(engine, 'host_mappings', column)
self.assertIndexExists(engine, 'host_mappings', 'host_idx')
self.assertUniqueConstraintExists(engine, 'host_mappings',
['host'])
inspector = reflection.Inspector.from_engine(engine)
# There should only be one foreign key here
fk = inspector.get_foreign_keys('host_mappings')[0]
self.assertEqual('cell_mappings', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
self.assertEqual(['cell_id'], fk['constrained_columns'])
def _check_004(self, engine, data):
columns = ['created_at', 'updated_at', 'id', 'instance_uuid', 'spec']
for column in columns:
self.assertColumnExists(engine, 'request_specs', column)
self.assertUniqueConstraintExists(engine, 'request_specs',
['instance_uuid'])
self.assertIndexExists(engine, 'request_specs',
'request_spec_instance_uuid_idx')
def _check_005(self, engine, data):
# flavors
for column in ['created_at', 'updated_at', 'name', 'id', 'memory_mb',
'vcpus', 'swap', 'vcpu_weight', 'flavorid', 'rxtx_factor',
'root_gb', 'ephemeral_gb', 'disabled', 'is_public']:
self.assertColumnExists(engine, 'flavors', column)
self.assertUniqueConstraintExists(engine, 'flavors',
['flavorid'])
self.assertUniqueConstraintExists(engine, 'flavors',
['name'])
# flavor_extra_specs
for column in ['created_at', 'updated_at', 'id', 'flavor_id', 'key',
'value']:
self.assertColumnExists(engine, 'flavor_extra_specs', column)
self.assertIndexExists(engine, 'flavor_extra_specs',
'flavor_extra_specs_flavor_id_key_idx')
self.assertUniqueConstraintExists(engine, 'flavor_extra_specs',
['flavor_id', 'key'])
inspector = reflection.Inspector.from_engine(engine)
# There should only be one foreign key here
fk = inspector.get_foreign_keys('flavor_extra_specs')[0]
self.assertEqual('flavors', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
self.assertEqual(['flavor_id'], fk['constrained_columns'])
# flavor_projects
for column in ['created_at', 'updated_at', 'id', 'flavor_id',
'project_id']:
self.assertColumnExists(engine, 'flavor_projects', column)
self.assertUniqueConstraintExists(engine, 'flavor_projects',
['flavor_id', 'project_id'])
inspector = reflection.Inspector.from_engine(engine)
# There should only be one foreign key here
fk = inspector.get_foreign_keys('flavor_projects')[0]
self.assertEqual('flavors', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
self.assertEqual(['flavor_id'], fk['constrained_columns'])
def _check_006(self, engine, data):
for column in ['id', 'request_spec_id', 'project_id', 'user_id',
'display_name', 'instance_metadata', 'progress', 'vm_state',
'image_ref', 'access_ip_v4', 'access_ip_v6', 'info_cache',
'security_groups', 'config_drive', 'key_name', 'locked_by']:
self.assertColumnExists(engine, 'build_requests', column)
self.assertIndexExists(engine, 'build_requests',
'build_requests_project_id_idx')
self.assertUniqueConstraintExists(engine, 'build_requests',
['request_spec_id'])
inspector = reflection.Inspector.from_engine(engine)
# There should only be one foreign key here
fk = inspector.get_foreign_keys('build_requests')[0]
self.assertEqual('request_specs', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
self.assertEqual(['request_spec_id'], fk['constrained_columns'])
def _check_007(self, engine, data):
map_table = db_utils.get_table(engine, 'instance_mappings')
self.assertTrue(map_table.columns['cell_id'].nullable)
# Ensure the foreign key still exists
inspector = reflection.Inspector.from_engine(engine)
# There should only be one foreign key here
fk = inspector.get_foreign_keys('instance_mappings')[0]
self.assertEqual('cell_mappings', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
self.assertEqual(['cell_id'], fk['constrained_columns'])
def _check_013(self, engine, data):
for column in ['instance_uuid', 'instance']:
self.assertColumnExists(engine, 'build_requests', column)
self.assertIndexExists(engine, 'build_requests',
'build_requests_instance_uuid_idx')
self.assertUniqueConstraintExists(engine, 'build_requests',
['instance_uuid'])
def _check_014(self, engine, data):
for column in ['name', 'public_key']:
self.assertColumnExists(engine, 'key_pairs', column)
self.assertUniqueConstraintExists(engine, 'key_pairs',
['user_id', 'name'])
def _check_015(self, engine, data):
build_requests_table = db_utils.get_table(engine, 'build_requests')
for column in ['request_spec_id', 'user_id', 'security_groups',
'config_drive']:
self.assertTrue(build_requests_table.columns[column].nullable)
inspector = reflection.Inspector.from_engine(engine)
constrs = inspector.get_unique_constraints('build_requests')
constr_columns = [constr['column_names'] for constr in constrs]
self.assertNotIn(['request_spec_id'], constr_columns)
def _check_016(self, engine, data):
self.assertColumnExists(engine, 'resource_providers', 'id')
self.assertIndexExists(engine, 'resource_providers',
'resource_providers_name_idx')
self.assertIndexExists(engine, 'resource_providers',
'resource_providers_uuid_idx')
self.assertColumnExists(engine, 'inventories', 'id')
self.assertIndexExists(engine, 'inventories',
'inventories_resource_class_id_idx')
self.assertColumnExists(engine, 'allocations', 'id')
self.assertColumnExists(engine, 'resource_provider_aggregates',
'aggregate_id')
def _check_017(self, engine, data):
# aggregate_metadata
for column in ['created_at',
'updated_at',
'id',
'aggregate_id',
'key',
'value']:
self.assertColumnExists(engine, 'aggregate_metadata', column)
self.assertUniqueConstraintExists(engine, 'aggregate_metadata',
['aggregate_id', 'key'])
self.assertIndexExists(engine, 'aggregate_metadata',
'aggregate_metadata_key_idx')
# aggregate_hosts
for column in ['created_at',
'updated_at',
'id',
'host',
'aggregate_id']:
self.assertColumnExists(engine, 'aggregate_hosts', column)
self.assertUniqueConstraintExists(engine, 'aggregate_hosts',
['host', 'aggregate_id'])
# aggregates
for column in ['created_at',
'updated_at',
'id',
'name']:
self.assertColumnExists(engine, 'aggregates', column)
self.assertIndexExists(engine, 'aggregates',
'aggregate_uuid_idx')
self.assertUniqueConstraintExists(engine, 'aggregates', ['name'])
def _check_018(self, engine, data):
# instance_groups
for column in ['created_at',
'updated_at',
'id',
'user_id',
'project_id',
'uuid',
'name']:
self.assertColumnExists(engine, 'instance_groups', column)
self.assertUniqueConstraintExists(engine, 'instance_groups', ['uuid'])
# instance_group_policy
for column in ['created_at',
'updated_at',
'id',
'policy',
'group_id']:
self.assertColumnExists(engine, 'instance_group_policy', column)
self.assertIndexExists(engine, 'instance_group_policy',
'instance_group_policy_policy_idx')
# Ensure the foreign key still exists
inspector = reflection.Inspector.from_engine(engine)
# There should only be one foreign key here
fk = inspector.get_foreign_keys('instance_group_policy')[0]
self.assertEqual('instance_groups', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
# instance_group_member
for column in ['created_at',
'updated_at',
'id',
'instance_uuid',
'group_id']:
self.assertColumnExists(engine, 'instance_group_member', column)
self.assertIndexExists(engine, 'instance_group_member',
'instance_group_member_instance_idx')
def _check_019(self, engine, data):
self.assertColumnExists(engine, 'build_requests',
'block_device_mappings')
def _pre_upgrade_020(self, engine):
build_requests = db_utils.get_table(engine, 'build_requests')
fake_build_req = {'id': 2020,
'project_id': 'fake_proj_id',
'block_device_mappings': 'fake_BDM'}
build_requests.insert().execute(fake_build_req)
def _check_020(self, engine, data):
build_requests = db_utils.get_table(engine, 'build_requests')
if engine.name == 'mysql':
self.assertIsInstance(build_requests.c.block_device_mappings.type,
sqlalchemy.dialects.mysql.MEDIUMTEXT)
fake_build_req = build_requests.select(
build_requests.c.id == 2020).execute().first()
self.assertEqual('fake_BDM', fake_build_req.block_device_mappings)
def _check_026(self, engine, data):
self.assertColumnExists(engine, 'resource_classes', 'id')
self.assertColumnExists(engine, 'resource_classes', 'name')
def _check_027(self, engine, data):
# quota_classes
for column in ['created_at',
'updated_at',
'id',
'class_name',
'resource',
'hard_limit']:
self.assertColumnExists(engine, 'quota_classes', column)
self.assertIndexExists(engine, 'quota_classes',
'quota_classes_class_name_idx')
# quota_usages
for column in ['created_at',
'updated_at',
'id',
'project_id',
'resource',
'in_use',
'reserved',
'until_refresh',
'user_id']:
self.assertColumnExists(engine, 'quota_usages', column)
self.assertIndexExists(engine, 'quota_usages',
'quota_usages_project_id_idx')
self.assertIndexExists(engine, 'quota_usages',
'quota_usages_user_id_idx')
# quotas
for column in ['created_at',
'updated_at',
'id',
'project_id',
'resource',
'hard_limit']:
self.assertColumnExists(engine, 'quotas', column)
self.assertUniqueConstraintExists(engine, 'quotas',
['project_id', 'resource'])
# project_user_quotas
for column in ['created_at',
'updated_at',
'id',
'user_id',
'project_id',
'resource',
'hard_limit']:
self.assertColumnExists(engine, 'project_user_quotas', column)
self.assertUniqueConstraintExists(engine, 'project_user_quotas',
['user_id', 'project_id', 'resource'])
self.assertIndexExists(engine, 'project_user_quotas',
'project_user_quotas_project_id_idx')
self.assertIndexExists(engine, 'project_user_quotas',
'project_user_quotas_user_id_idx')
# reservations
for column in ['created_at',
'updated_at',
'id',
'uuid',
'usage_id',
'project_id',
'resource',
'delta',
'expire',
'user_id']:
self.assertColumnExists(engine, 'reservations', column)
self.assertIndexExists(engine, 'reservations',
'reservations_project_id_idx')
self.assertIndexExists(engine, 'reservations',
'reservations_uuid_idx')
self.assertIndexExists(engine, 'reservations',
'reservations_expire_idx')
self.assertIndexExists(engine, 'reservations',
'reservations_user_id_idx')
# Ensure the foreign key still exists
inspector = reflection.Inspector.from_engine(engine)
# There should only be one foreign key here
fk = inspector.get_foreign_keys('reservations')[0]
self.assertEqual('quota_usages', fk['referred_table'])
self.assertEqual(['id'], fk['referred_columns'])
def _pre_upgrade_028(self, engine):
build_requests = db_utils.get_table(engine, 'build_requests')
fake_build_req = {'id': 2021,
'project_id': 'fake_proj_id',
'instance': '{"uuid": "foo", "name": "bar"}'}
build_requests.insert().execute(fake_build_req)
def _check_028(self, engine, data):
build_requests = db_utils.get_table(engine, 'build_requests')
if engine.name == 'mysql':
self.assertIsInstance(build_requests.c.block_device_mappings.type,
sqlalchemy.dialects.mysql.MEDIUMTEXT)
fake_build_req = build_requests.select(
build_requests.c.id == 2021).execute().first()
self.assertEqual('{"uuid": "foo", "name": "bar"}',
fake_build_req.instance)
def _check_029(self, engine, data):
for column in ['created_at', 'updated_at', 'id', 'uuid']:
self.assertColumnExists(engine, 'placement_aggregates', column)
def _check_041(self, engine, data):
self.assertColumnExists(engine, 'traits', 'id')
self.assertUniqueConstraintExists(engine, 'traits', ['name'])
self.assertColumnExists(engine, 'resource_provider_traits', 'trait_id')
self.assertColumnExists(engine, 'resource_provider_traits',
'resource_provider_id')
self.assertIndexExists(
engine, 'resource_provider_traits',
'resource_provider_traits_resource_provider_trait_idx')
inspector = reflection.Inspector.from_engine(engine)
self.assertEqual(
2, len(inspector.get_foreign_keys('resource_provider_traits')))
for fk in inspector.get_foreign_keys('resource_provider_traits'):
if 'traits' == fk['referred_table']:
self.assertEqual(['id'], fk['referred_columns'])
self.assertEqual(['trait_id'], fk['constrained_columns'])
elif 'resource_providers' == fk['referred_table']:
self.assertEqual(['id'], fk['referred_columns'])
self.assertEqual(['resource_provider_id'],
fk['constrained_columns'])
class TestNovaAPIMigrationsWalkSQLite(NovaAPIMigrationsWalk,
test_base.DbTestCase,
test.NoDBTestCase):
pass
class TestNovaAPIMigrationsWalkMySQL(NovaAPIMigrationsWalk,
test_base.MySQLOpportunisticTestCase,
test.NoDBTestCase):
pass
class TestNovaAPIMigrationsWalkPostgreSQL(NovaAPIMigrationsWalk,
test_base.PostgreSQLOpportunisticTestCase, test.NoDBTestCase):
pass
|
vmturbo/nova
|
nova/tests/functional/db/api/test_migrations.py
|
Python
|
apache-2.0
| 25,993 | 0.001731 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from certproxy.certproxy import run
if __name__ == '__main__':
run()
|
geneanet/certproxy
|
main.py
|
Python
|
bsd-3-clause
| 118 | 0 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
import os
from typing import Optional, List, Tuple, Dict, Union
from models.literalConstants import LiteralConstants
class FileProcessing:
BASE_PATH: str = os.getcwd() + "/"
def __init__(self, path: str, file_type: LiteralConstants.FileType) -> None:
self.path: str = FileProcessing.BASE_PATH + path
self.file_type: LiteralConstants.FileType = file_type
def read_file(self) -> Optional[Union[str, List, Tuple, Dict, bytes]]:
try:
file = open(self.path, 'r') if self.file_type != LiteralConstants.FileType.BYTES else open(self.path, 'rb')
if self.file_type == LiteralConstants.FileType.REG or self.file_type == LiteralConstants.FileType.BYTES:
return file.read()
elif self.file_type == LiteralConstants.FileType.JSON:
return json.load(file)
except EnvironmentError:
LiteralConstants.STA_LOG.logger.exception(LiteralConstants.ExceptionMessages.FILE_CANT_OPEN, exc_info=True)
return None
else:
file.close()
def write_file(self, data: Union[str, List, Tuple, Dict]) -> bool:
try:
file = open(self.path, 'w') if self.file_type != LiteralConstants.FileType.BYTES else open(self.path, 'wb')
if self.file_type == LiteralConstants.FileType.REG or self.file_type == LiteralConstants.FileType.BYTES:
file.write(data)
return True
elif self.file_type == LiteralConstants.FileType.JSON:
json.dump(data, file)
return True
except EnvironmentError:
LiteralConstants.STA_LOG.logger.exception(LiteralConstants.ExceptionMessages.FILE_CANT_WRITE, exc_info=True)
return False
else:
file.close()
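# Hedged usage sketch (the path is an example only):
#
#   fp = FileProcessing("config/settings.json", LiteralConstants.FileType.JSON)
#   settings = fp.read_file()            # parsed JSON on success, None on failure
#   ok = fp.write_file({"voice": "en"})  # True on success, False on failure
#
# Note that the `else: file.close()` branches above are skipped whenever the
# try block returns, so wrapping the open() calls in `with` blocks would be
# the usual way to guarantee the handles are closed.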
|
gmm96/Txt2SpeechBot
|
models/fileProcessing.py
|
Python
|
gpl-3.0
| 1,856 | 0.00431 |
from show_latent import LatentView
|
mzwiessele/GPyNotebook
|
GPyNotebook/latent/__init__.py
|
Python
|
bsd-2-clause
| 34 | 0.029412 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from operator import mul
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest
from testsuite import create_op
def group_norm_naive(x, scale, bias, epsilon, groups, data_layout):
if data_layout == "NHWC":
x = np.transpose(x, (0, 3, 1, 2)) # NHWC => NCHW
N, C, H, W = x.shape
G = groups
x = x.reshape((N * G, -1))
mean = np.mean(x, axis=1, keepdims=True)
var = np.var(x, axis=1, keepdims=True)
output = (x - mean) / np.sqrt(var + epsilon)
output = output.reshape((N, C, H, W)) * scale.reshape(
(-1, 1, 1)) + bias.reshape((-1, 1, 1))
if data_layout == "NHWC":
output = np.transpose(output, (0, 2, 3, 1)) # NCHW => NHWC
return output, mean.reshape((N, G)), var.reshape((N, G))
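# Shape walk-through for the default test case below (values illustrative):
# with x of shape (2, 4, 3, 3) and groups=2, x.reshape((N * G, -1)) yields a
# (4, 18) matrix, so each group's mean/var is taken over 2 channels * 3 * 3
# pixels before scale and bias are broadcast back per channel.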
class TestGroupNormOp(OpTest):
def setUp(self):
self.op_type = "group_norm"
self.data_format = "NCHW"
self.dtype = np.float32
self.shape = (2, 4, 3, 3)
self.attrs = {'epsilon': 1e-5, 'groups': 2, 'data_layout': "NCHW"}
self.compare_between_place = False
self.init_test_case()
input = np.random.random(self.shape).astype(self.dtype)
if self.data_format == "NHWC":
input = np.transpose(input, (0, 2, 3, 1))
scale = np.random.random([self.shape[1]]).astype(self.dtype)
bias = np.random.random([self.shape[1]]).astype(self.dtype)
output, mean, var = group_norm_naive(
input, scale, bias, self.attrs['epsilon'], self.attrs['groups'],
self.data_format)
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(input),
'Scale': OpTest.np_dtype_to_fluid_dtype(scale),
'Bias': OpTest.np_dtype_to_fluid_dtype(bias)
}
self.outputs = {'Y': output, 'Mean': mean, 'Variance': var}
self.attrs['data_layout'] = self.data_format
def test_check_output(self):
atol = 1e-4
inplace_atol = 1e-4
place = core.CPUPlace()
        # add inplace_atol because group_norm doesn't ensure computational consistency
self.check_output_with_place(
place, atol=atol, inplace_atol=inplace_atol)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(
place, atol=atol, inplace_atol=inplace_atol)
def do_compare_between_place(self):
if not core.is_compiled_with_cuda(): return
place = core.CPUPlace()
place2 = core.CUDAPlace(0)
self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_outputs = self.outputs if hasattr(self, "outputs") else dict()
op_attrs = self.attrs if hasattr(self, "attrs") else dict()
self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
op_attrs)
inputs_to_check = set(['X', 'Scale', 'Bias'])
output_names = 'Y'
cpu_grads = self._get_gradient(inputs_to_check, place, output_names,
None)
gpu_grads = self._get_gradient(inputs_to_check, place2, output_names,
None)
self._assert_is_close(cpu_grads, gpu_grads, inputs_to_check, 0.005,
"Gradient Check On %s" % str(place))
def test_check_grad(self):
if self.compare_between_place:
self.do_compare_between_place()
return
place = core.CPUPlace()
self.check_grad_with_place(
place, set(['X', 'Scale', 'Bias']), 'Y', max_relative_error=0.01)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place,
set(['X', 'Scale', 'Bias']),
'Y',
max_relative_error=0.01)
def init_test_case(self):
pass
class TestGroupNormOp1(TestGroupNormOp):
def init_test_case(self):
self.attrs['groups'] = 1
class TestGroupNormOp2(TestGroupNormOp):
def init_test_case(self):
self.attrs['groups'] = 4
class TestGroupNormOpBigEps1(TestGroupNormOp):
def init_test_case(self):
self.attrs['groups'] = 1
self.attrs['epsilon'] = 0.5
class TestGroupNormOpBigEps2(TestGroupNormOp):
def init_test_case(self):
self.attrs['groups'] = 4
self.attrs['epsilon'] = 0.5
class TestGroupNormOpBigEps3(TestGroupNormOp):
def init_test_case(self):
self.attrs['epsilon'] = 0.5
class TestGroupNormOpLargeData(TestGroupNormOp):
def init_test_case(self):
self.shape = (2, 32, 64, 64)
self.attrs['groups'] = 8
self.compare_between_place = True
class TestGroupNormOp1_With_NHWC(TestGroupNormOp):
def init_test_case(self):
self.attrs['groups'] = 1
self.data_format = "NHWC"
class TestGroupNormOp2_With_NHWC(TestGroupNormOp):
def init_test_case(self):
self.attrs['groups'] = 4
self.data_format = "NHWC"
class TestGroupNormOpBigEps1_With_NHWC(TestGroupNormOp):
def init_test_case(self):
self.attrs['groups'] = 1
self.attrs['epsilon'] = 0.5
self.data_format = "NHWC"
class TestGroupNormOpBigEps2_With_NHWC(TestGroupNormOp):
def init_test_case(self):
self.attrs['groups'] = 4
self.attrs['epsilon'] = 0.5
self.data_format = "NHWC"
class TestGroupNormOpBigEps3_With_NHWC(TestGroupNormOp):
def init_test_case(self):
self.attrs['epsilon'] = 0.5
self.data_format = "NHWC"
class TestGroupNormOpLargeData_With_NHWC(TestGroupNormOp):
def init_test_case(self):
self.shape = (2, 64, 32, 32) # NCHW
self.attrs['groups'] = 8
self.data_format = "NHWC"
self.compare_between_place = True
class TestGroupNormAPI_With_NHWC(OpTest):
def test_case1(self):
data1 = fluid.data(name='data1', shape=[None, 3, 3, 4], dtype='float32')
out1 = fluid.layers.group_norm(
input=data1, groups=2, data_layout="NHWC")
data2 = fluid.data(name='data2', shape=[None, 4, 3, 3], dtype='float32')
out2 = fluid.layers.group_norm(
input=data2, groups=2, data_layout="NCHW")
data1_np = np.random.random((2, 3, 3, 4)).astype("float32")
data2_np = np.random.random((2, 4, 3, 3)).astype("float32")
scale = np.array([1]).astype("float32")
bias = np.array([0]).astype("float32")
place = core.CPUPlace()
exe = fluid.Executor(place)
results = exe.run(fluid.default_main_program(),
feed={"data1": data1_np,
"data2": data2_np},
fetch_list=[out1, out2],
return_numpy=True)
expect_res1 = group_norm_naive(
data1_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NHWC")
expect_res2 = group_norm_naive(
data2_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NCHW")
self.assertTrue(np.allclose(results[0], expect_res1[0]))
self.assertTrue(np.allclose(results[1], expect_res2[0]))
class TestGroupNormException(OpTest):
# data_layout is not NHWC or NCHW
def test_exception(self):
data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float32")
def attr_data_format():
out = fluid.layers.group_norm(
input=data, groups=2, data_layout="NDHW")
self.assertRaises(ValueError, attr_data_format)
if __name__ == '__main__':
unittest.main()
|
chengduoZH/Paddle
|
python/paddle/fluid/tests/unittests/test_group_norm_op.py
|
Python
|
apache-2.0
| 8,301 | 0.000482 |
"""SCons.Tool.gfortran
Tool-specific initialization for gfortran, the GNU Fortran 95/Fortran
2003 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gfortran.py 2014/09/27 12:51:43 garyo"
import SCons.Util
import fortran
def generate(env):
"""Add Builders and construction variables for gfortran to an
Environment."""
fortran.generate(env)
for dialect in ['F77', 'F90', 'FORTRAN', 'F95', 'F03']:
env['%s' % dialect] = 'gfortran'
env['SH%s' % dialect] = '$%s' % dialect
if env['PLATFORM'] in ['cygwin', 'win32']:
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS' % dialect)
else:
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)
env['INC%sPREFIX' % dialect] = "-I"
env['INC%sSUFFIX' % dialect] = ""
def exists(env):
return env.Detect('gfortran')
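# Illustrative usage sketch (not part of the original module; file names are assumptions):
# in an SConstruct the tool is normally requested by name rather than imported directly,
# e.g.
#
#     env = Environment(tools=['default', 'gfortran'])
#     env.Program('demo', ['demo.f90'])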
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
stonekyx/binary
|
vendor/scons-local-2.3.4/SCons/Tool/gfortran.py
|
Python
|
gpl-3.0
| 2,256 | 0.00133 |
from OpenGLCffi.GL import params
@params(api='gl', prms=['value'])
def glMinSampleShadingARB(value):
pass
|
cydenix/OpenGLCffi
|
OpenGLCffi/GL/EXT/ARB/sample_shading.py
|
Python
|
mit
| 109 | 0.027523 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import math
import operator
from pycket import values
from pycket import vector as values_vector
from pycket.arity import Arity
from pycket.error import SchemeException
from pycket.prims.expose import expose, default, unsafe
from rpython.rlib import jit, longlong2float, rarithmetic, unroll
from rpython.rlib.rarithmetic import r_uint
from rpython.rlib.objectmodel import always_inline, specialize
from rpython.rlib.rbigint import rbigint
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.lltypesystem.lltype import Signed
# imported for side effects
from pycket import arithmetic
def make_cmp(name, op, con):
@expose(name, simple=True, arity=Arity.geq(2))
@jit.unroll_safe
def do(args):
if len(args) < 2:
raise SchemeException("number of arguments to %s too small" % name)
idx = 2
truth = True
while idx <= len(args):
start = idx - 2
assert start >= 0
w_a, w_b = args[start], args[start + 1]
if not isinstance(w_a, values.W_Number):
raise SchemeException("expected number")
if not isinstance(w_b, values.W_Number):
raise SchemeException("expected number")
idx += 1
truth = truth and getattr(w_a, "arith_" + op)(w_b)
return con(truth)
do.__name__ = op
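# Note (describing make_cmp above): variadic comparisons are checked pairwise over
# adjacent arguments, so e.g. (< 1 2 3) evaluates 1 < 2 and 2 < 3.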
for args in [
("=", "eq", values.W_Bool.make),
("<", "lt", values.W_Bool.make),
(">", "gt", values.W_Bool.make),
("<=", "le", values.W_Bool.make),
(">=", "ge", values.W_Bool.make),
]:
make_cmp(*args)
@expose("integer?", [values.W_Object])
def integerp(n):
return values.W_Bool.make(isinstance(n, values.W_Number) and n.isinteger())
@expose("exact-integer?", [values.W_Object])
def exact_integerp(n):
return values.W_Bool.make(isinstance(n, values.W_Integer))
@expose("exact-nonnegative-integer?", [values.W_Object])
def exact_nonneg_integerp(n):
from rpython.rlib.rbigint import NULLRBIGINT
if isinstance(n, values.W_Fixnum):
return values.W_Bool.make(n.value >= 0)
if isinstance(n, values.W_Bignum):
return values.W_Bool.make(n.value.ge(NULLRBIGINT))
return values.w_false
@expose("exact-positive-integer?", [values.W_Object])
def exact_positive_integerp(n):
from rpython.rlib.rbigint import NULLRBIGINT
if isinstance(n, values.W_Fixnum):
return values.W_Bool.make(n.value > 0)
if isinstance(n, values.W_Bignum):
return values.W_Bool.make(n.value.gt(NULLRBIGINT))
return values.w_false
@always_inline
def is_real(obj):
return isinstance(obj, values.W_Real)
@expose("real?", [values.W_Object])
def realp(n):
return values.W_Bool.make(is_real(n))
@expose("inexact-real?", [values.W_Object])
def inexact_real(n):
return values.W_Bool.make(isinstance(n, values.W_Flonum))
@expose("single-flonum?", [values.W_Object])
def single_flonum(n):
return values.w_false
@expose("double-flonum?", [values.W_Object])
def double_flonum(n):
return values.W_Bool.make(isinstance(n, values.W_Flonum))
@expose("real->double-flonum", [values.W_Number])
def real_to_double_flonum(num):
if is_real(num):
return num.arith_exact_inexact()
raise SchemeException("real->double-flonum: %s is not real" % num.tostring())
@expose("rational?", [values.W_Object])
def rationalp(n):
if isinstance(n, values.W_Fixnum) or isinstance(n, values.W_Bignum):
return values.w_true
if isinstance(n, values.W_Flonum):
v = n.value
return values.W_Bool.make(not (math.isnan(v) or math.isinf(v)))
return values.W_Bool.make(isinstance(n, values.W_Rational))
def is_exact(n):
if isinstance(n, values.W_Complex):
return is_exact(n.real) and is_exact(n.imag)
return (isinstance(n, values.W_Fixnum) or
isinstance(n, values.W_Bignum) or
isinstance(n, values.W_Rational))
def is_inexact(n):
if isinstance(n, values.W_Complex):
return is_inexact(n.real) or is_inexact(n.imag)
return isinstance(n, values.W_Flonum)
@expose("exact?", [values.W_Object])
def exactp(n):
return values.W_Bool.make(is_exact(n))
@expose("inexact?", [values.W_Object])
def inexactp(n):
return values.W_Bool.make(is_inexact(n))
@expose("quotient/remainder", [values.W_Integer, values.W_Integer])
def quotient_remainder(a, b):
return values.Values._make2(a.arith_quotient(b), a.arith_mod(b)) #FIXME
def make_binary_arith(name, methname):
@expose(name, [values.W_Number, values.W_Number], simple=True)
def do(a, b):
return getattr(a, methname)(b)
do.__name__ = methname
for args in [
("quotient", "arith_quotient"),
("remainder", "arith_remainder"),
("modulo", "arith_mod"),
("expt", "arith_pow"),
]:
make_binary_arith(*args)
@expose("flexpt", [values.W_Flonum] * 2)
def flexpt(n, m):
return n.arith_pow_same(m)
def make_arith(name, neutral_element, methname, supports_zero_args):
@expose(name, simple=True)
@jit.unroll_safe
def do(args):
# XXX so far (+ '()) returns '(). need better type checking here
if not args:
if not supports_zero_args:
raise SchemeException("expected at least 1 argument to %s" % name)
return neutral_element
if len(args) == 1:
if neutral_element is not None:
return getattr(neutral_element, methname)(args[0])
return args[0]
else:
init = args[0]
for i in range(1, jit.promote(len(args))):
init = getattr(init, methname)(args[i])
return init
do.__name__ = methname
for args in [
("+" , values.W_Fixnum.ZERO , "arith_add" , True ) ,
("-" , values.W_Fixnum.ZERO , "arith_sub" , False ) ,
("*" , values.W_Fixnum.ONE , "arith_mul" , True ) ,
("/" , values.W_Fixnum.ONE , "arith_div" , False ) ,
("max" , None , "arith_max" , False ) ,
("min" , None , "arith_min" , False ) ,
("gcd" , values.W_Fixnum.ZERO , "arith_gcd" , True ) ,
("lcm" , values.W_Fixnum.ONE , "arith_lcm" , True ) ,
("bitwise-and" , values.W_Fixnum.make(-1) , "arith_and" , True ) ,
("bitwise-ior" , values.W_Fixnum.ZERO , "arith_or" , True ) ,
("bitwise-xor" , values.W_Fixnum.ZERO , "arith_xor" , True ) ,
]:
make_arith(*args)
def make_fixedtype_binary_arith(
name, methname, intversion=True, floatversion=True):
methname += "_same"
if floatversion:
@expose("fl" + name, [values.W_Flonum] * 2, simple=True)
def do(a, b):
return getattr(a, methname)(b)
do.__name__ = "fl_" + methname
if intversion:
@expose("fx" + name, [values.W_Fixnum] * 2, simple=True)
def do(a, b):
return getattr(a, methname)(b)
do.__name__ = "fx_" + methname
for args in [
("+" , "arith_add" ) ,
("-" , "arith_sub" ) ,
("*" , "arith_mul" ) ,
("/" , "arith_div" , False ) ,
("and" , "arith_and" , True , False ) ,
("max" , "arith_max" ) ,
("min" , "arith_min" ) ,
("quotient" , "arith_quotient" , True , False ) ,
("remainder" , "arith_remainder" , True , False ) ,
("modulo" , "arith_mod" , True , False ) ,
]:
make_fixedtype_binary_arith(*args)
def make_fixedtype_cmps(name, methname):
methname = "arith_%s_same" % methname
def do(a, b):
return values.W_Bool.make(getattr(a, methname)(b))
do.__name__ = "fl_" + methname
expose("fl" + name, [values.W_Flonum] * 2, simple=True)(do)
expose("unsafe-fl" + name, [unsafe(values.W_Flonum)] * 2, simple=True)(do)
def do(a, b):
return values.W_Bool.make(getattr(a, methname)(b))
do.__name__ = "fx_" + methname
expose("fx" + name, [values.W_Fixnum] * 2, simple=True)(do)
expose("unsafe-fx" + name, [unsafe(values.W_Fixnum)] * 2, simple=True)(do)
for args in [
("<", "lt"),
("<=", "le"),
(">", "gt"),
(">=", "ge"),
("=", "eq"),
]:
make_fixedtype_cmps(*args)
@expose("unsafe-flsqrt", [unsafe(values.W_Flonum)])
def flsqrt(f):
return f.arith_sqrt()
@expose("add1", [values.W_Number])
def add1(v):
return v.arith_add(values.W_Fixnum.ONE)
@expose("atan", [values.W_Number, default(values.W_Number, None)])
def atan(y, x):
if x is not None:
# FIXME: signs determine the quadrant of the result
# and care about NaNs and precision
if x.arith_zerop() is values.w_false:
z = y.arith_div(x)
else:
# we should raise exn_fail_contract_divide_by_zero
raise SchemeException("zero_divisor")
else:
z = y
return getattr(z, "arith_atan")()
def make_unary_arith(name, methname, flversion=False, fxversion=False,
unwrap_type=values.W_Number):
def do(a):
return getattr(a, methname)()
do.__name__ = methname
expose(name, [unwrap_type], simple=True)(do)
if flversion:
@expose("fl" + name, [values.W_Flonum], simple=True)
def dofl(a):
return getattr(a, methname)()
dofl.__name__ = methname
if fxversion:
@expose("fx" + name, [values.W_Fixnum], simple=True)
def dofx(a):
return getattr(a, methname)()
dofx.__name__ = methname
for args in [
("sin", "arith_sin", True),
("cos", "arith_cos", True),
("tan", "arith_tan", True),
("sinh", "arith_sinh", True),
("cosh", "arith_cosh", True),
("tanh", "arith_tanh", True),
("sqrt", "arith_sqrt", True),
("asin", "arith_asin", True),
("acos", "arith_acos", True),
# ("tan", "arith_tan", True), down below
("log", "arith_log", True),
("sub1", "arith_sub1"),
("inexact->exact", "arith_inexact_exact"),
("exact->inexact", "arith_exact_inexact"),
("zero?", "arith_zerop"),
("abs", "arith_abs", True),
("round", "arith_round", True),
("truncate", "arith_truncate", True),
("floor", "arith_floor", True),
("ceiling", "arith_ceiling", True),
("bitwise-not", "arith_not", False, False, values.W_Integer),
("exp", "arith_exp", True),
]:
make_unary_arith(*args)
@expose("odd?", [values.W_Number])
def oddp(n):
if not n.isinteger():
raise SchemeException("odd?: expected integer got %s" % n.tostring())
return n.arith_oddp()
@expose("even?", [values.W_Number])
def evenp(n):
if not n.isinteger():
raise SchemeException("even?: expected integer got %s" % n.tostring())
return n.arith_evenp()
@expose("negative?", [values.W_Number])
def negative_predicate(n):
if not is_real(n):
raise SchemeException("negative?: expected real? in argument 0")
return n.arith_negativep()
@expose("positive?", [values.W_Number])
def positive_predicate(n):
if not is_real(n):
raise SchemeException("positive?: expected real? in argument 0")
return n.arith_positivep()
@expose("bitwise-bit-set?", [values.W_Integer, values.W_Integer])
def bitwise_bit_setp(w_n, w_m):
if w_m.arith_negativep() is values.w_true:
raise SchemeException("bitwise-bit-set?: second argument must be non-negative")
if not isinstance(w_m, values.W_Fixnum):
# a bignum that has such a big bit set does not fit in memory
return w_n.arith_negativep()
v = w_n.arith_and(arith_shift(values.W_Fixnum.ONE, w_m))
if isinstance(v, values.W_Fixnum) and 0 == v.value:
return values.w_false
else:
return values.w_true
def arith_shift(w_a, w_b):
# XXX support biginteger as second argument (returning 0 and out of memory)
b = w_b.value
if b >= 0:
return w_a.arith_shl(w_b)
else:
return w_a.arith_shr(values.W_Fixnum(-b))
# don't use the decorator to make the function usable in this file
expose("arithmetic-shift", [values.W_Integer, values.W_Fixnum])(arith_shift)
@expose("fxlshift", [values.W_Fixnum, values.W_Fixnum])
def fxlshift(w_a, w_b):
b = w_b.value
if 0 <= b <= 64:
try:
res = rarithmetic.ovfcheck(w_a.value << b)
except OverflowError:
raise SchemeException(
"fxlshift: result is not a fixnum")
return values.W_Fixnum(res)
else:
raise SchemeException(
"fxlshift: expected integer >= 0 and <= 64, got %s" % w_b.tostring())
@expose("fxrshift", [values.W_Fixnum, values.W_Fixnum])
def fxrshift(w_a, w_b):
b = w_b.value
if b >= 0:
return w_a.arith_shr(w_b)
else:
raise SchemeException("fxrshift: expected positive argument, got %s"%w_b)
@expose("make-rectangular", [values.W_Number, values.W_Number])
def make_rectangular(x, y):
if not is_real(x) or not is_real(y):
raise SchemeException("make-rectangular: expected real inputs")
return values.W_Complex.from_real_pair(x, y)
## Unsafe Fixnum ops
@expose("unsafe-fxlshift", [unsafe(values.W_Fixnum), unsafe(values.W_Fixnum)])
def unsafe_fxlshift(w_a, w_b):
res = rarithmetic.intmask(w_a.value << w_b.value)
return values.W_Fixnum(res)
@expose("unsafe-fxrshift", [unsafe(values.W_Fixnum), unsafe(values.W_Fixnum)])
def unsafe_fxrshift(w_a, w_b):
res = w_a.value >> w_b.value
return values.W_Fixnum(res)
@expose("unsafe-fxand", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxand(w_a, w_b):
return w_a.arith_and(w_b)
@expose("unsafe-fxior", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxior(w_a, w_b):
return w_a.arith_or(w_b)
@expose("unsafe-fxxor", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxxor(w_a, w_b):
return w_a.arith_xor(w_b)
@expose("unsafe-fx+", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxplus(a, b):
return values.W_Fixnum(a.value + b.value)
@expose("unsafe-fx-", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxminus(a, b):
return values.W_Fixnum(a.value - b.value)
@expose("unsafe-fx*", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxtimes(a, b):
return values.W_Fixnum(a.value * b.value)
@expose("unsafe-fxmin", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxmin(a, b):
return values.W_Fixnum(min(a.value, b.value))
@expose("unsafe-fxmax", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxmax(a, b):
return values.W_Fixnum(max(a.value, b.value))
@expose("unsafe-fxmodulo", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxmodulo(a, b):
return values.W_Fixnum(a.value % b.value)
@expose("unsafe-fxquotient", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxquotient(a, b):
return values.W_Fixnum(rarithmetic.int_c_div(a.value, b.value))
@expose("unsafe-fxremainder", [unsafe(values.W_Fixnum)] * 2)
def unsafe_fxremainder(w_a, w_b):
a = abs(w_a.value)
b = abs(w_b.value)
res = a % b
if w_a.value < 0:
res = -res
return values.W_Fixnum(res)
@expose("fx->fl", [values.W_Fixnum])
def fxfl(a):
return values.W_Flonum(float(a.value))
@expose("unsafe-fx->fl", [unsafe(values.W_Fixnum)])
def unsafe_fxfl(a):
return values.W_Flonum(float(a.value))
@expose("->fl", [values.W_Object])
def to_fl(n):
if isinstance(n, values.W_Fixnum):
return values.W_Flonum(float(n.value))
if isinstance(n, values.W_Bignum):
return values.W_Flonum(rbigint.tofloat(n.value))
raise SchemeException("->fl: expected an exact-integer")
@expose("real->floating-point-bytes",
[values.W_Number, values.W_Fixnum, default(values.W_Bool, values.w_false)])
def real_floating_point_bytes(n, _size, big_endian):
if isinstance(n, values.W_Flonum):
v = n.value
elif isinstance(n, values.W_Fixnum):
v = float(n.value)
elif isinstance(n, values.W_Bignum):
v = rbigint.tofloat(n.value)
else:
raise SchemeException("real->floating-point-bytes: expected real")
size = _size.value
if size != 4 and size != 8:
raise SchemeException("real->floating-point-bytes: size not 4 or 8")
intval = longlong2float.float2longlong(v)
if big_endian is not values.w_false:
intval = rarithmetic.byteswap(intval)
chars = [chr((intval >> (i * 8)) % 256) for i in range(size)]
return values.W_Bytes.from_charlist(chars)
@expose("floating-point-bytes->real",
[values.W_Bytes, default(values.W_Object, values.w_false)])
def floating_point_bytes_to_real(bstr, signed):
# XXX Currently does not make use of the signed parameter
bytes = bstr.as_bytes_list()
if len(bytes) not in (4, 8):
        raise SchemeException(
            "floating-point-bytes->real: byte string must have length 4 or 8")
val = 0
for i, v in enumerate(bytes):
val += ord(v) << (i * 8)
return values.W_Flonum(longlong2float.longlong2float(val))
@expose("integer-bytes->integer",
[values.W_Bytes,
default(values.W_Object, values.w_false),
default(values.W_Object, values.w_false),
default(values.W_Fixnum, values.W_Fixnum.ZERO),
default(values.W_Fixnum, None)])
def integer_bytes_to_integer(bstr, signed, big_endian, w_start, w_end):
bytes = bstr.as_bytes_list()
start = w_start.value
if w_end is None:
end = len(bytes)
else:
end = w_end.value
if not (0 <= start < len(bytes)):
raise SchemeException(
"integer-bytes->integer: start position not in byte string")
if not (0 <= end <= len(bytes)):
raise SchemeException(
"integer-bytes->integer: end position not in byte string")
if end < start:
raise SchemeException(
"integer-bytes->integer: end position less than start position")
length = end - start
if length not in (2, 4, 8):
raise SchemeException(
"integer-bytes->integer: byte string must have length 2, 4, or 8")
if start != 0 or end != len(bytes):
bytes = bytes[start:end]
byteorder = "little" if big_endian is values.w_false else "big"
is_signed = signed is not values.w_false
big = rbigint.frombytes(bytes, byteorder, is_signed)
try:
result = values.W_Fixnum(big.toint())
except OverflowError:
result = values.W_Bignum(big)
return result
@expose("integer->integer-bytes",
[values.W_Number,
values.W_Fixnum,
default(values.W_Object, values.w_false),
default(values.W_Object, values.w_false),
default(values.W_Bytes, None),
default(values.W_Fixnum, values.W_Fixnum.ZERO)])
@jit.unroll_safe
def integer_to_integer_bytes(n, w_size, signed, big_endian, w_dest, w_start):
from rpython.rtyper.lltypesystem import rffi
if isinstance(n, values.W_Fixnum):
intval = n.value
elif isinstance(n, values.W_Bignum):
raise NotImplementedError("not implemented yet")
else:
raise SchemeException("integer->integer-bytes: expected exact integer")
size = jit.promote(w_size.value)
if size not in (2, 4, 8):
raise SchemeException("integer->integer-bytes: size not 2, 4, or 8")
start = w_start.value
if w_dest is not None:
chars = w_dest.as_bytes_list()
result = w_dest
else:
chars = ['\x00'] * size
result = values.W_Bytes.from_charlist(chars, immutable=False)
if start < 0:
raise SchemeException(
"integer->integer-bytes: start value less than zero")
if start + size > len(chars):
raise SchemeException(
"integer->integer-bytes: byte string length is less than starting "
"position plus size")
is_signed = signed is not values.w_false
for i in range(start, start+size):
chars[i] = chr(intval & 0xFF)
intval >>= 8
if big_endian is values.w_false:
return result
    # Swap the bytes for big endian
left = start
right = start + size - 1
while left < right:
chars[left], chars[right] = chars[right], chars[left]
left, right = left + 1, right - 1
return result
@expose("integer-length", [values.W_Object])
@jit.elidable
def integer_length(obj):
if isinstance(obj, values.W_Fixnum):
val = obj.value
if val < 0:
val = ~val
n = r_uint(val)
result = 0
while n:
n >>= r_uint(1)
result += 1
return values.wrap(result)
if isinstance(obj, values.W_Bignum):
# XXX The bit_length operation on rbigints is off by one for negative
# powers of two (this may be intentional?).
# So, we detect this case and apply a correction.
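        # e.g. Racket expects (integer-length -8) = 3, while bit_length() reports 4,
        # hence the correction below for negative powers of two.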
bignum = obj.value
negative_power_of_two = True
if not bignum.tobool():
return values.W_Fixnum.ZERO
elif bignum.sign != -1:
negative_power_of_two = False
else:
for i in range(bignum.size - 1):
if bignum.udigit(i) != 0:
negative_power_of_two = False
break
msd = bignum.udigit(r_uint(bignum.size - 1))
while msd:
if (msd & r_uint(0x1)) and msd != r_uint(1):
negative_power_of_two = False
break
msd >>= r_uint(1)
bit_length = bignum.bit_length()
if negative_power_of_two:
bit_length -= 1
return values.wrap(bit_length)
raise SchemeException("integer-length: expected exact-integer? got %s" % obj.tostring())
# FIXME: implementation
@expose("fxvector?", [values.W_Object])
def is_fxvector(v):
return values.w_false
@expose("flvector?", [values.W_Object])
def is_flvector(v):
return values.W_Bool.make(isinstance(v, values_vector.W_FlVector))
## Unsafe Flonum ops
@expose("unsafe-fl+", [unsafe(values.W_Flonum)] * 2)
def unsafe_flplus(a, b):
return values.W_Flonum(a.value + b.value)
@expose("unsafe-fl-", [unsafe(values.W_Flonum)] * 2)
def unsafe_flminus(a, b):
return values.W_Flonum(a.value - b.value)
@expose("unsafe-fl*", [unsafe(values.W_Flonum)] * 2)
def unsafe_fltimes(a, b):
return values.W_Flonum(a.value * b.value)
@expose("unsafe-fl/", [unsafe(values.W_Flonum)] * 2)
def unsafe_fldiv(a, b):
return values.W_Flonum(a.value / b.value)
@expose("unsafe-flmin", [unsafe(values.W_Flonum)] * 2)
def unsafe_flmin(a, b):
return values.W_Flonum(min(a.value, b.value))
@expose("unsafe-flmax", [unsafe(values.W_Flonum)] * 2)
def unsafe_flmax(a, b):
return values.W_Flonum(max(a.value, b.value))
@expose("unsafe-flabs", [unsafe(values.W_Flonum)])
def unsafe_flabs(a):
return values.W_Flonum(abs(a.value))
|
magnusmorton/pycket
|
pycket/prims/numeric.py
|
Python
|
mit
| 23,301 | 0.008412 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.utils2 import i18n
import scraper
BASE_URL = 'http://www.rlshd.net'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'RLSHD'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, require_debrid=True, cache_limit=.5)
sources = self.__get_post_links(html, video)
for source in sources:
if scraper_utils.excluded_link(source): continue
host = urlparse.urlparse(source).hostname
hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': sources[source], 'direct': False}
hosters.append(hoster)
return hosters
def __get_post_links(self, html, video):
sources = {}
post = dom_parser.parse_dom(html, 'article', {'id': 'post-\d+'})
if post:
for fragment in dom_parser.parse_dom(post[0], 'h2'):
for match in re.finditer('href="([^"]+)', fragment):
stream_url = match.group(1)
meta = scraper_utils.parse_episode_link(stream_url)
release_quality = scraper_utils.height_get_quality(meta['height'])
host = urlparse.urlparse(stream_url).hostname
quality = scraper_utils.get_quality(video, host, release_quality)
sources[stream_url] = quality
return sources
def get_url(self, video):
return self._blog_get_url(video)
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings = scraper_utils.disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="30" visible="eq(-3,true)"/>' % (name, i18n('filter_results_days')))
settings.append(' <setting id="%s-select" type="enum" label=" %s" lvalues="30636|30637" default="0" visible="eq(-4,true)"/>' % (name, i18n('auto_select')))
return settings
def search(self, video_type, title, year, season=''): # @UnusedVariable
html = self._http_get(self.base_url, params={'s': title}, require_debrid=True, cache_limit=1)
post_pattern = 'class="entry-title">\s*<a[^>]+href="(?P<url>[^"]*/(?P<date>\d{4}/\d{1,2}/\d{1,2})/[^"]*)[^>]+>(?P<post_title>[^<]+)'
date_format = '%Y/%m/%d'
return self._blog_proc_results(html, post_pattern, date_format, video_type, title, year)
|
odicraig/kodi2odi
|
addons/plugin.video.salts/scrapers/rlshd_scraper.py
|
Python
|
gpl-3.0
| 3,957 | 0.004043 |
"""Order/create a VLAN instance."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.managers import ordering
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
@click.command()
@click.option('--name', required=False, prompt=True, help="Vlan name")
@click.option('--datacenter', '-d', required=False, help="Datacenter shortname")
@click.option('--pod', '-p', required=False, help="Pod name. E.g dal05.pod01")
@click.option('--network', default='public', show_default=True, type=click.Choice(['public', 'private']),
help='Network vlan type')
@click.option('--billing', default='hourly', show_default=True, type=click.Choice(['hourly', 'monthly']),
help="Billing rate")
@environment.pass_env
def cli(env, name, datacenter, pod, network, billing):
"""Order/create a VLAN instance."""
item_package = ['PUBLIC_NETWORK_VLAN']
complex_type = 'SoftLayer_Container_Product_Order_Network_Vlan'
extras = {'name': name}
if pod:
datacenter = pod.split('.')[0]
mgr = SoftLayer.NetworkManager(env.client)
pods = mgr.get_pods()
for router in pods:
if router.get('name') == pod:
if network == 'public':
extras['routerId'] = router.get('frontendRouterId')
elif network == 'private':
extras['routerId'] = router.get('backendRouterId')
break
if not extras.get('routerId'):
raise exceptions.CLIAbort(
"Unable to find pod name: {}".format(pod))
if network == 'private':
item_package = ['PRIVATE_NETWORK_VLAN']
ordering_manager = ordering.OrderingManager(env.client)
result = ordering_manager.place_order(package_keyname='NETWORK_VLAN',
location=datacenter,
item_keynames=item_package,
complex_type=complex_type,
hourly=billing,
extras=extras)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
table.add_row(['id', result['orderId']])
table.add_row(['created', result['orderDate']])
table.add_row(['name', result['orderDetails']['orderContainers'][0]['name']])
env.fout(table)
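# Illustrative invocation (an assumption, not part of the original file):
#   slcli vlan create --name my-vlan -d dal13 --network private --billing hourly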
|
softlayer/softlayer-python
|
SoftLayer/CLI/vlan/create.py
|
Python
|
mit
| 2,493 | 0.001604 |
"""Interpolators wrap arrays to allow the array to be indexed in continuous coordinates
This module uses the trackvis coordinate system, for more information about
this coordinate system please see dipy.tracking.utils
The following modules also use this coordinate system:
dipy.tracking.utils
dipy.tracking.integration
dipy.reconst.interpolate
"""
from numpy import array
from dipy.reconst.recspeed import trilinear_interp
class OutsideImage(Exception):
pass
class Interpolator(object):
"""Class to be subclassed by different interpolator types"""
def __init__(self, data, voxel_size):
self.data = data
self.voxel_size = array(voxel_size, dtype=float, copy=True)
class NearestNeighborInterpolator(Interpolator):
"""Interpolates data using nearest neighbor interpolation"""
def __getitem__(self, index):
index = tuple(index // self.voxel_size)
if min(index) < 0:
raise OutsideImage('Negative Index')
try:
return self.data[index]
except IndexError:
raise OutsideImage
class TriLinearInterpolator(Interpolator):
"""Interpolates data using trilinear interpolation
interpolate 4d diffusion volume using 3 indices, ie data[x, y, z]
"""
def __init__(self, data, voxel_size):
super(TriLinearInterpolator, self).__init__(data, voxel_size)
if self.voxel_size.shape != (3,) or self.data.ndim != 4:
raise ValueError("Data should be 4d volume of diffusion data and "
"voxel_size should have 3 values, ie the size "
"of a 3d voxel")
def __getitem__(self, index):
index = array(index, copy=False, dtype="float")
try:
return trilinear_interp(self.data, index, self.voxel_size)
except IndexError:
raise OutsideImage
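# Minimal usage sketch (illustrative; not part of the original module):
#
#     import numpy as np
#     data = np.arange(24).reshape(2, 3, 4)
#     nn = NearestNeighborInterpolator(data, voxel_size=(2.0, 2.0, 2.0))
#     nn[3.9, 2.1, 5.5]   # nearest voxel for that trackvis (mm) point -> data[1, 1, 2]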
|
mdesco/dipy
|
dipy/reconst/interpolate.py
|
Python
|
bsd-3-clause
| 1,865 | 0.002681 |
# copied from OpenCAMLib Google code project, Anders Wallin says it was originally from Julian Todd. License unknown, likely to be GPL
# python stl file tools
import re
import struct
import math
import sys
###########################################################################
def TriangleNormal(x0, y0, z0, x1, y1, z1, x2, y2, z2):
# calculate facet normal
v01 = (x1 - x0, y1 - y0, z1 - z0)
v02 = (x2 - x0, y2 - y0, z2 - z0)
n = ( v01[1] * v02[2] - v01[2] * v02[1],
v01[2] * v02[0] - v01[0] * v02[2],
v01[0] * v02[1] - v01[1] * v02[0])
ln = math.sqrt(n[0] * n[0] + n[1] * n[1] + n[2] * n[2])
if ln > 0.0:
return (n[0] / ln, n[1] / ln, n[2] / ln)
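# Worked example (added note): for the triangle (0,0,0)-(1,0,0)-(0,1,0) the edge vectors
# are (1,0,0) and (0,1,0), their cross product is (0,0,1) with unit length, so the facet
# normal is (0.0, 0.0, 1.0); zero-area (degenerate) facets fall through and return None.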
###########################################################################
class reader:
def __init__(self, fn = None):
self.fn = fn
if self.fn:
fl = open(self.fn, "r")
self.isascii = self.IsAscii(fl)
fl.close()
self.little_endian = (struct.unpack("<f", struct.pack("@f", 140919.00))[0] == 140919.00)
# print "computer is little endian: ", self.little_endian
# print "file is ascii: ", self.isascii
self.nfacets = 0
self.ndegenerate = 0
self.mr = MeasureBoundingBox()
def IsAscii(self, fdata):
l = fdata.readline(1024)
isascii = l[:5] == "solid" and (len(l) == 5 or (re.search("[^A-Za-z0-9\,\.\/\;\:\'\"\+\-\s\r\n]", l[6:]) == None)) # some files say 'solid' but are binary files, we try to find a non alphanumerical character in the rest to the first line
fdata.seek(0)
return isascii
def ReadVertex(self, l):
l = l.replace(",", ".") # Catia writes ASCII STL with , as decimal point
if re.search("facet", l) or re.search("outer", l) or re.search("endloop", l) or re.search("endfacet", l):
return
vertex = re.search("vertex\s*([\d\-+\.EeDd]+)\s*([\d\-+\.EeDd]+)\s*([\d\-+\.EeDd]+)", l)
if vertex:
return (float(vertex.group(1)), float(vertex.group(2)), float(vertex.group(3)))
def BinaryReadFacets(self, fl, fs = None):
# 80 bytes of header
hdr = fl.read(80)
# 4 bytes for number of facets
self.nfacets = struct.unpack("<i", fl.read(4))[0]
nfacets = 0
        # we don't loop over self.nfacets so that we can recover from broken headers that report the wrong number of facets
while True:
try:
                # 50 byte records with normals and vertices per facet
                fl.read(12) # skip the stored normal (it is recomputed when needed)
xyz = struct.unpack("<9f", fl.read(36)) # little endian
if TriangleNormal(xyz[0], xyz[1], xyz[2], xyz[3], xyz[4], xyz[5], xyz[6], xyz[7], xyz[8]) == None:
self.ndegenerate = self.ndegenerate + 1
if (fs):
fs.PushTriangle(xyz[0], xyz[1], xyz[2], xyz[3], xyz[4], xyz[5], xyz[6], xyz[7], xyz[8])
self.mr.PushTriangle(xyz[0], xyz[1], xyz[2], xyz[3], xyz[4], xyz[5], xyz[6], xyz[7], xyz[8])
fl.read(2) # padding
nfacets += 1
except struct.error, e:
break
if self.nfacets != nfacets:
sys.stderr.write("Number of facets according to header: %d, number of facets read: %d\n" % (self.nfacets, nfacets))
self.nfacets = nfacets
def AsciiReadFacets(self, fl, fs = None):
lines = fl.readlines()
xyz = []
for l in lines:
tpl = self.ReadVertex(l)
if tpl:
xyz.append(tpl[0])
xyz.append(tpl[1])
xyz.append(tpl[2])
if len(xyz) == 9:
if not TriangleNormal(xyz[0], xyz[1], xyz[2], xyz[3], xyz[4], xyz[5], xyz[6], xyz[7], xyz[8]):
self.ndegenerate += 1
if (fs):
fs.PushTriangle(xyz[0], xyz[1], xyz[2], xyz[3], xyz[4], xyz[5], xyz[6], xyz[7], xyz[8])
self.nfacets += 1
self.mr.PushTriangle(xyz[0], xyz[1], xyz[2], xyz[3], xyz[4], xyz[5], xyz[6], xyz[7], xyz[8])
xyz = []
################################################################################
class writer:
def __init__(self, fn, write_ascii = False):
self.fn = fn
self.ascii = write_ascii
self.scale = 1.0
def write(self, fc):
self.fl = open(self.fn, "w")
self.WriteHeader(self.fl, fc.nfacets)
for t in xrange(fc.nfacets):
x0, y0, z0, x1, y1, z1, x2, y2, z2 = fc.GetFacet(t)
self.WriteFacet(x0, y0, z0, x1, y1, z1, x2, y2, z2)
self.WriteFooter(self.fl)
self.fl.flush()
self.fl.close()
def WriteHeader(self, fl, nfacets):
if self.ascii:
fl.write("solid\n")
else:
str = "Stereolithography "
assert(len(str) == 80)
fl.write(str)
fl.write(struct.pack("<i", nfacets))
def WriteFacet(self, x0, y0, z0, x1, y1, z1, x2, y2, z2, skip_degenerated = True):
if self.scale != 1.0:
x0 *= self.scale
y0 *= self.scale
z0 *= self.scale
x1 *= self.scale
y1 *= self.scale
z1 *= self.scale
x2 *= self.scale
y2 *= self.scale
z2 *= self.scale
# calculate facet normal
n = TriangleNormal(x0, y0, z0, x1, y1, z1, x2, y2, z2)
if n == None:
if skip_degenerated: return
n = (0.0, 0.0, 0.0)
if self.ascii:
self.fl.write("facet normal %f %f %f\n" % n)
self.fl.write("outer loop\n vertex %f %f %f\n vertex %f %f %f\n vertex %f %f %f\nendloop\nendfacet\n" %
(x0, y0, z0, x1, y1, z1, x2, y2, z2))
else:
self.fl.write(struct.pack("<12f2c", n[0], n[1], n[2], x0, y0, z0, x1, y1, z1, x2, y2, z2, " ", " "))
def WriteFooter(self, fl):
if self.ascii:
fl.write("endsolid\n")
def PushTriangle(self, x0, y0, z0, x1, y1, z1, x2, y2, z2):
self.WriteFacet(x0, y0, z0, x1, y1, z1, x2, y2, z2)
################################################################################
class MeasureBoundingBox:
def __init__(self):
self.xlo = None
self.xhi = None
self.ylo = None
self.yhi = None
self.zlo = None
self.zhi = None
def PushTriangle(self, x0, y0, z0, x1, y1, z1, x2, y2, z2):
for v in [(x0, y0, z0), (x1, y1, z1), (x2, y2, z2)]:
if self.xlo is None or v[0] < self.xlo:
self.xlo = v[0]
if self.ylo is None or v[1] < self.ylo:
self.ylo = v[1]
if self.zlo is None or v[2] < self.zlo:
self.zlo = v[2]
if self.xhi is None or v[0] > self.xhi:
self.xhi = v[0]
if self.yhi is None or v[1] > self.yhi:
self.yhi = v[1]
if self.zhi is None or v[2] > self.zhi:
self.zhi = v[2]
###########################################################################
class converter(reader):
def __init__(self, fin = None):
reader.__init__(self, fin)
        # read to find number of facets, but subtract degenerated facets
self.wr = None
def convert(self, fout, freadfrom = None):
if self.fn:
rmod = self.isascii and "r" or "rb"
fl = open(self.fn, rmod)
if self.isascii:
self.AsciiReadFacets(fl)
else:
self.BinaryReadFacets(fl)
fl.close()
elif freadfrom:
if self.isascii:
self.AsciiReadFacets(freadfrom)
else:
self.BinaryReadFacets(freadfrom)
freadfrom.seek(0) # rewind to start
self.wr = writer(fout, not self.isascii)
wmod = self.isascii and "wb" or "w"
self.fpout = open(fout, wmod)
self.wr.fl = self.fpout
self.wr.WriteHeader(self.fpout, self.nfacets - self.ndegenerate)
self.ndegenerate = 0
if self.fn:
rmod = self.isascii and "r" or "rb"
fl = open(self.fn, rmod)
if self.isascii:
self.AsciiReadFacets(fl, self)
else:
self.BinaryReadFacets(fl, self)
fl.close()
elif freadfrom:
if self.isascii:
self.AsciiReadFacets(freadfrom, self)
else:
self.BinaryReadFacets(freadfrom, self)
self.wr.WriteFooter(self.fpout)
self.fpout.close()
def PushTriangle(self, x0, y0, z0, x1, y1, z1, x2, y2, z2):
if self.wr != None:
self.wr.WriteFacet(x0, y0, z0, x1, y1, z1, x2, y2, z2)
###########################################################################
# use all the options flag. could have -tk which causes it to import using tk, -in, -out
# design all the settings so it works as pipe, or as files.
# stltools --in=file.stl --out=fil1.stl -b/-a if --out missing then piping
# stltools --tk does the following.
# stltools --in=file.stl --stats prints bounding box etc.
if __name__ == '__main__':
import tkFileDialog
fin = tkFileDialog.askopenfilename(
defaultextension = '*.stl',
filetypes = [('Stereolithography','*.stl'),('all files','*.*')],
title = "Open STL")
a = converter(fin)
t = a.isascii and "Save as STL (Binary format)" or "Save as STL (ASCII format)"
fout = tkFileDialog.asksaveasfilename(
defaultextension = '*.stl',
filetypes = [('Stereolithography','*.stl'),('all files','*.*')],
title = t)
a.convert(fout)
# Example STL ascii file:
#
# solid
# ...
# facet normal 0.00 0.00 1.00
# outer loop
# vertex 2.00 2.00 0.00
# vertex -1.00 1.00 0.00
# vertex 0.00 -1.00 0.00
# endloop
# endfacet
# ...
# endsolid
|
JohnyEngine/CNC
|
heekscnc/STLTools.py
|
Python
|
apache-2.0
| 10,565 | 0.015618 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class GadgetTest( GafferUITest.TestCase ) :
def testTransform( self ) :
g = GafferUI.TextGadget( "hello" )
self.assertEqual( g.getTransform(), IECore.M44f() )
t = IECore.M44f.createScaled( IECore.V3f( 2 ) )
g.setTransform( t )
self.assertEqual( g.getTransform(), t )
c1 = GafferUI.LinearContainer()
c1.addChild( g )
c2 = GafferUI.LinearContainer()
c2.addChild( c1 )
t2 = IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) )
c2.setTransform( t2 )
self.assertEqual( g.fullTransform(), t * t2 )
self.assertEqual( g.fullTransform( c1 ), t )
def testToolTip( self ) :
g = GafferUI.TextGadget( "hello" )
self.assertEqual( g.getToolTip( IECore.LineSegment3f() ), "" )
g.setToolTip( "hi" )
self.assertEqual( g.getToolTip( IECore.LineSegment3f() ), "hi" )
def testDerivationInPython( self ) :
class MyGadget( GafferUI.Gadget ) :
def __init__( self ) :
GafferUI.Gadget.__init__( self )
def bound( self ) :
return IECore.Box3f( IECore.V3f( -20, 10, 2 ), IECore.V3f( 10, 15, 5 ) )
mg = MyGadget()
# we can't call the methods of the gadget directly in python to test the
		# bindings, as that doesn't prove anything (we're not exercising the virtual
		# method override code in the wrapper). Instead we cause C++ to call through
# for us by adding our gadget to a parent and making calls to the parent.
c = GafferUI.IndividualContainer()
c.addChild( mg )
self.assertEqual( c.bound().size(), mg.bound().size() )
def testStyle( self ) :
g = GafferUI.TextGadget( "test" )
l = GafferUI.LinearContainer()
l.addChild( g )
self.assertEqual( g.getStyle(), None )
self.assertEqual( l.getStyle(), None )
self.failUnless( g.style().isSame( GafferUI.Style.getDefaultStyle() ) )
self.failUnless( l.style().isSame( GafferUI.Style.getDefaultStyle() ) )
s = GafferUI.StandardStyle()
l.setStyle( s )
self.failUnless( l.getStyle().isSame( s ) )
self.assertEqual( g.getStyle(), None )
self.failUnless( g.style().isSame( s ) )
self.failUnless( l.style().isSame( s ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferUI )
self.assertTypeNamesArePrefixed( GafferUITest )
def testRenderRequestOnStyleChange( self ) :
g = GafferUI.Gadget()
cs = GafferTest.CapturingSlot( g.renderRequestSignal() )
self.assertEqual( len( cs ), 0 )
s = GafferUI.StandardStyle()
g.setStyle( s )
self.assertEqual( len( cs ), 1 )
self.assertTrue( cs[0][0].isSame( g ) )
s2 = GafferUI.StandardStyle()
g.setStyle( s2 )
self.assertEqual( len( cs ), 2 )
self.assertTrue( cs[1][0].isSame( g ) )
s2.setColor( GafferUI.StandardStyle.Color.BackgroundColor, IECore.Color3f( 1 ) )
self.assertEqual( len( cs ), 3 )
self.assertTrue( cs[2][0].isSame( g ) )
def testHighlighting( self ) :
g = GafferUI.Gadget()
self.assertEqual( g.getHighlighted(), False )
g.setHighlighted( True )
self.assertEqual( g.getHighlighted(), True )
g.setHighlighted( False )
self.assertEqual( g.getHighlighted(), False )
cs = GafferTest.CapturingSlot( g.renderRequestSignal() )
g.setHighlighted( False )
self.assertEqual( len( cs ), 0 )
g.setHighlighted( True )
self.assertEqual( len( cs ), 1 )
self.assertTrue( cs[0][0].isSame( g ) )
def testVisibility( self ) :
g1 = GafferUI.Gadget()
self.assertEqual( g1.getVisible(), True )
self.assertEqual( g1.visible(), True )
g1.setVisible( False )
self.assertEqual( g1.getVisible(), False )
self.assertEqual( g1.visible(), False )
g2 = GafferUI.Gadget()
g1.addChild( g2 )
self.assertEqual( g2.getVisible(), True )
self.assertEqual( g2.visible(), False )
g1.setVisible( True )
self.assertEqual( g2.visible(), True )
g3 = GafferUI.Gadget()
g2.addChild( g3 )
self.assertEqual( g3.getVisible(), True )
self.assertEqual( g3.visible(), True )
g1.setVisible( False )
self.assertEqual( g3.getVisible(), True )
self.assertEqual( g3.visible(), False )
self.assertEqual( g3.visible( relativeTo = g2 ), True )
self.assertEqual( g3.visible( relativeTo = g1 ), True )
def testVisibilitySignals( self ) :
g = GafferUI.Gadget()
cs = GafferTest.CapturingSlot( g.renderRequestSignal() )
self.assertEqual( len( cs ), 0 )
g.setVisible( True )
self.assertEqual( len( cs ), 0 )
g.setVisible( False )
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0][0], g )
g.setVisible( False )
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0][0], g )
g.setVisible( True )
self.assertEqual( len( cs ), 2 )
self.assertEqual( cs[1][0], g )
def testBoundIgnoresHiddenChildren( self ) :
g = GafferUI.Gadget()
t = GafferUI.TextGadget( "text" )
g.addChild( t )
b = t.bound()
self.assertEqual( g.bound(), b )
t.setVisible( False )
# we still want to know what the bound would be for t,
# even when it's hidden.
self.assertEqual( t.bound(), b )
# but we don't want it taken into account when computing
# the parent bound.
self.assertEqual( g.bound(), IECore.Box3f() )
def testVisibilityChangedSignal( self ) :
g = GafferUI.Gadget()
g["a"] = GafferUI.Gadget()
g["a"]["c"] = GafferUI.Gadget()
g["b"] = GafferUI.Gadget()
events = []
def visibilityChanged( gadget ) :
events.append( ( gadget, gadget.visible() ) )
connnections = [
g.visibilityChangedSignal().connect( visibilityChanged ),
g["a"].visibilityChangedSignal().connect( visibilityChanged ),
g["a"]["c"].visibilityChangedSignal().connect( visibilityChanged ),
g["b"].visibilityChangedSignal().connect( visibilityChanged ),
]
g["b"].setVisible( True )
self.assertEqual( len( events ), 0 )
g["b"].setVisible( False )
self.assertEqual( len( events ), 1 )
self.assertEqual( events[0], ( g["b"], False ) )
g["b"].setVisible( True )
self.assertEqual( len( events ), 2 )
self.assertEqual( events[1], ( g["b"], True ) )
g["a"].setVisible( True )
self.assertEqual( len( events ), 2 )
g["a"].setVisible( False )
self.assertEqual( len( events ), 4 )
self.assertEqual( events[-2], ( g["a"]["c"], False ) )
self.assertEqual( events[-1], ( g["a"], False ) )
g["a"].setVisible( True )
self.assertEqual( len( events ), 6 )
self.assertEqual( events[-2], ( g["a"]["c"], True ) )
self.assertEqual( events[-1], ( g["a"], True ) )
g["a"]["c"].setVisible( False )
self.assertEqual( len( events ), 7 )
self.assertEqual( events[-1], ( g["a"]["c"], False ) )
g.setVisible( False )
self.assertEqual( len( events ), 10 )
self.assertEqual( events[-3], ( g["a"], False ) )
self.assertEqual( events[-2], ( g["b"], False ) )
self.assertEqual( events[-1], ( g, False ) )
g["a"]["c"].setVisible( True )
self.assertEqual( len( events ), 10 )
if __name__ == "__main__":
unittest.main()
|
chippey/gaffer
|
python/GafferUITest/GadgetTest.py
|
Python
|
bsd-3-clause
| 8,732 | 0.065964 |
import unittest
from distutils.errors import CompileError
from pythran.tests import TestFromDir
import os
import pythran
from pythran.syntax import PythranSyntaxError
from pythran.spec import Spec
class TestOpenMP(TestFromDir):
path = os.path.join(os.path.dirname(__file__), "openmp")
class TestOpenMP4(TestFromDir):
path = os.path.join(os.path.dirname(__file__), "openmp.4")
@staticmethod
def interface(name, file=None):
return Spec({name: []})
@staticmethod
def extract_runas(name, filepath):
return ['#runas {}()'.format(name)]
class TestOpenMPLegacy(TestFromDir):
'''
Test old style OpenMP constructs, not using comments but strings
and relying on function-scope locals
'''
path = os.path.join(os.path.dirname(__file__), "openmp.legacy")
@staticmethod
def interface(name, file=None):
return Spec({name: []})
@staticmethod
def extract_runas(name, filepath):
return ['#runas {}()'.format(name)]
# only activate OpenMP tests if the underlying compiler supports OpenMP
try:
pythran.compile_cxxcode("omp", '#include <omp.h>',
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'])
import omp
if '-fopenmp' in pythran.config.cfg.get('compiler', 'ldflags'):
TestOpenMP4.populate(TestOpenMP4)
TestOpenMP.populate(TestOpenMP)
TestOpenMPLegacy.populate(TestOpenMPLegacy)
except PythranSyntaxError:
raise
except (CompileError, ImportError):
pass
if __name__ == '__main__':
unittest.main()
|
serge-sans-paille/pythran
|
pythran/tests/test_openmp.py
|
Python
|
bsd-3-clause
| 1,597 | 0.001879 |
# testyacc.py
import unittest
try:
import StringIO
except ImportError:
import io as StringIO
import sys
import os
sys.path.insert(0,"..")
sys.tracebacklimit = 0
import ply.yacc
def check_expected(result,expected):
resultlines = []
for line in result.splitlines():
if line.startswith("WARNING: "):
line = line[9:]
elif line.startswith("ERROR: "):
line = line[7:]
resultlines.append(line)
expectedlines = expected.splitlines()
if len(resultlines) != len(expectedlines):
return False
for rline,eline in zip(resultlines,expectedlines):
if not rline.endswith(eline):
return False
return True
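# Note (describing check_expected above): "WARNING: "/"ERROR: " prefixes are stripped and
# each line is compared with endswith(), so expected strings only need to match the tail
# of the corresponding output line.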
def run_import(module):
code = "import "+module
exec(code)
del sys.modules[module]
# Tests related to errors and warnings when building parsers
class YaccErrorWarningTests(unittest.TestCase):
def setUp(self):
sys.stderr = StringIO.StringIO()
sys.stdout = StringIO.StringIO()
try:
os.remove("parsetab.py")
os.remove("parsetab.pyc")
except OSError:
pass
def tearDown(self):
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
def test_yacc_badargs(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badargs")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badargs.py:23: Rule 'p_statement_assign' has too many arguments\n"
"yacc_badargs.py:27: Rule 'p_statement_expr' requires an argument\n"
))
def test_yacc_badid(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badid")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badid.py:32: Illegal name 'bad&rule' in rule 'statement'\n"
"yacc_badid.py:36: Illegal rule name 'bad&rule'\n"
))
def test_yacc_badprec(self):
try:
run_import("yacc_badprec")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"precedence must be a list or tuple\n"
))
def test_yacc_badprec2(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badprec2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Bad precedence table\n"
))
def test_yacc_badprec3(self):
run_import("yacc_badprec3")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Precedence already specified for terminal 'MINUS'\n"
"Generating LALR tables\n"
))
def test_yacc_badrule(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badrule")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badrule.py:24: Syntax error. Expected ':'\n"
"yacc_badrule.py:28: Syntax error in rule 'statement'\n"
"yacc_badrule.py:33: Syntax error. Expected ':'\n"
"yacc_badrule.py:42: Syntax error. Expected ':'\n"
))
def test_yacc_badtok(self):
try:
run_import("yacc_badtok")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"tokens must be a list or tuple\n"))
def test_yacc_dup(self):
run_import("yacc_dup")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_dup.py:27: Function p_statement redefined. Previously defined on line 23\n"
"Token 'EQUALS' defined, but not used\n"
"There is 1 unused token\n"
"Generating LALR tables\n"
))
def test_yacc_error1(self):
try:
run_import("yacc_error1")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error1.py:61: p_error() requires 1 argument\n"))
def test_yacc_error2(self):
try:
run_import("yacc_error2")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error2.py:61: p_error() requires 1 argument\n"))
def test_yacc_error3(self):
try:
run_import("yacc_error3")
except ply.yacc.YaccError:
e = sys.exc_info()[1]
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"'p_error' defined, but is not a function or method\n"))
def test_yacc_error4(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_error4")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error4.py:62: Illegal rule name 'error'. Already defined as a token\n"
))
def test_yacc_inf(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_inf")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Token 'NUMBER' defined, but not used\n"
"There is 1 unused token\n"
"Infinite recursion detected for symbol 'statement'\n"
"Infinite recursion detected for symbol 'expression'\n"
))
def test_yacc_literal(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_literal")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_literal.py:36: Literal token '**' in rule 'expression' may only be a single character\n"
))
def test_yacc_misplaced(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_misplaced")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_misplaced.py:32: Misplaced '|'\n"
))
def test_yacc_missing1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_missing1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_missing1.py:24: Symbol 'location' used, but not defined as a token or a rule\n"
))
def test_yacc_nested(self):
run_import("yacc_nested")
result = sys.stdout.getvalue()
self.assert_(check_expected(result,
"A\n"
"A\n"
"A\n",
))
def test_yacc_nodoc(self):
run_import("yacc_nodoc")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_nodoc.py:27: No documentation string specified in function 'p_statement_expr' (ignored)\n"
"Generating LALR tables\n"
))
def test_yacc_noerror(self):
run_import("yacc_noerror")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"no p_error() function is defined\n"
"Generating LALR tables\n"
))
def test_yacc_nop(self):
run_import("yacc_nop")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_nop.py:27: Possible grammar rule 'statement_expr' defined without p_ prefix\n"
"Generating LALR tables\n"
))
def test_yacc_notfunc(self):
run_import("yacc_notfunc")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"'p_statement_assign' not defined as a function\n"
"Token 'EQUALS' defined, but not used\n"
"There is 1 unused token\n"
"Generating LALR tables\n"
))
def test_yacc_notok(self):
try:
run_import("yacc_notok")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"No token list is defined\n"))
def test_yacc_rr(self):
run_import("yacc_rr")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
"1 reduce/reduce conflict\n"
"reduce/reduce conflict in state 15 resolved using rule (statement -> NAME EQUALS NUMBER)\n"
"rejected rule (expression -> NUMBER)\n"
))
def test_yacc_simple(self):
run_import("yacc_simple")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
))
def test_yacc_sr(self):
run_import("yacc_sr")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
"20 shift/reduce conflicts\n"
))
def test_yacc_term1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_term1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_term1.py:24: Illegal rule name 'NUMBER'. Already defined as a token\n"
))
def test_yacc_unused(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_unused")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_unused.py:62: Symbol 'COMMA' used, but not defined as a token or a rule\n"
"Symbol 'COMMA' is unreachable\n"
"Symbol 'exprlist' is unreachable\n"
))
def test_yacc_unused_rule(self):
run_import("yacc_unused_rule")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_unused_rule.py:62: Rule 'integer' defined, but not used\n"
"There is 1 unused rule\n"
"Symbol 'integer' is unreachable\n"
"Generating LALR tables\n"
))
def test_yacc_uprec(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_uprec")
result = sys.stderr.getvalue()
print repr(result)
self.assert_(check_expected(result,
"yacc_uprec.py:37: Nothing known about the precedence of 'UMINUS'\n"
))
def test_yacc_uprec2(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_uprec2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_uprec2.py:37: Syntax error. Nothing follows %prec\n"
))
def test_yacc_prec1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_prec1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Precedence rule 'left' defined for unknown symbol '+'\n"
"Precedence rule 'left' defined for unknown symbol '*'\n"
"Precedence rule 'left' defined for unknown symbol '-'\n"
"Precedence rule 'left' defined for unknown symbol '/'\n"
))
unittest.main()
|
anuragiitg/nixysa
|
third_party/ply-3.1/test/testyacc.py
|
Python
|
apache-2.0
| 13,190 | 0.006444 |
import requests
import json
import restful_webapi
import os
# This example shows how to use the Requests library with RESTful API
# This web service stores arbitrary JSON data under integer keys
# We can use GET/POST/PUT/DELETE HTTP methods to modify the data
# Run a local server that we can use
restful_webapi.run_server()
service = 'http://localhost:8000'
print "Creating the message..."
created = None
data = json.dumps("Hello world")
r = requests.post(service, data=data)
created = r.json()['created']
print "Message ID: " + str(created)
print "Showing the message..."
r = requests.get(service + '/' + str(created))
print "Service returned: " + str(r.json())
print "Updating the message..."
data = json.dumps("Welcome, world")
r = requests.put(service + '/' + str(created), data=data)
print "Service returned: " + str(r.json())
print "Showing the message again..."
r = requests.get(service + '/' + str(created))
print "Service returned: " + str(r.json())
print "Deleting the message..."
r = requests.delete(service + '/' + str(created))
print "Service returned: " + str(r.json())
# Stop the server
|
sudikrt/costproML
|
staticDataGSir/restful.py
|
Python
|
apache-2.0
| 1,107 | 0 |
# -*- coding:utf-8 -*-
# @author xupingmao <578749341@qq.com>
# @since 2020/08/22 21:54:56
# @modified 2022/02/26 10:40:22
import xauth
import xtemplate
import xutils
import os
import re
import sys
import platform
import xconfig
from xutils import dateutil
from xutils import fsutil
from xutils import Storage
from xutils import mem_util
try:
import sqlite3
except ImportError:
sqlite3 = None
def get_xnote_version():
return xconfig.get_global_config("system.version")
def get_mem_info():
mem_used = 0
mem_total = 0
result = mem_util.get_mem_info()
mem_used = result.mem_used
sys_mem_used = result.sys_mem_used
sys_mem_total = result.sys_mem_total
return "%s/%s/%s" % (mem_used, sys_mem_used, sys_mem_total)
def get_python_version():
return sys.version
def get_startup_time():
return dateutil.format_time(xconfig.START_TIME)
def get_free_data_space():
try:
size = fsutil.get_free_space(xconfig.get_system_dir("data"))
return xutils.format_size(size)
except:
xutils.print_exc()
return "<未知>"
class SystemInfoItem:
def __init__(self, name = "", value = ""):
self.name = name
self.value = value
class InfoHandler:
@xauth.login_required("admin")
def GET(self):
items = [
SystemInfoItem("Python版本", value = get_python_version()),
SystemInfoItem("Xnote版本", value = get_xnote_version()),
SystemInfoItem("内存信息", value = get_mem_info()),
SystemInfoItem("磁盘可用容量", get_free_data_space()),
SystemInfoItem("sqlite版本", sqlite3.sqlite_version if sqlite3 != None else ''),
SystemInfoItem("CPU型号", platform.processor()),
SystemInfoItem("操作系统", platform.system()),
SystemInfoItem("操作系统版本", platform.version()),
SystemInfoItem("系统启动时间", get_startup_time()),
]
return xtemplate.render("system/page/system_info.html", items = items,
runtime_id = xconfig.RUNTIME_ID)
xurls = (
r"/system/info", InfoHandler
)
|
xupingmao/xnote
|
handlers/system/system_info.py
|
Python
|
gpl-3.0
| 2,138 | 0.013069 |
#
# Copyright (C) 2010 Kelvin Lawson (kelvinl@users.sourceforge.net)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""This module defines the class pykPlayer, which is a base class used
by the modules pykar.py, pycdg.py, and pympg.py. This collects
together some common interfaces used by these different
implementations for different types of Karaoke files."""
from pykconstants import *
from pykmanager import manager
from pykenv import env
import pygame
import sys
import types
import os
class pykPlayer:
def __init__(self, song, songDb,
errorNotifyCallback = None, doneCallback = None,
windowTitle = None):
"""The first parameter, song, may be either a pykdb.SongStruct
instance, or it may be a filename. """
if songDb == None:
import pykdb
songDb = pykdb.globalSongDB
songDb.LoadSettings(None)
self.songDb = songDb
# Set the global command-line options if they have not already
# been set.
if manager.options == None:
parser = self.SetupOptions()
(manager.options, args) = parser.parse_args()
manager.ApplyOptions(self.songDb)
if song is None:
if (len(args) != 1):
parser.print_help()
sys.exit(2)
song = args[0]
# Unfortunately, we can't capture sound when dumping. There
# are two reasons for this. (1) pymedia doesn't currently
# support multiplexing audio with a video stream, so when
# you're dumping an mpeg file, it has to be video-only. (2)
# pygame doesn't provide a way for us to programmatically
# convert a midi file to sound samples anyway--all you can do
# with a midi file is route it through the speakers.
# So, for these reasons, we always just disable sound when
# dumping images or movies.
if manager.options.dump:
manager.options.nomusic = True
if isinstance(song, types.StringTypes):
# We were given a filename. Convert it to a SongStruct.
song = self.songDb.makeSongStruct(song)
# Store the parameters
self.Song = song
self.WindowTitle = windowTitle
# And look up the actual files corresponding to this SongStruct.
self.SongDatas = song.GetSongDatas()
if windowTitle is None:
self.WindowTitle = song.DisplayFilename
# Caller can register a callback by which we
# print out error information, use stdout if none registered
if errorNotifyCallback:
self.ErrorNotifyCallback = errorNotifyCallback
else:
self.ErrorNotifyCallback = self.__defaultErrorPrint
# Caller can register a callback by which we
# let them know when the song is finished
if doneCallback:
self.SongFinishedCallback = doneCallback
else:
self.SongFinishedCallback = None
self.State = STATE_INIT
self.InternalOffsetTime = 0
# These values are used to keep track of the current position
# through the song based on pygame's get_ticks() interface.
# It's used only when get_pos() cannot be used or is
# unreliable for some reason.
self.PlayTime = 0
self.PlayStartTime = 0
self.PlayFrame = 0
# self.PlayStartTime is valid while State == STATE_PLAYING; it
# indicates the get_ticks() value at which the song started
# (adjusted for any pause intervals that occurred during
# play). self.PlayTime is valid while State != STATE_PLAYING;
# it indicates the total number of ticks (milliseconds) that
# have elapsed in the song so far.
# self.PlayFrame starts at 0 and increments once for each
# frame. It's not very meaningful, except in STATE_CAPTURING
# mode.
# Keep track of the set of modifier buttons that are held
# down. This is currently used only for the GP2X interface.
self.ShoulderLHeld = False
self.ShoulderRHeld = False
# Set this true if the player can zoom font sizes.
self.SupportsFontZoom = False
# The following methods are part of the public API and intended to
# be exported from this class.
def Validate(self):
""" Returns True if the karaoke file appears to be playable
and contains lyrics, or False otherwise. """
return self.doValidate()
def Play(self):
self.doPlay()
if manager.options.dump:
self.setupDump()
else:
self.PlayStartTime = pygame.time.get_ticks()
self.State = STATE_PLAYING
# Pause the song - Use Pause() again to unpause
def Pause(self):
if self.State == STATE_PLAYING:
self.doPause()
self.PlayTime = pygame.time.get_ticks() - self.PlayStartTime
self.State = STATE_PAUSED
elif self.State == STATE_PAUSED:
self.doUnpause()
self.PlayStartTime = pygame.time.get_ticks() - self.PlayTime
self.State = STATE_PLAYING
# Close the whole thing down
def Close(self):
self.State = STATE_CLOSING
# you must call Play() to restart. Blocks until pygame is initialised
def Rewind(self):
self.doRewind()
self.PlayTime = 0
self.PlayStartTime = 0
self.PlayFrame = 0
self.State = STATE_NOT_PLAYING
# Stop the song and go back to the start. As you would
# expect Stop to do on a CD player. Play() restarts from
# the beginning
def Stop(self):
self.Rewind()
# Get the song length (in seconds)
def GetLength(self):
ErrorString = "GetLength() not supported"
self.ErrorNotifyCallback (ErrorString)
return None
# Get the current time (in milliseconds).
def GetPos(self):
if self.State == STATE_PLAYING:
return pygame.time.get_ticks() - self.PlayStartTime
else:
return self.PlayTime
def SetupOptions(self, usage = None):
""" Initialise and return optparse OptionParser object,
suitable for parsing the command line options to this
application. """
if usage == None:
usage = "%prog [options] <Karaoke file>"
return manager.SetupOptions(usage, self.songDb)
# Below methods are internal.
def setupDump(self):
# Capture the output as a sequence of numbered frame images.
self.PlayTime = 0
self.PlayStartTime = 0
self.PlayFrame = 0
self.State = STATE_CAPTURING
self.dumpFrameRate = manager.options.dump_fps
assert self.dumpFrameRate
filename = manager.options.dump
base, ext = os.path.splitext(filename)
ext_lower = ext.lower()
self.dumpEncoder = None
if ext_lower == '.mpg':
# Use pymedia to convert frames to an mpeg2 stream
# on-the-fly.
import pymedia
import pymedia.video.vcodec as vcodec
self.dumpFile = open(filename, 'wb')
frameRate = int(self.dumpFrameRate * 100 + 0.5)
self.dumpFrameRate = float(frameRate) / 100.0
params= { \
'type': 0,
'gop_size': 12,
'frame_rate_base': 125,
'max_b_frames': 0,
'height': manager.options.size_y,
'width': manager.options.size_x,
'frame_rate': frameRate,
'deinterlace': 0,
'bitrate': 9800000,
'id': vcodec.getCodecID('mpeg2video')
}
self.dumpEncoder = vcodec.Encoder( params )
return
# Don't dump a video file; dump a sequence of frames instead.
self.dumpPPM = (ext_lower == '.ppm' or ext_lower == '.pnm')
self.dumpAppend = False
# Convert the filename to a pattern.
if '#' in filename:
hash = filename.index('#')
end = hash
while end < len(filename) and filename[end] == '#':
end += 1
count = end - hash
filename = filename[:hash] + '%0' + str(count) + 'd' + filename[end:]
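            # e.g. "frame_####.png" becomes "frame_%04d.png", so each dumped
            # frame gets a zero-padded sequence number when the pattern is
            # later filled in with self.PlayFrame.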
else:
# There's no hash in the filename.
if self.dumpPPM:
# We can dump a series of frames all to the same file,
# if we're dumping ppm frames. Mjpegtools likes this.
self.dumpAppend = True
try:
os.remove(filename)
except OSError:
pass
else:
# Implicitly append a frame number.
filename = base + '%04d' + ext
self.dumpFilename = filename
def doFrameDump(self):
if self.dumpEncoder:
import pymedia.video.vcodec as vcodec
ss = pygame.image.tostring(manager.surface, "RGB")
bmpFrame = vcodec.VFrame(
vcodec.formats.PIX_FMT_RGB24,
manager.surface.get_size(), (ss,None,None))
yuvFrame = bmpFrame.convert(vcodec.formats.PIX_FMT_YUV420P)
d = self.dumpEncoder.encode(yuvFrame)
self.dumpFile.write(d.data)
return
if self.dumpAppend:
filename = self.dumpFilename
else:
filename = self.dumpFilename % self.PlayFrame
print filename
if self.dumpPPM:
# Dump a PPM file. We do PPM by hand since pygame
# doesn't support it directly, but it's so easy and
# useful.
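            # "P6" is the binary PPM magic; the header line carries width,
            # height and the maximum channel value (255), and the body is the
            # raw RGB bytes that pygame.image.tostring(..., 'RGB') produces.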
w, h = manager.surface.get_size()
if self.dumpAppend:
f = open(filename, 'ab')
else:
f = open(filename, 'wb')
f.write('P6\n%s %s 255\n' % (w, h))
f.write(pygame.image.tostring(manager.surface, 'RGB'))
else:
# Ask pygame to dump the file. We trust that pygame knows
# how to store an image in the requested format.
pygame.image.save(manager.surface, filename)
def doValidate(self):
return True
def doPlay(self):
pass
def doPause(self):
pass
def doUnpause(self):
pass
def doRewind(self):
pass
def doStuff(self):
# Override this in a derived class to do some useful per-frame
# activity.
# Common handling code for a close request or if the
# pygame window was quit
if self.State == STATE_CLOSING:
if manager.display:
manager.display.fill((0,0,0))
pygame.display.flip()
self.shutdown()
elif self.State == STATE_CAPTURING:
# We are capturing a video file.
self.doFrameDump()
# Set the frame time for the next frame.
self.PlayTime = 1000.0 * self.PlayFrame / self.dumpFrameRate
self.PlayFrame += 1
def doResize(self, newSize):
# This will be called internally whenever the window is
# resized for any reason, either due to an application resize
# request being processed, or due to the user dragging the
# window handles.
pass
def doResizeBegin(self):
# This will be called internally before the screen is resized
# by pykmanager and doResize() is called. Not all players need
# to do anything here.
pass
def doResizeEnd(self):
# This will be called internally after the screen is resized
# by pykmanager and doResize() is called. Not all players need
# to do anything here.
pass
def handleEvent(self, event):
if event.type == pygame.USEREVENT:
self.Close()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.Close()
elif event.key == pygame.K_PAUSE or event.key == pygame.K_p:
self.Pause()
elif event.key == pygame.K_BACKSPACE or event.key == pygame.K_DELETE:
self.Rewind()
self.Play()
# Use control-left/right arrow to offset the current
# graphics time by 1/4 sec. Use control-down arrow to
# restore them to sync.
elif self.State == STATE_PLAYING and event.key == pygame.K_RIGHT and event.mod & (pygame.KMOD_LCTRL | pygame.KMOD_RCTRL):
manager.settings.SyncDelayMs += 250
print "sync %s" % manager.settings.SyncDelayMs
elif self.State == STATE_PLAYING and event.key == pygame.K_LEFT and event.mod & (pygame.KMOD_LCTRL | pygame.KMOD_RCTRL):
manager.settings.SyncDelayMs -= 250
print "sync %s" % manager.settings.SyncDelayMs
elif self.State == STATE_PLAYING and event.key == pygame.K_DOWN and event.mod & (pygame.KMOD_LCTRL | pygame.KMOD_RCTRL):
manager.settings.SyncDelayMs = 0
print "sync %s" % manager.settings.SyncDelayMs
if self.SupportsFontZoom:
if event.key == pygame.K_PLUS or event.key == pygame.K_EQUALS or \
event.key == pygame.K_KP_PLUS:
manager.ZoomFont(1.0/0.9)
elif event.key == pygame.K_MINUS or event.key == pygame.K_UNDERSCORE or \
event.key == pygame.K_KP_MINUS:
manager.ZoomFont(0.9)
elif event.type == pygame.QUIT:
self.Close()
elif env == ENV_GP2X and event.type == pygame.JOYBUTTONDOWN:
if event.button == GP2X_BUTTON_SELECT:
self.Close()
elif event.button == GP2X_BUTTON_START:
self.Pause()
elif event.button == GP2X_BUTTON_L:
self.ShoulderLHeld = True
elif event.button == GP2X_BUTTON_R:
self.ShoulderRHeld = True
if self.SupportsFontZoom:
if event.button == GP2X_BUTTON_RIGHT and self.ShoulderLHeld:
manager.ZoomFont(1.0/0.9)
elif event.button == GP2X_BUTTON_LEFT and self.ShoulderLHeld:
manager.ZoomFont(0.9)
elif env == ENV_GP2X and event.type == pygame.JOYBUTTONUP:
if event.button == GP2X_BUTTON_L:
self.ShoulderLHeld = False
elif event.button == GP2X_BUTTON_R:
self.ShoulderRHeld = False
def shutdown(self):
# This will be called by the pykManager to shut down the thing
# immediately.
# If the caller gave us a callback, let them know we're finished
if self.State != STATE_CLOSED:
self.State = STATE_CLOSED
if self.SongFinishedCallback != None:
self.SongFinishedCallback()
def __defaultErrorPrint(self, ErrorString):
print (ErrorString)
def findPygameFont(self, fontData, fontSize):
""" Returns a pygame.Font selected by this data. """
if not fontData.size:
# The font names a specific filename.
filename = fontData.name
if os.path.sep not in filename:
filename = os.path.join(manager.FontPath, filename)
return pygame.font.Font(filename, fontSize)
# The font names a system font.
pointSize = int(fontData.size * fontSize / 10.0 + 0.5)
return pygame.font.SysFont(
fontData.name, pointSize, bold = fontData.bold,
italic = fontData.italic)
|
kelvinlawson/pykaraoke
|
pykplayer.py
|
Python
|
lgpl-2.1
| 16,454 | 0.003343 |
# The Hazard Library
# Copyright (C) 2015, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import collections
import mock
import h5py
import numpy as np
from scipy.interpolate import interp1d
from openquake.hazardlib import const
from openquake.hazardlib.gsim.gsim_table import (
SitesContext, RuptureContext, DistancesContext, GMPETable,
AmplificationTable, hdf_arrays_to_dict)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
from openquake.hazardlib import imt as imt_module
BASE_DATA_PATH = os.path.join(os.path.dirname(__file__),
"data",
"gsimtables")
def midpoint(low, high, point=0.5):
"""
    Returns the logarithmic midpoint between two values
"""
return 10.0 ** (point * (np.log10(low) + np.log10(high)))
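# With the default point=0.5 this is the geometric mean, e.g.
# midpoint(10.0, 1000.0) == 100.0; in general it evaluates to
# (low * high) ** point.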
class HDFArraysToDictTestCase(unittest.TestCase):
"""
Tests the conversion of a group containing a set of datasets(array) into
a dictionary
"""
def setUp(self):
self.fle = h5py.File("foo.hdf5")
self.group = self.fle.create_group("TestGroup")
dset1 = self.group.create_dataset("DSET1", (3, 3), dtype="f")
dset1[:] = np.zeros([3, 3])
dset2 = self.group.create_dataset("DSET2", (3, 3), dtype="f")
dset2[:] = np.ones([3, 3])
def test_array_conversion(self):
"""
Tests the simple array conversion
"""
        # Set up the two expected arrays
expected_dset1 = np.zeros([3, 3])
expected_dset2 = np.ones([3, 3])
output_dict = hdf_arrays_to_dict(self.group)
assert isinstance(output_dict, dict)
self.assertIn("DSET1", output_dict)
self.assertIn("DSET2", output_dict)
np.testing.assert_array_almost_equal(output_dict["DSET1"],
expected_dset1)
np.testing.assert_array_almost_equal(output_dict["DSET2"],
expected_dset2)
def tearDown(self):
"""
Close and delete the hdf5 file
"""
self.fle.close()
os.remove("foo.hdf5")
class AmplificationTableSiteTestCase(unittest.TestCase):
"""
Tests the amplification tables for a site parameter
"""
TABLE_FILE = os.path.join(BASE_DATA_PATH, "model_amplification_site.hdf5")
IDX = 0
def setUp(self):
"""
Open the hdf5 file
"""
self.fle = h5py.File(self.TABLE_FILE)
self.amp_table = AmplificationTable(self.fle["Amplification"],
self.fle["Mw"][:],
self.fle["Distances"][:])
def test_instantiation(self):
"""
Tests the setup and loading of data from file to memory
"""
# Check setup
# 1. Shape
self.assertTupleEqual(self.amp_table.shape, (10, 3, 5, 2))
# 2. Parameter
self.assertEqual(self.amp_table.parameter, "vs30")
# 3. Element
self.assertEqual(self.amp_table.element, "Sites")
# 4. Interpolation values
np.testing.assert_array_almost_equal(self.amp_table.values,
np.array([400.0, 1000.0]))
# 5. Periods
np.testing.assert_array_almost_equal(self.amp_table.periods,
np.array([0.1, 0.5, 1.0]))
# 6. Means and Standard Deviations
expected_mean, expected_sigma = self._build_mean_and_stddev_table()
for key in self.amp_table.mean:
np.testing.assert_array_almost_equal(self.amp_table.mean[key],
expected_mean[key])
np.testing.assert_array_almost_equal(
self.amp_table.sigma["Total"][key],
expected_sigma["Total"][key])
def _build_mean_and_stddev_table(self):
"""
Builds the expected mean and standard deviation tables
"""
expected_means = {
"PGA": np.ones([10, 1, 5, 2]),
"PGV": np.ones([10, 1, 5, 2]),
"SA": np.ones([10, 3, 5, 2])
}
        # For the second level, revise the values
expected_means["PGA"][:, :, :, self.IDX] *= 1.5
expected_means["PGV"][:, :, :, self.IDX] *= 0.5
expected_means["SA"][:, 0, :, self.IDX] *= 1.5
expected_means["SA"][:, 1, :, self.IDX] *= 2.0
expected_means["SA"][:, 2, :, self.IDX] *= 0.5
expected_sigma = {const.StdDev.TOTAL: {
"PGA": np.ones([10, 1, 5, 2]),
"PGV": np.ones([10, 1, 5, 2]),
"SA": np.ones([10, 3, 5, 2])
}}
expected_sigma[const.StdDev.TOTAL]["PGA"][:, :, :, self.IDX] *= 0.8
expected_sigma[const.StdDev.TOTAL]["PGV"][:, :, :, self.IDX] *= 0.8
expected_sigma[const.StdDev.TOTAL]["SA"][:, :, :, self.IDX] *= 0.8
return expected_means, expected_sigma
def test_get_set(self):
"""
Test that the set function operates correctly
"""
self.assertSetEqual(self.amp_table.get_set(), {"vs30"})
def test_get_mean_table(self, idx=0):
"""
Test the retrieval of the mean amplification tables for a given
magnitude and IMT
"""
rctx = RuptureContext()
rctx.mag = 6.0
# PGA
expected_table = np.ones([10, 2])
expected_table[:, self.IDX] *= 1.5
np.testing.assert_array_almost_equal(
self.amp_table.get_mean_table(imt_module.PGA(), rctx),
expected_table)
# SA
expected_table[:, self.IDX] = 2.0 * np.ones(10)
np.testing.assert_array_almost_equal(
self.amp_table.get_mean_table(imt_module.SA(0.5), rctx),
expected_table)
# SA (period interpolation)
interpolator = interp1d(np.log10(self.amp_table.periods),
np.log10(np.array([1.5, 2.0, 0.5])))
period = 0.3
expected_table[:, self.IDX] = (
10.0 ** interpolator(np.log10(period))) * np.ones(10.)
np.testing.assert_array_almost_equal(
self.amp_table.get_mean_table(imt_module.SA(period), rctx),
expected_table)
def test_get_sigma_table(self):
"""
Test the retrieval of the standard deviation modification tables
for a given magnitude and IMT
"""
rctx = RuptureContext()
rctx.mag = 6.0
# PGA
expected_table = np.ones([10, 2])
expected_table[:, self.IDX] *= 0.8
stddevs = ["Total"]
pga_table = self.amp_table.get_sigma_tables(imt_module.PGA(),
rctx,
stddevs)[0]
np.testing.assert_array_almost_equal(pga_table, expected_table)
# SA (for coverage)
sa_table = self.amp_table.get_sigma_tables(imt_module.SA(0.3),
rctx,
stddevs)[0]
np.testing.assert_array_almost_equal(sa_table, expected_table)
def test_get_amplification_factors(self):
"""
Tests the amplification tables
"""
rctx = RuptureContext()
rctx.mag = 6.0
dctx = DistancesContext()
# Takes distances at the values found in the table (not checking
# distance interpolation)
dctx.rjb = np.copy(self.amp_table.distances[:, 0, 0])
# Test Vs30 is 700.0 m/s midpoint between the 400 m/s and 1000 m/s
# specified in the table
sctx = SitesContext()
sctx.vs30 = 700.0 * np.ones_like(dctx.rjb)
stddevs = [const.StdDev.TOTAL]
expected_mean = np.ones_like(dctx.rjb)
expected_sigma = np.ones_like(dctx.rjb)
# Check PGA and PGV
mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
imt_module.PGA(), sctx, rctx, dctx.rjb, stddevs)
np.testing.assert_array_almost_equal(
mean_amp,
midpoint(1.0, 1.5) * expected_mean)
np.testing.assert_array_almost_equal(
sigma_amp[0],
0.9 * expected_mean)
mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
imt_module.PGV(), sctx, rctx, dctx.rjb, stddevs)
np.testing.assert_array_almost_equal(
mean_amp,
midpoint(1.0, 0.5) * expected_mean)
np.testing.assert_array_almost_equal(
sigma_amp[0],
0.9 * expected_mean)
# Sa (0.5)
mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
imt_module.SA(0.5), sctx, rctx, dctx.rjb, stddevs)
np.testing.assert_array_almost_equal(
mean_amp,
midpoint(1.0, 2.0) * expected_mean)
np.testing.assert_array_almost_equal(
sigma_amp[0],
0.9 * expected_mean)
def tearDown(self):
"""
Close the hdf5 file
"""
self.fle.close()
class AmplificationTableRuptureTestCase(AmplificationTableSiteTestCase):
"""
Test case for the amplification table when applied to a rupture specific
parameter
"""
TABLE_FILE = os.path.join(BASE_DATA_PATH, "model_amplification_rake.hdf5")
IDX = 1
def test_instantiation(self):
"""
Tests the setup and loading of data from file to memory
"""
# Check setup
# 1. Shape
self.assertTupleEqual(self.amp_table.shape, (10, 3, 5, 2))
# 2. Parameter
self.assertEqual(self.amp_table.parameter, "rake")
# 3. Element
self.assertEqual(self.amp_table.element, "Rupture")
# 4. Interpolation values
np.testing.assert_array_almost_equal(self.amp_table.values,
np.array([0.0, 90.0]))
# 5. Periods
np.testing.assert_array_almost_equal(self.amp_table.periods,
np.array([0.1, 0.5, 1.0]))
# 6. Means and Standard Deviations
expected_mean, expected_sigma = self._build_mean_and_stddev_table()
for key in self.amp_table.mean:
np.testing.assert_array_almost_equal(self.amp_table.mean[key],
expected_mean[key])
np.testing.assert_array_almost_equal(
self.amp_table.sigma["Total"][key],
expected_sigma["Total"][key])
def test_get_amplification_factors(self):
"""
Tests the amplification tables
"""
rctx = RuptureContext()
rctx.rake = 45.0
rctx.mag = 6.0
dctx = DistancesContext()
# Takes distances at the values found in the table (not checking
# distance interpolation)
dctx.rjb = np.copy(self.amp_table.distances[:, 0, 0])
        # Rake of 45.0 is the midpoint between the 0.0 and 90.0 rake values
        # specified in the table
sctx = SitesContext()
stddevs = [const.StdDev.TOTAL]
expected_mean = np.ones_like(dctx.rjb)
# Check PGA and PGV
mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
imt_module.PGA(), sctx, rctx, dctx.rjb, stddevs)
np.testing.assert_array_almost_equal(
mean_amp,
midpoint(1.0, 1.5) * expected_mean)
np.testing.assert_array_almost_equal(
sigma_amp[0],
0.9 * expected_mean)
mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
imt_module.PGV(), sctx, rctx, dctx.rjb, stddevs)
np.testing.assert_array_almost_equal(
mean_amp,
midpoint(1.0, 0.5) * expected_mean)
np.testing.assert_array_almost_equal(
sigma_amp[0],
0.9 * expected_mean)
# Sa (0.5)
mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
imt_module.SA(0.5), sctx, rctx, dctx.rjb, stddevs)
np.testing.assert_array_almost_equal(
mean_amp,
midpoint(1.0, 2.0) * expected_mean)
np.testing.assert_array_almost_equal(
sigma_amp[0],
0.9 * expected_mean)
def test_get_set(self):
"""
Test that the set function operates correctly
"""
self.assertSetEqual(self.amp_table.get_set(), set(("rake",)))
class AmplificationTableBadTestCase(unittest.TestCase):
"""
Tests the instantiation of the amplification table if a non-supported
parameter is used
"""
TABLE_FILE = os.path.join(BASE_DATA_PATH, "bad_table_parameter.hdf5")
IDX = 0
def setUp(self):
"""
Open the hdf5 file
"""
self.fle = h5py.File(self.TABLE_FILE)
def test_unsupported_parameter(self):
"""
Tests instantiation with a bad input
"""
with self.assertRaises(ValueError) as ve:
AmplificationTable(self.fle["Amplification"], None, None)
self.assertEqual(str(ve.exception),
"Amplification parameter Bad Value not recognised!")
def tearDown(self):
"""
Close the file
"""
self.fle.close()
class GSIMTableGoodTestCase(unittest.TestCase):
"""
Verifies the correct execution of a GMPE Table
"""
TABLE_FILE = os.path.join(BASE_DATA_PATH, "good_dummy_table.hdf5")
def setUp(self):
"""
Opens the hdf5 file
"""
self.fle = h5py.File(self.TABLE_FILE)
def test_correct_instantiation(self):
"""
Verify that the data is loaded successfully
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
np.testing.assert_array_almost_equal(gsim.distances,
self.fle["Distances"][:])
np.testing.assert_array_almost_equal(gsim.m_w,
self.fle["Mw"][:])
self.assertEqual(gsim.distance_type, "rjb")
self.assertSetEqual(gsim.REQUIRES_SITES_PARAMETERS, set(("vs30",)))
self.assertSetEqual(gsim.REQUIRES_DISTANCES, set(("rjb",)))
self.assertSetEqual(
gsim.DEFINED_FOR_INTENSITY_MEASURE_TYPES,
set((imt_module.PGA, imt_module.PGV, imt_module.SA)))
self.assertSetEqual(gsim.DEFINED_FOR_STANDARD_DEVIATION_TYPES,
set((const.StdDev.TOTAL,)))
# Verify correctly parsed IMLs and standard deviations
for iml in ["PGA", "PGV", "SA", "T"]:
np.testing.assert_array_almost_equal(
gsim.imls[iml],
self.fle["IMLs/" + iml][:])
np.testing.assert_array_almost_equal(
gsim.stddevs["Total"][iml],
self.fle["Total/" + iml][:])
def test_instantiation_without_file(self):
"""
Tests the case when no GMPE table file is coded into the GMPE, nor
is any provided - should raise an error
"""
with self.assertRaises(IOError) as ioe:
GMPETable(gmpe_table=None)
self.assertEqual(str(ioe.exception),
"GMPE Table Not Defined!")
def test_retreival_tables_good_no_interp(self):
"""
        Tests the retrieval of the IML tables for 'good' conditions without
applying magnitude interpolations
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
# PGA
np.testing.assert_array_almost_equal(
gsim._return_tables(6.0, imt_module.PGA(), "IMLs"),
np.array([2., 1., 0.5]))
# PGV
np.testing.assert_array_almost_equal(
gsim._return_tables(6.0, imt_module.PGV(), "IMLs"),
np.array([20., 10., 5.]),
5)
# SA(1.0)
np.testing.assert_array_almost_equal(
gsim._return_tables(6.0, imt_module.SA(1.0), "IMLs"),
np.array([2.0, 1., 0.5]))
# Also for standard deviations
np.testing.assert_array_almost_equal(
gsim._return_tables(6.0, imt_module.PGA(), "Total"),
0.5 * np.ones(3))
np.testing.assert_array_almost_equal(
gsim._return_tables(6.0, imt_module.SA(1.0), "Total"),
0.8 * np.ones(3))
def test_retreival_tables_good_interp(self):
"""
        Tests the retrieval of the IML tables for 'good' conditions with
magnitude interpolations
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
expected_table_pgv = np.array([midpoint(20., 40.),
midpoint(10., 20.),
midpoint(5., 10.)])
np.testing.assert_array_almost_equal(
gsim._return_tables(6.5, imt_module.PGV(), "IMLs"),
expected_table_pgv,
5)
expected_table_sa1 = np.array([midpoint(2., 4.),
midpoint(1., 2.),
midpoint(0.5, 1.)])
np.testing.assert_array_almost_equal(
gsim._return_tables(6.5, imt_module.SA(1.0), "IMLs"),
expected_table_sa1)
def test_retreival_tables_outside_mag_range(self):
"""
Tests that an error is raised when inputting a magnitude value
outside the supported range
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
with self.assertRaises(ValueError) as ve:
gsim._return_tables(7.5, imt_module.PGA(), "IMLs")
self.assertEqual(
str(ve.exception),
"Magnitude 7.50 outside of supported range (5.00 to 7.00)")
def test_retreival_tables_outside_period_range(self):
"""
Tests that an error is raised when inputting a period value
outside the supported range
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
with self.assertRaises(ValueError) as ve:
gsim._return_tables(6.0, imt_module.SA(2.5), "IMLs")
self.assertEqual(
str(ve.exception),
"Spectral period 2.500 outside of valid range (0.100 to 2.000)")
def test_get_mean_and_stddevs_good(self):
"""
Tests the full execution of the GMPE tables for valid data
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
rctx = RuptureContext()
rctx.mag = 6.0
dctx = DistancesContext()
# Test values at the given distances and those outside range
dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
sctx = SitesContext()
sctx.vs30 = 1000. * np.ones(5)
stddevs = [const.StdDev.TOTAL]
expected_mean = np.array([2.0, 2.0, 1.0, 0.5, 1.0E-20])
expected_sigma = 0.25 * np.ones(5)
# PGA
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.PGA(),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
np.testing.assert_array_almost_equal(sigma[0], expected_sigma, 5)
# SA
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.SA(1.0),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
np.testing.assert_array_almost_equal(sigma[0], 0.4 * np.ones(5), 5)
# PGV
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.PGV(),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean),
10. * expected_mean,
5)
np.testing.assert_array_almost_equal(sigma[0], expected_sigma, 5)
def test_get_mean_and_stddevs_good_amplified(self):
"""
Tests the full execution of the GMPE tables for valid data with
amplification
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
rctx = RuptureContext()
rctx.mag = 6.0
dctx = DistancesContext()
# Test values at the given distances and those outside range
dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
sctx = SitesContext()
sctx.vs30 = 100. * np.ones(5)
stddevs = [const.StdDev.TOTAL]
expected_mean = np.array([20., 20., 10., 5., 1.0E-19])
expected_sigma = 0.25 * np.ones(5)
# PGA
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.PGA(),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
np.testing.assert_array_almost_equal(sigma[0], expected_sigma, 5)
# SA
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.SA(1.0),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
np.testing.assert_array_almost_equal(sigma[0], 0.4 * np.ones(5), 5)
def test_get_mean_stddevs_unsupported_stddev(self):
"""
Tests the execution of the GMPE with an unsupported standard deviation
type
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
rctx = RuptureContext()
rctx.mag = 6.0
dctx = DistancesContext()
# Test values at the given distances and those outside range
dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
sctx = SitesContext()
sctx.vs30 = 1000. * np.ones(5)
stddevs = [const.StdDev.TOTAL, const.StdDev.INTER_EVENT]
with self.assertRaises(ValueError) as ve:
gsim.get_mean_and_stddevs(sctx, rctx, dctx, imt_module.PGA(),
stddevs)
self.assertEqual(str(ve.exception),
"Standard Deviation type Inter event not supported")
def tearDown(self):
"""
Close the hdf5 file
"""
self.fle.close()
class GSIMTableTestCaseMultiStdDev(unittest.TestCase):
"""
Tests the instantiation of the GSIM table class in the case when
i. Multiple Standard Deviations are specified
ii. An unrecognised IMT is input
"""
TABLE_FILE = os.path.join(BASE_DATA_PATH,
"good_dummy_table_multi_stddev.hdf5")
def test_instantiation(self):
"""
Runs both instantiation checks
The table file contains data for Inter and intra event standard
deviation, as well as an IMT that is not recognised by OpenQuake
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
expected_stddev_set = set((const.StdDev.TOTAL,
const.StdDev.INTER_EVENT,
const.StdDev.INTRA_EVENT))
self.assertSetEqual(gsim.DEFINED_FOR_STANDARD_DEVIATION_TYPES,
expected_stddev_set)
expected_imt_set = set((imt_module.PGA,
imt_module.PGV,
imt_module.SA))
self.assertSetEqual(gsim.DEFINED_FOR_INTENSITY_MEASURE_TYPES,
expected_imt_set)
class GSIMTableTestCaseRupture(unittest.TestCase):
"""
Tests the case when the amplification is based on a rupture parameter
"""
TABLE_FILE = os.path.join(BASE_DATA_PATH,
"good_dummy_table_rake.hdf5")
def test_instantiation(self):
"""
Tests instantiation of class
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
expected_rupture_set = set(("mag", "rake"))
self.assertSetEqual(gsim.REQUIRES_RUPTURE_PARAMETERS,
expected_rupture_set)
self.assertEqual(gsim.amplification.parameter, "rake")
self.assertEqual(gsim.amplification.element, "Rupture")
self.assertSetEqual(gsim.REQUIRES_SITES_PARAMETERS, set(()))
def test_get_mean_and_stddevs_good(self):
"""
Tests the full execution of the GMPE tables for valid data
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
rctx = RuptureContext()
rctx.mag = 6.0
rctx.rake = 90.0
dctx = DistancesContext()
# Test values at the given distances and those outside range
dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
sctx = SitesContext()
stddevs = [const.StdDev.TOTAL]
expected_mean = np.array([20.0, 20.0, 10.0, 5.0, 1.0E-19])
# PGA
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.PGA(),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
np.testing.assert_array_almost_equal(sigma[0], 0.25 * np.ones(5), 5)
# SA
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.SA(1.0),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
np.testing.assert_array_almost_equal(sigma[0], 0.4 * np.ones(5), 5)
class GSIMTableTestCaseBadFile(unittest.TestCase):
"""
Tests the case when the hdf5 file contains spectral accelerations but
is missing the periods
"""
TABLE_FILE = os.path.join(BASE_DATA_PATH,
"missing_periods.hdf5")
def test_missing_periods(self):
"""
Tests missing period information
"""
with self.assertRaises(ValueError) as ve:
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
self.assertEqual(str(ve.exception),
"Spectral Acceleration must be accompanied by periods"
)
class GSIMTableTestCaseNoAmplification(unittest.TestCase):
"""
Tests the simple case in which no amplification is applied
"""
TABLE_FILE = os.path.join(BASE_DATA_PATH,
"good_dummy_table_noamp.hdf5")
def test_instantiation(self):
"""
Tests instantiation without amplification
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
self.assertIsNone(gsim.amplification)
self.assertSetEqual(gsim.REQUIRES_SITES_PARAMETERS, set(()))
self.assertSetEqual(gsim.REQUIRES_RUPTURE_PARAMETERS, set(("mag",)))
def test_get_mean_and_stddevs(self):
"""
Tests mean and standard deviations without amplification
"""
gsim = GMPETable(gmpe_table=self.TABLE_FILE)
rctx = RuptureContext()
rctx.mag = 6.0
dctx = DistancesContext()
# Test values at the given distances and those outside range
dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
sctx = SitesContext()
stddevs = [const.StdDev.TOTAL]
expected_mean = np.array([2.0, 2.0, 1.0, 0.5, 1.0E-20])
# PGA
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.PGA(),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
np.testing.assert_array_almost_equal(sigma[0], 0.5 * np.ones(5), 5)
# SA
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.SA(1.0),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
np.testing.assert_array_almost_equal(sigma[0], 0.8 * np.ones(5), 5)
# PGV
mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
imt_module.PGV(),
stddevs)
np.testing.assert_array_almost_equal(np.exp(mean),
10. * expected_mean,
5)
np.testing.assert_array_almost_equal(sigma[0], 0.5 * np.ones(5), 5)
class GSIMTableQATestCase(BaseGSIMTestCase):
"""
Quality Assurance test case with real data taken from the
2015 Canadian National Seismic Hazard Map
"""
GSIM_CLASS = GMPETable
MEAN_FILE = "gsimtables/Wcrust_rjb_med_MEAN.csv"
STD_TOTAL_FILE = "gsimtables/Wcrust_rjb_med_TOTAL.csv"
def setUp(self):
self.GSIM_CLASS.GMPE_TABLE = os.path.join(BASE_DATA_PATH,
"Wcrust_rjb_med.hdf5")
def test_mean(self):
self.check(self.MEAN_FILE, max_discrep_percentage=0.7)
def test_std_total(self):
self.check(self.STD_TOTAL_FILE, max_discrep_percentage=0.7)
def tearDown(self):
self.GSIM_CLASS.GMPE_TABLE = None
|
g-weatherill/oq-hazardlib
|
openquake/hazardlib/tests/gsim/gsim_table_test.py
|
Python
|
agpl-3.0
| 29,750 | 0 |
from .chucky_neighborhood_tool import NeighborhoodTool
|
a0x77n/chucky-tools
|
src/chucky_tools/neighborhood/__init__.py
|
Python
|
gpl-3.0
| 55 | 0 |
#!/usr/bin/env python
# example setselection.py
import pygtk
pygtk.require('2.0')
import gtk
import time
class SetSelectionExample:
# Callback when the user toggles the selection
def selection_toggled(self, widget, window):
if widget.get_active():
self.have_selection = window.selection_owner_set("PRIMARY")
# if claiming the selection failed, we return the button to
# the out state
if not self.have_selection:
widget.set_active(False)
else:
if self.have_selection:
# Not possible to release the selection in PyGTK
# just mark that we don't have it
self.have_selection = False
return
# Called when another application claims the selection
def selection_clear(self, widget, event):
self.have_selection = False
widget.set_active(False)
return True
# Supplies the current time as the selection.
def selection_handle(self, widget, selection_data, info, time_stamp):
current_time = time.time()
timestr = time.asctime(time.localtime(current_time))
# When we return a single string, it should not be null terminated.
# That will be done for us
selection_data.set_text(timestr, len(timestr))
return
def __init__(self):
self.have_selection = False
# Create the toplevel window
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_title("Set Selection")
window.set_border_width(10)
window.connect("destroy", lambda w: gtk.main_quit())
self.window = window
# Create an eventbox to hold the button since it no longer has
# a GdkWindow
eventbox = gtk.EventBox()
eventbox.show()
window.add(eventbox)
# Create a toggle button to act as the selection
selection_button = gtk.ToggleButton("Claim Selection")
eventbox.add(selection_button)
selection_button.connect("toggled", self.selection_toggled, eventbox)
eventbox.connect_object("selection_clear_event", self.selection_clear,
selection_button)
eventbox.selection_add_target("PRIMARY", "STRING", 1)
eventbox.selection_add_target("PRIMARY", "COMPOUND_TEXT", 1)
eventbox.connect("selection_get", self.selection_handle)
selection_button.show()
window.show()
def main():
gtk.main()
return 0
if __name__ == "__main__":
SetSelectionExample()
main()
|
certik/pyjamas
|
pygtkweb/demos/065-setselection.py
|
Python
|
apache-2.0
| 2,570 | 0.002335 |
import git
def download_repos(dst, repos):
"""
    clones each of the given repositories into the destination folder
Arguments
---------
dst : string
folder to write repositories to
    repos : iterable of repository objects (with owner.login, name, full_name, git_url)
repositories to download
"""
for repo in repos:
try:
# we can clone the projects, but we only want to download small projects
print("downloading repo {}/{}".format(repo.owner.login, repo.name))
git.repo.base.Repo.clone_from(repo.git_url, dst + '/' + repo.full_name)
except git.exc.GitCommandError as e:
            # if the repository already exists we want to move on
if not e.stderr.endswith('already exists and is not an empty directory.\n\''):
raise e
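# Example usage (sketch; assumes the repo objects come from a PyGithub-style
# client, i.e. they expose owner.login, name, full_name and git_url):
#   from github import Github
#   download_repos("/tmp/repos", Github().get_user("octocat").get_repos())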
|
cameres/github-dl
|
download/download_repos.py
|
Python
|
mpl-2.0
| 779 | 0.003851 |
# -*- coding: utf-8 -*-
"""
===========
TaskCarrier
===========
:mod:`taskcarrier` contains a set of tools built on top of the `joblib`
library which allow to use transparently Parallel/Serial code using
simple but nice abstraction.
"""
__author__ = "Begon Jean-Michel <jm.begon@gmail.com>"
__copyright__ = "3-clause BSD License"
__version__ = '1.0'
__date__ = "26 Mar. 2015"
from .taskcarrier import (BoundedIterable, bound_iterable, Partition, Mapper,
SerialMapper, StaticParallelMapper,
DynamicParallelMapper, MapperInstance)
__all__ = ["BoundedIterable", "bound_iterable", "Partition", "Mapper",
"SerialMapper", "StaticParallelMapper", "DynamicParallelMapper",
"MapperInstance"]
|
jm-begon/taskcarrier
|
taskcarrier/__init__.py
|
Python
|
bsd-3-clause
| 764 | 0.001309 |
from __future__ import unicode_literals
import json
import xmltodict
from jinja2 import Template
from six import iteritems
from moto.core.responses import BaseResponse
from .models import redshift_backends
def convert_json_error_to_xml(json_error):
error = json.loads(json_error)
code = error["Error"]["Code"]
message = error["Error"]["Message"]
template = Template(
"""
<RedshiftClientError>
<Error>
<Code>{{ code }}</Code>
<Message>{{ message }}</Message>
<Type>Sender</Type>
</Error>
<RequestId>6876f774-7273-11e4-85dc-39e55ca848d1</RequestId>
</RedshiftClientError>"""
)
return template.render(code=code, message=message)
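# For example, a JSON body such as
#   {"Error": {"Code": "ClusterNotFound", "Message": "Cluster x not found."}}
# is re-rendered as the <RedshiftClientError> XML document above, carrying the
# same code and message.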
def itemize(data):
"""
    xmltodict.unparse requires that we modify the shape of the input dictionary slightly. Instead of a dict of the form:
{'key': ['value1', 'value2']}
We must provide:
{'key': {'item': ['value1', 'value2']}}
"""
if isinstance(data, dict):
ret = {}
for key in data:
ret[key] = itemize(data[key])
return ret
elif isinstance(data, list):
return {"item": [itemize(value) for value in data]}
else:
return data
class RedshiftResponse(BaseResponse):
@property
def redshift_backend(self):
return redshift_backends[self.region]
def get_response(self, response):
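        # Serialize the response dict to match the caller's protocol: JSON
        # when self.request_json is set, otherwise Redshift's query-API XML
        # built with xmltodict (decoded to text if unparse returns bytes).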
if self.request_json:
return json.dumps(response)
else:
xml = xmltodict.unparse(itemize(response), full_document=False)
if hasattr(xml, "decode"):
xml = xml.decode("utf-8")
return xml
def call_action(self):
status, headers, body = super(RedshiftResponse, self).call_action()
if status >= 400 and not self.request_json:
body = convert_json_error_to_xml(body)
return status, headers, body
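    # Gathers indexed query parameters such as Tags.Tag.1.Key/Tags.Tag.1.Value,
    # Tags.Tag.2.Key/... into a list of dicts like
    # [{"Key": ..., "Value": ...}, ...]; iteration stops at the first missing
    # index.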
def unpack_complex_list_params(self, label, names):
unpacked_list = list()
count = 1
while self._get_param("{0}.{1}.{2}".format(label, count, names[0])):
param = dict()
for i in range(len(names)):
param[names[i]] = self._get_param(
"{0}.{1}.{2}".format(label, count, names[i])
)
unpacked_list.append(param)
count += 1
return unpacked_list
def unpack_list_params(self, label):
unpacked_list = list()
count = 1
while self._get_param("{0}.{1}".format(label, count)):
unpacked_list.append(self._get_param("{0}.{1}".format(label, count)))
count += 1
return unpacked_list
def _get_cluster_security_groups(self):
cluster_security_groups = self._get_multi_param("ClusterSecurityGroups.member")
if not cluster_security_groups:
cluster_security_groups = self._get_multi_param(
"ClusterSecurityGroups.ClusterSecurityGroupName"
)
return cluster_security_groups
def _get_vpc_security_group_ids(self):
vpc_security_group_ids = self._get_multi_param("VpcSecurityGroupIds.member")
if not vpc_security_group_ids:
vpc_security_group_ids = self._get_multi_param(
"VpcSecurityGroupIds.VpcSecurityGroupId"
)
return vpc_security_group_ids
def _get_iam_roles(self):
iam_roles = self._get_multi_param("IamRoles.member")
if not iam_roles:
iam_roles = self._get_multi_param("IamRoles.IamRoleArn")
return iam_roles
def _get_subnet_ids(self):
subnet_ids = self._get_multi_param("SubnetIds.member")
if not subnet_ids:
subnet_ids = self._get_multi_param("SubnetIds.SubnetIdentifier")
return subnet_ids
def create_cluster(self):
cluster_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"node_type": self._get_param("NodeType"),
"master_username": self._get_param("MasterUsername"),
"master_user_password": self._get_param("MasterUserPassword"),
"db_name": self._get_param("DBName"),
"cluster_type": self._get_param("ClusterType"),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"availability_zone": self._get_param("AvailabilityZone"),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"port": self._get_int_param("Port"),
"cluster_version": self._get_param("ClusterVersion"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"number_of_nodes": self._get_int_param("NumberOfNodes"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"region_name": self.region,
"tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
"iam_roles_arn": self._get_iam_roles(),
"enhanced_vpc_routing": self._get_param("EnhancedVpcRouting"),
"kms_key_id": self._get_param("KmsKeyId"),
}
cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json()
cluster["ClusterStatus"] = "creating"
return self.get_response(
{
"CreateClusterResponse": {
"CreateClusterResult": {"Cluster": cluster},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def restore_from_cluster_snapshot(self):
enhanced_vpc_routing = self._get_bool_param("EnhancedVpcRouting")
restore_kwargs = {
"snapshot_identifier": self._get_param("SnapshotIdentifier"),
"cluster_identifier": self._get_param("ClusterIdentifier"),
"port": self._get_int_param("Port"),
"availability_zone": self._get_param("AvailabilityZone"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"region_name": self.region,
"iam_roles_arn": self._get_iam_roles(),
}
if enhanced_vpc_routing is not None:
restore_kwargs["enhanced_vpc_routing"] = enhanced_vpc_routing
cluster = self.redshift_backend.restore_from_cluster_snapshot(
**restore_kwargs
).to_json()
cluster["ClusterStatus"] = "creating"
return self.get_response(
{
"RestoreFromClusterSnapshotResponse": {
"RestoreFromClusterSnapshotResult": {"Cluster": cluster},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_clusters(self):
cluster_identifier = self._get_param("ClusterIdentifier")
clusters = self.redshift_backend.describe_clusters(cluster_identifier)
return self.get_response(
{
"DescribeClustersResponse": {
"DescribeClustersResult": {
"Clusters": [cluster.to_json() for cluster in clusters]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def modify_cluster(self):
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"new_cluster_identifier": self._get_param("NewClusterIdentifier"),
"node_type": self._get_param("NodeType"),
"master_user_password": self._get_param("MasterUserPassword"),
"cluster_type": self._get_param("ClusterType"),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"cluster_version": self._get_param("ClusterVersion"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"number_of_nodes": self._get_int_param("NumberOfNodes"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"iam_roles_arn": self._get_iam_roles(),
"enhanced_vpc_routing": self._get_param("EnhancedVpcRouting"),
}
cluster_kwargs = {}
# We only want parameters that were actually passed in, otherwise
# we'll stomp all over our cluster metadata with None values.
for (key, value) in iteritems(request_kwargs):
if value is not None and value != []:
cluster_kwargs[key] = value
cluster = self.redshift_backend.modify_cluster(**cluster_kwargs)
return self.get_response(
{
"ModifyClusterResponse": {
"ModifyClusterResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster(self):
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"final_cluster_snapshot_identifier": self._get_param(
"FinalClusterSnapshotIdentifier"
),
"skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot"),
}
cluster = self.redshift_backend.delete_cluster(**request_kwargs)
return self.get_response(
{
"DeleteClusterResponse": {
"DeleteClusterResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_cluster_subnet_group(self):
cluster_subnet_group_name = self._get_param("ClusterSubnetGroupName")
description = self._get_param("Description")
subnet_ids = self._get_subnet_ids()
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
subnet_group = self.redshift_backend.create_cluster_subnet_group(
cluster_subnet_group_name=cluster_subnet_group_name,
description=description,
subnet_ids=subnet_ids,
region_name=self.region,
tags=tags,
)
return self.get_response(
{
"CreateClusterSubnetGroupResponse": {
"CreateClusterSubnetGroupResult": {
"ClusterSubnetGroup": subnet_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_subnet_groups(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(
subnet_identifier
)
return self.get_response(
{
"DescribeClusterSubnetGroupsResponse": {
"DescribeClusterSubnetGroupsResult": {
"ClusterSubnetGroups": [
subnet_group.to_json() for subnet_group in subnet_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_subnet_group(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
self.redshift_backend.delete_cluster_subnet_group(subnet_identifier)
return self.get_response(
{
"DeleteClusterSubnetGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def create_cluster_security_group(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
description = self._get_param("Description")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
security_group = self.redshift_backend.create_cluster_security_group(
cluster_security_group_name=cluster_security_group_name,
description=description,
region_name=self.region,
tags=tags,
)
return self.get_response(
{
"CreateClusterSecurityGroupResponse": {
"CreateClusterSecurityGroupResult": {
"ClusterSecurityGroup": security_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_security_groups(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
security_groups = self.redshift_backend.describe_cluster_security_groups(
cluster_security_group_name
)
return self.get_response(
{
"DescribeClusterSecurityGroupsResponse": {
"DescribeClusterSecurityGroupsResult": {
"ClusterSecurityGroups": [
security_group.to_json()
for security_group in security_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_security_group(self):
security_group_identifier = self._get_param("ClusterSecurityGroupName")
self.redshift_backend.delete_cluster_security_group(security_group_identifier)
return self.get_response(
{
"DeleteClusterSecurityGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def authorize_cluster_security_group_ingress(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
cidr_ip = self._get_param("CIDRIP")
security_group = self.redshift_backend.authorize_cluster_security_group_ingress(
cluster_security_group_name, cidr_ip
)
return self.get_response(
{
"AuthorizeClusterSecurityGroupIngressResponse": {
"AuthorizeClusterSecurityGroupIngressResult": {
"ClusterSecurityGroup": {
"ClusterSecurityGroupName": cluster_security_group_name,
"Description": security_group.description,
"IPRanges": [
{
"Status": "authorized",
"CIDRIP": cidr_ip,
"Tags": security_group.tags,
},
],
}
}
}
}
)
def create_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
group_family = self._get_param("ParameterGroupFamily")
description = self._get_param("Description")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
parameter_group = self.redshift_backend.create_cluster_parameter_group(
cluster_parameter_group_name, group_family, description, self.region, tags
)
return self.get_response(
{
"CreateClusterParameterGroupResponse": {
"CreateClusterParameterGroupResult": {
"ClusterParameterGroup": parameter_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_parameter_groups(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(
cluster_parameter_group_name
)
return self.get_response(
{
"DescribeClusterParameterGroupsResponse": {
"DescribeClusterParameterGroupsResult": {
"ParameterGroups": [
parameter_group.to_json()
for parameter_group in parameter_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
self.redshift_backend.delete_cluster_parameter_group(
cluster_parameter_group_name
)
return self.get_response(
{
"DeleteClusterParameterGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def create_cluster_snapshot(self):
cluster_identifier = self._get_param("ClusterIdentifier")
snapshot_identifier = self._get_param("SnapshotIdentifier")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
snapshot = self.redshift_backend.create_cluster_snapshot(
cluster_identifier, snapshot_identifier, self.region, tags
)
return self.get_response(
{
"CreateClusterSnapshotResponse": {
"CreateClusterSnapshotResult": {"Snapshot": snapshot.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_snapshots(self):
cluster_identifier = self._get_param("ClusterIdentifier")
snapshot_identifier = self._get_param("SnapshotIdentifier")
snapshots = self.redshift_backend.describe_cluster_snapshots(
cluster_identifier, snapshot_identifier
)
return self.get_response(
{
"DescribeClusterSnapshotsResponse": {
"DescribeClusterSnapshotsResult": {
"Snapshots": [snapshot.to_json() for snapshot in snapshots]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_snapshot(self):
snapshot_identifier = self._get_param("SnapshotIdentifier")
snapshot = self.redshift_backend.delete_cluster_snapshot(snapshot_identifier)
return self.get_response(
{
"DeleteClusterSnapshotResponse": {
"DeleteClusterSnapshotResult": {"Snapshot": snapshot.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_snapshot_copy_grant(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName"),
"kms_key_id": self._get_param("KmsKeyId"),
"region_name": self._get_param("Region"),
}
copy_grant = self.redshift_backend.create_snapshot_copy_grant(
**copy_grant_kwargs
)
return self.get_response(
{
"CreateSnapshotCopyGrantResponse": {
"CreateSnapshotCopyGrantResult": {
"SnapshotCopyGrant": copy_grant.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_snapshot_copy_grant(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName")
}
self.redshift_backend.delete_snapshot_copy_grant(**copy_grant_kwargs)
return self.get_response(
{
"DeleteSnapshotCopyGrantResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def describe_snapshot_copy_grants(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName")
}
copy_grants = self.redshift_backend.describe_snapshot_copy_grants(
**copy_grant_kwargs
)
return self.get_response(
{
"DescribeSnapshotCopyGrantsResponse": {
"DescribeSnapshotCopyGrantsResult": {
"SnapshotCopyGrants": [
copy_grant.to_json() for copy_grant in copy_grants
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_tags(self):
resource_name = self._get_param("ResourceName")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
self.redshift_backend.create_tags(resource_name, tags)
return self.get_response(
{
"CreateTagsResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def describe_tags(self):
resource_name = self._get_param("ResourceName")
resource_type = self._get_param("ResourceType")
tagged_resources = self.redshift_backend.describe_tags(
resource_name, resource_type
)
return self.get_response(
{
"DescribeTagsResponse": {
"DescribeTagsResult": {"TaggedResources": tagged_resources},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_tags(self):
resource_name = self._get_param("ResourceName")
tag_keys = self.unpack_list_params("TagKeys.TagKey")
self.redshift_backend.delete_tags(resource_name, tag_keys)
return self.get_response(
{
"DeleteTagsResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def enable_snapshot_copy(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"destination_region": self._get_param("DestinationRegion"),
"retention_period": self._get_param("RetentionPeriod", 7),
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName"),
}
cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs)
return self.get_response(
{
"EnableSnapshotCopyResponse": {
"EnableSnapshotCopyResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def disable_snapshot_copy(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier")
}
cluster = self.redshift_backend.disable_snapshot_copy(**snapshot_copy_kwargs)
return self.get_response(
{
"DisableSnapshotCopyResponse": {
"DisableSnapshotCopyResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def modify_snapshot_copy_retention_period(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"retention_period": self._get_param("RetentionPeriod"),
}
cluster = self.redshift_backend.modify_snapshot_copy_retention_period(
**snapshot_copy_kwargs
)
return self.get_response(
{
"ModifySnapshotCopyRetentionPeriodResponse": {
"ModifySnapshotCopyRetentionPeriodResult": {
"Clusters": [cluster.to_json()]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def get_cluster_credentials(self):
cluster_identifier = self._get_param("ClusterIdentifier")
db_user = self._get_param("DbUser")
auto_create = self._get_bool_param("AutoCreate", False)
duration_seconds = self._get_int_param("DurationSeconds", 900)
cluster_credentials = self.redshift_backend.get_cluster_credentials(
cluster_identifier, db_user, auto_create, duration_seconds
)
return self.get_response(
{
"GetClusterCredentialsResponse": {
"GetClusterCredentialsResult": cluster_credentials,
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
|
william-richard/moto
|
moto/redshift/responses.py
|
Python
|
apache-2.0
| 28,522 | 0.001227 |
"""
XmlObject
This module allows concise definitions of XML file formats for python
objects.
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@alum.mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import sys
import codecs
# xml imports
import xml.parsers.expat
from xml.sax.saxutils import escape
# keepnote imports
from keepnote import safefile
class XmlError (StandardError):
"""Error for parsing XML"""
pass
def bool2str(b):
"""Convert a bool into a string"""
return str(int(b))
def str2bool(s):
"""Convert a string into a bool"""
return bool(int(s))
def str_no_none(x):
if x is None:
return u""
return x
class Tag (object):
def __init__(self, name,
get=None,
set=None,
attr=None,
tags=[]):
self.name = name
self._tag_list = list(tags)
self._read_data = get
self._write_data = set
self._object = None
self._data = []
# set read/write based on 'attr'
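        # 'attr' is expected to be a 3-tuple (attr_name, attr_get, attr_set);
        # either function may be None, in which case the raw string value is
        # used directly (None is written out as an empty string).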
if attr is not None:
attr_name, attr_get, attr_set = attr
if attr_get is None:
self._read_data = lambda s,x: s.__setattr__(attr_name, x)
else:
self._read_data = lambda s,x: s.__setattr__(attr_name, attr_get(x))
if attr_set is None:
self._write_data = lambda s: str_no_none(s.__dict__[attr_name])
else:
self._write_data = lambda s: attr_set(s.__dict__[attr_name])
# init tag lookup
self._tags = {}
for tag in tags:
self._tags[tag.name] = tag
# set of initialized tags
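        # (the 'set' keyword argument above shadows the builtin set type,
        # hence the explicit __builtins__ lookup)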
self._init_tags = __builtins__["set"]()
#===========================================
# reading
def init(self):
"""Initialize the a tag before its first use"""
self._init_tags.clear()
def set_object(self, obj):
self._object = obj
def new_tag(self, name):
"""Create new child tag"""
tag = self._tags.get(name, None)
if tag:
# initialize new tag
tag.set_object(self._object)
if tag not in self._init_tags:
tag.init()
self._init_tags.add(tag)
return tag
def start_tag(self):
"""Start tag callback"""
self._data = []
def queue_data(self, data):
"""Content data callback"""
if self._read_data:
self._data.append(data)
def end_tag(self):
"""End tag callback"""
# read queued data if read function is supplied
if self._read_data:
data = "".join(self._data)
self._data = []
try:
self._read_data(self._object, data)
except Exception, e:
raise XmlError("Error parsing tag '%s': %s" % (self.name,
str(e)))
def add(self, tag):
"""Add a tag child to this tag"""
self._tag_list.append(tag)
self._tags[tag.name] = tag
#===================
# writing
def write(self, obj, out):
"""Write tag to output stream"""
# write openning
if self.name != "":
out.write("<%s>" % self.name)
if len(self._tags) > 0:
out.write("\n")
for child_tag in self._tag_list:
child_tag.write(obj, out)
elif self._write_data:
text = self._write_data(obj)
if not isinstance(text, basestring):
raise XmlError("bad text (%s,%s): %s" %
(self.name, str(self._object),
str(type(text))))
out.write(escape(text))
if self.name != "":
out.write("</%s>\n" % self.name)
# TODO: remove get?
class TagMany (Tag):
def __init__(self, name, iterfunc, get=None, set=None,
before=None,
after=None,
tags=[]):
Tag.__init__(self, name,
get=None,
set=set,
tags=tags)
self._iterfunc = iterfunc
self._read_item = get
self._write_item = set
self._beforefunc = before
self._afterfunc = after
self._index = 0
#=============================
# reading
def init(self):
"""Initialize the a tag before its first use"""
self._init_tags.clear()
self._index = 0
def new_tag(self, name):
"""Create new child tag"""
tag = self._tags.get(name, None)
if tag:
# initialize new tag
tag.set_object((self._object, self._index))
if tag not in self._init_tags:
tag.init()
self._init_tags.add(tag)
return tag
def start_tag(self):
"""Start tag callback"""
self._data = []
if self._beforefunc:
self._beforefunc((self._object, self._index))
def queue_data(self, data):
"""Content data callback"""
if self._read_item:
self._data.append(data)
def end_tag(self):
"""End tag callback"""
if self._read_item:
data = "".join(self._data)
self._data = []
#try:
if 1:
if self._read_item is not None:
self._read_item((self._object, self._index), data)
#except Exception, e:
# raise XmlError("Error parsing tag '%s': %s" % (self.name,
# str(e)))
if self._afterfunc:
self._afterfunc((self._object, self._index))
self._index += 1
#=====================
# writing
def write(self, obj, out):
# write opening
if len(self._tags) == 0:
assert self._write_item is not None
for i in self._iterfunc(obj):
out.write("<%s>%s</%s>\n" % (self.name,
escape(self._write_item((obj, i))),
self.name))
else:
for i in self._iterfunc(obj):
out.write("<%s>\n" % self.name)
for child_tag in self._tag_list:
child_tag.write((obj, i), out)
out.write("</%s>\n" % self.name)
'''
# TODO: remove get?
class TagList (TagMany):
"""A specialization of TagMany to work with reading and writing lists"""
def __init__(self, name, lst, get=None, set=None, before=None, after=None,
tags=[]):
TagMany.__init__(self, name, self._iter,
get=get, set=set,
before=before, after=after, tags=tags)
self._list = lst
def new_tag(self, name):
tag = self._tags.get(name, None)
if tag:
tag.set_object(self._list)
return tag
'''
class XmlObject (object):
"""Represents an object <--> XML document binding"""
def __init__(self, *tags):
self._object = None
self._root_tag = Tag("", tags=tags)
self._current_tags = [self._root_tag]
def __start_element(self, name, attrs):
"""Start tag callback"""
if len(self._current_tags) > 0:
last_tag = self._current_tags[-1]
if last_tag:
new_tag = last_tag.new_tag(name)
self._current_tags.append(new_tag)
if new_tag:
new_tag.start_tag()
def __end_element(self, name):
"""End tag callback"""
if len(self._current_tags) > 0:
last_tag = self._current_tags.pop()
if last_tag:
if last_tag.name == name:
last_tag.end_tag()
else:
raise XmlError("Malformed XML")
def __char_data(self, data):
"""read character data and give it to current tag"""
if len(self._current_tags) > 0:
tag = self._current_tags[-1]
if tag:
tag.queue_data(data)
def read(self, obj, filename):
"""Read XML from 'filename' and store data into object 'obj'"""
if isinstance(filename, basestring):
infile = open(filename, "r")
else:
infile = filename
self._object = obj
self._root_tag.set_object(self._object)
self._current_tags = [self._root_tag]
self._root_tag.init()
parser = xml.parsers.expat.ParserCreate()
parser.StartElementHandler = self.__start_element
parser.EndElementHandler = self.__end_element
parser.CharacterDataHandler = self.__char_data
try:
parser.ParseFile(infile)
except xml.parsers.expat.ExpatError, e:
raise XmlError("Error reading file '%s': %s" % (filename, str(e)))
if len(self._current_tags) > 1:
print [x.name for x in self._current_tags]
raise XmlError("Incomplete file '%s'" % filename)
infile.close()
def write(self, obj, filename):
"""Write object 'obj' to file 'filename'"""
if isinstance(filename, basestring):
#out = codecs.open(filename, "w", "utf-8")
out = safefile.open(filename, "w", codec="utf-8")
need_close = True
else:
out = filename
need_close = False
out.write(u"<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
self._root_tag.write(obj, out)
out.write(u"\n")
if need_close:
out.close()
if __name__ == "__main__":
import StringIO
parser = XmlObject(
Tag("notebook", tags=[
Tag("window_size",
attr=("window_size",
lambda x: tuple(map(int, x.split(","))),
lambda x: "%d,%d" % x)),
Tag("window_pos",
attr=("window_pos",
lambda x: tuple(map(int, x.split(","))),
lambda x: "%d,%d" % x)),
Tag("vsash_pos",
attr=("vhash_pos", int, str)),
Tag("hsash_pos",
attr=("hsash_pos", int, str)),
Tag("external_apps", tags=[
TagMany("app",
iterfunc=lambda s: range(len(s.apps)),
get=lambda (s,i), x: s.apps.append(x),
set=lambda (s,i): s.apps[i])]),
Tag("external_apps2", tags=[
TagMany("app",
iterfunc=lambda s: range(len(s.apps2)),
before=lambda (s,i): s.apps2.append([None, None]),
tags=[Tag("name",
get=lambda (s,i),x: s.apps2[i].__setitem__(0, x),
set=lambda (s,i): s.apps2[i][0]),
Tag("prog",
get=lambda (s,i),x: s.apps2[i].__setitem__(1,x),
set=lambda (s,i): s.apps2[i][1])
])
]),
]))
class Pref (object):
def __init__(self):
self.window_size = (0, 0)
self.window_pos = (0, 0)
self.vsash_pos = 0
self.hsash_pos = 0
self.apps = []
self.apps2 = []
def read(self, filename):
parser.read(self, filename)
def write(self, filename):
parser.write(self, filename)
#from rasmus import util
#util.tic("run")
infile = StringIO.StringIO("""<?xml version="1.0" encoding="UTF-8"?>
<notebook>
<window_size>1053,905</window_size>
<window_pos>0,0</window_pos>
<vsash_pos>0</vsash_pos>
<hsash_pos>250</hsash_pos>
<external_apps>
<app>web_browser</app>
<app>image_editor</app>
</external_apps>
<external_apps2>
<app><name>web_browser</name><prog>firefox</prog></app>
<app><name>image_editor</name><prog>gimp</prog></app>
</external_apps2>
</notebook>
""")
for i in xrange(1):#0000):
pref = Pref()
pref.read(infile)
pref.write(sys.stdout)
#util.toc()
'''
def get_dom_children(node):
"""Convenience function for iterating the children of a DOM object"""
child = node.firstChild
while child:
yield child
child = child.nextSibling
'''
|
brotchie/keepnote
|
keepnote/compat/xmlobject_v3.py
|
Python
|
gpl-2.0
| 13,706 | 0.008172 |
"""
Admonition extension for Python-Markdown
========================================
Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions # noqa
See <https://Python-Markdown.github.io/extensions/admonition>
for documentation.
Original code Copyright [Tiago Serafim](http://www.tiagoserafim.com/).
All changes Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor
from ..util import etree
import re
class AdmonitionExtension(Extension):
""" Admonition extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Add Admonition to Markdown instance. """
md.registerExtension(self)
md.parser.blockprocessors.register(AdmonitionProcessor(md.parser), 'admonition', 105)
class AdmonitionProcessor(BlockProcessor):
CLASSNAME = 'admonition'
CLASSNAME_TITLE = 'admonition-title'
RE = re.compile(r'(?:^|\n)!!! ?([\w\-]+(?: +[\w\-]+)*)(?: +"(.*?)")? *(?:\n|$)')
RE_SPACES = re.compile(' +')
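    # The opening line matched by RE looks like, e.g.:
    #   !!! note "Optional explicit title"
    # followed by an indented body; when the quoted title is omitted, the
    # capitalized class name (e.g. "Note") is used as the title instead.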
def test(self, parent, block):
sibling = self.lastChild(parent)
return self.RE.search(block) or \
(block.startswith(' ' * self.tab_length) and sibling is not None and
sibling.get('class', '').find(self.CLASSNAME) != -1)
def run(self, parent, blocks):
sibling = self.lastChild(parent)
block = blocks.pop(0)
m = self.RE.search(block)
if m:
block = block[m.end():] # removes the first line
block, theRest = self.detab(block)
if m:
klass, title = self.get_class_and_title(m)
div = etree.SubElement(parent, 'div')
div.set('class', '%s %s' % (self.CLASSNAME, klass))
if title:
p = etree.SubElement(div, 'p')
p.text = title
p.set('class', self.CLASSNAME_TITLE)
else:
div = sibling
self.parser.parseChunk(div, block)
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
def get_class_and_title(self, match):
klass, title = match.group(1).lower(), match.group(2)
klass = self.RE_SPACES.sub(' ', klass)
if title is None:
# no title was provided, use the capitalized classname as title
# e.g.: `!!! note` will render
# `<p class="admonition-title">Note</p>`
title = klass.split(' ', 1)[0].capitalize()
elif title == '':
# an explicit blank title should not be rendered
# e.g.: `!!! warning ""` will *not* render `p` with a title
title = None
return klass, title
def makeExtension(**kwargs): # pragma: no cover
return AdmonitionExtension(**kwargs)
|
unreal666/outwiker
|
plugins/markdown/markdown/markdown_plugin_libs/markdown/extensions/admonition.py
|
Python
|
gpl-3.0
| 3,188 | 0.000941 |
from __future__ import unicode_literals
from datetime import date
from django.test import TestCase
from import_export import fields
class Obj:
def __init__(self, name, date=None):
self.name = name
self.date = date
class FieldTest(TestCase):
def setUp(self):
self.field = fields.Field(column_name='name', attribute='name')
self.row = {
'name': 'Foo',
}
self.obj = Obj(name='Foo', date=date(2012, 8, 13))
def test_clean(self):
self.assertEqual(self.field.clean(self.row),
self.row['name'])
def test_export(self):
self.assertEqual(self.field.export(self.obj),
self.row['name'])
def test_save(self):
self.row['name'] = 'foo'
self.field.save(self.obj, self.row)
self.assertEqual(self.obj.name, 'foo')
def test_save_follow(self):
class Test:
class name:
class follow:
me = 'bar'
test = Test()
field = fields.Field(column_name='name', attribute='name__follow__me')
row = {'name': 'foo'}
field.save(test, row)
self.assertEqual(test.name.follow.me, 'foo')
def test_following_attribute(self):
field = fields.Field(attribute='other_obj__name')
obj2 = Obj(name="bar")
self.obj.other_obj = obj2
self.assertEqual(field.export(self.obj), "bar")
def test_default(self):
field = fields.Field(default=1, column_name='name')
self.assertEqual(field.clean({'name': None}), 1)
def test_default_falsy_values(self):
field = fields.Field(default=1, column_name='name')
self.assertEqual(field.clean({'name': 0}), 0)
def test_default_falsy_values_without_default(self):
field = fields.Field(column_name='name')
self.assertEqual(field.clean({'name': 0}), 0)
def test_saves_null_values(self):
field = fields.Field(column_name='name', attribute='name', saves_null_values=False)
row = {
'name': None,
}
field.save(self.obj, row)
self.assertEqual(self.obj.name, 'Foo')
self.field.save(self.obj, row)
self.assertIsNone(self.obj.name)
|
daniell/django-import-export
|
tests/core/tests/test_fields.py
|
Python
|
bsd-2-clause
| 2,262 | 0.000442 |
## Automatically adapted for scipy Oct 21, 2005 by
"""
Integration routines
====================
Methods for Integrating Functions given function object.
quad -- General purpose integration.
dblquad -- General purpose double integration.
tplquad -- General purpose triple integration.
fixed_quad -- Integrate func(x) using Gaussian quadrature of order n.
quadrature -- Integrate with given tolerance using Gaussian quadrature.
romberg -- Integrate func using Romberg integration.
Methods for Integrating Functions given fixed samples.
trapz -- Use trapezoidal rule to compute integral from samples.
cumtrapz -- Use trapezoidal rule to cumulatively compute integral.
simps -- Use Simpson's rule to compute integral from samples.
romb -- Use Romberg Integration to compute integral from
(2**k + 1) evenly-spaced samples.
See the special module's orthogonal polynomials (special) for Gaussian
quadrature roots and weights for other weighting factors and regions.
Interface to numerical integrators of ODE systems.
odeint -- General integration of ordinary differential equations.
ode -- Integrate ODE using VODE and ZVODE routines.
"""
postpone_import = 1
|
stefanv/scipy3
|
scipy/integrate/info.py
|
Python
|
bsd-3-clause
| 1,311 | 0.000763 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Contributor: Pedro Manuel Baeza <pedro.baeza@serviciosbaeza.com>
# Ignacio Ibeas <ignacio@acysos.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class ResCountryState(orm.Model):
_inherit = 'res.country.state'
_columns = {'better_zip_ids': fields.one2many('res.better.zip', 'state_id', 'Cities')}
|
jmesteve/saas3
|
openerp/addons_extra/base_location/state.py
|
Python
|
agpl-3.0
| 1,248 | 0.000801 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
from nova import db
from nova import exception
from nova.objects import aggregate
from nova.tests.unit import fake_notifier
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
fake_aggregate = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuidsentinel.fake_aggregate,
'name': 'fake-aggregate',
'hosts': ['foo', 'bar'],
'metadetails': {'this': 'that'},
}
SUBS = {'metadata': 'metadetails'}
class _TestAggregateObject(object):
def test_get_by_id(self):
self.mox.StubOutWithMock(db, 'aggregate_get')
db.aggregate_get(self.context, 123).AndReturn(fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate.get_by_id(self.context, 123)
self.compare_obj(agg, fake_aggregate, subs=SUBS)
@mock.patch('nova.objects.Aggregate.save')
@mock.patch('nova.db.aggregate_get')
def test_load_allocates_uuid(self, mock_get, mock_save):
fake_agg = dict(fake_aggregate)
del fake_agg['uuid']
mock_get.return_value = fake_agg
uuid = uuidsentinel.aggregate
with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_g:
mock_g.return_value = uuid
obj = aggregate.Aggregate.get_by_id(self.context, 123)
mock_g.assert_called_once_with()
self.assertEqual(uuid, obj.uuid)
mock_save.assert_called_once_with()
def test_create(self):
self.mox.StubOutWithMock(db, 'aggregate_create')
db.aggregate_create(self.context, {'name': 'foo',
'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'}).AndReturn(fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'one': 'two'}
agg.uuid = uuidsentinel.fake_agg
agg.create()
self.compare_obj(agg, fake_aggregate, subs=SUBS)
def test_recreate_fails(self):
self.mox.StubOutWithMock(db, 'aggregate_create')
db.aggregate_create(self.context, {'name': 'foo',
'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'}).AndReturn(fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'one': 'two'}
agg.uuid = uuidsentinel.fake_agg
agg.create()
self.assertRaises(exception.ObjectActionError, agg.create)
def test_save(self):
self.mox.StubOutWithMock(db, 'aggregate_update')
db.aggregate_update(self.context, 123, {'name': 'baz'}).AndReturn(
fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.name = 'baz'
agg.save()
self.compare_obj(agg, fake_aggregate, subs=SUBS)
def test_save_and_create_no_hosts(self):
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.hosts = ['foo', 'bar']
self.assertRaises(exception.ObjectActionError,
agg.create)
self.assertRaises(exception.ObjectActionError,
agg.save)
def test_update_metadata(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
db.aggregate_metadata_delete(self.context, 123, 'todelete')
db.aggregate_metadata_add(self.context, 123, {'toadd': 'myval'})
self.mox.ReplayAll()
fake_notifier.NOTIFICATIONS = []
agg = aggregate.Aggregate()
agg._context = self.context
agg.id = 123
agg.metadata = {'foo': 'bar'}
agg.obj_reset_changes()
agg.update_metadata({'todelete': None, 'toadd': 'myval'})
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
self.assertEqual({'todelete': None, 'toadd': 'myval'},
msg.payload['meta_data'])
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
self.assertEqual({'todelete': None, 'toadd': 'myval'},
msg.payload['meta_data'])
self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata)
def test_destroy(self):
self.mox.StubOutWithMock(db, 'aggregate_delete')
db.aggregate_delete(self.context, 123)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.destroy()
def test_add_host(self):
self.mox.StubOutWithMock(db, 'aggregate_host_add')
db.aggregate_host_add(self.context, 123, 'bar'
).AndReturn({'host': 'bar'})
self.mox.ReplayAll()
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo']
agg._context = self.context
agg.add_host('bar')
self.assertEqual(agg.hosts, ['foo', 'bar'])
def test_delete_host(self):
self.mox.StubOutWithMock(db, 'aggregate_host_delete')
db.aggregate_host_delete(self.context, 123, 'foo')
self.mox.ReplayAll()
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo', 'bar']
agg._context = self.context
agg.delete_host('foo')
self.assertEqual(agg.hosts, ['bar'])
def test_availability_zone(self):
agg = aggregate.Aggregate()
agg.metadata = {'availability_zone': 'foo'}
self.assertEqual('foo', agg.availability_zone)
def test_get_all(self):
self.mox.StubOutWithMock(db, 'aggregate_get_all')
db.aggregate_get_all(self.context).AndReturn([fake_aggregate])
self.mox.ReplayAll()
aggs = aggregate.AggregateList.get_all(self.context)
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
def test_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_get_by_host')
db.aggregate_get_by_host(self.context, 'fake-host', key=None,
).AndReturn([fake_aggregate])
self.mox.ReplayAll()
aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host')
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
@mock.patch('nova.db.aggregate_get_by_metadata_key')
def test_get_by_metadata_key(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this')
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
@mock.patch('nova.db.aggregate_get_by_metadata_key')
def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this', hosts=['baz'])
self.assertEqual(0, len(aggs))
@mock.patch('nova.db.aggregate_get_by_metadata_key')
def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this', hosts=['foo', 'bar'])
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
class TestAggregateObject(test_objects._LocalTest,
_TestAggregateObject):
pass
class TestRemoteAggregateObject(test_objects._RemoteTest,
_TestAggregateObject):
pass
|
HybridF5/nova
|
nova/tests/unit/objects/test_aggregate.py
|
Python
|
apache-2.0
| 8,667 | 0 |
#!/usr/bin/env python3
"""
script -- A widget displaying output of a script that lets you interact with it.
"""
import gi.repository, subprocess, sys
gi.require_version('Budgie', '1.0')
gi.require_version('Wnck', '3.0')
from gi.repository import Budgie, GObject, Wnck, Gtk, Gio, GLib
class ScriptPlugin(GObject.GObject, Budgie.Plugin):
""" A wrapper for the Script plugin. """
__gtype_name__ = 'Script'
def __init__(self):
super().__init__()
def do_get_panel_widget(self, uuid):
""" Initialize and return a new Script widget. """
return ScriptWidget(uuid)
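# Note: the widget below shells out to two user-provided scripts in the user's
# config directory (typically ~/.config/budgie-script-applet/): ontimeout.sh is
# polled every second for the button label, onclick.sh runs when it is clicked.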
class ScriptWidget(Budgie.Applet):
def __init__(self, uuid):
super().__init__()
self.uuid = uuid
self.directory = "budgie-script-applet"
self.button = Gtk.Button(label="Script files not found!")
self.add(self.button)
self.button.set_label(subprocess.check_output([GLib.get_user_config_dir()+"/"+self.directory+"/ontimeout.sh"]).decode(sys.stdout.encoding).strip())
self.show_all()
self.button.connect_after('clicked', self.on_click)
self.timeout_id = GObject.timeout_add(1000, self.on_timeout, None)
def on_click(self, button):
subprocess.call([GLib.get_user_config_dir()+"/"+self.directory+"/onclick.sh"])
def on_timeout(self, user_data):
self.button.set_label(subprocess.check_output([GLib.get_user_config_dir()+"/"+self.directory+"/ontimeout.sh"]).decode(sys.stdout.encoding).strip())
return True
|
kacperski1/budgie-script-applet
|
script.py
|
Python
|
gpl-2.0
| 1,518 | 0.003953 |
```
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
For example,
Consider the following matrix:
[
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
Given target = 3, return true
```
# Good answer !
class Solution:
# @param matrix, a list of lists of integers
# @param target, an integer
# @return a boolean
def searchMatrix(self, matrix, target):
m = len(matrix)
n = len(matrix[0])
left = 0; right = m*n - 1
while left <= right:
mid = (left+right)/2
value = matrix[mid/n][mid%n]
if value == target:
return True
elif value > target:
right = mid -1
elif value < target:
left = mid + 1
return False
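# A quick usage sketch with the example values from the problem statement above:
#   matrix = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]]
#   Solution().searchMatrix(matrix, 3)   # -> True
#   Solution().searchMatrix(matrix, 15)  # -> False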
|
UmassJin/Leetcode
|
Array/Search_in_2D_matrix.py
|
Python
|
mit
| 999 | 0.013013 |
from __future__ import absolute_import, unicode_literals, print_function, division
import tempfile
import re
import os
import codecs
import sublime
import sublime_plugin
from . import git_root, GitTextCommand
def temp_file(view, key):
if not view.settings().get('git_annotation_temp_%s' % key, False):
fd, filepath = tempfile.mkstemp(prefix='git_annotations_')
os.close(fd)
view.settings().set('git_annotation_temp_%s' % key, filepath)
return view.settings().get('git_annotation_temp_%s' % key)
class GitClearAnnotationCommand(GitTextCommand):
def run(self, view):
self.active_view().settings().set('live_git_annotations', False)
self.view.erase_regions('git.changes.x')
self.view.erase_regions('git.changes.+')
self.view.erase_regions('git.changes.-')
class GitToggleAnnotationsCommand(GitTextCommand):
def run(self, view):
if self.active_view().settings().get('live_git_annotations'):
self.view.run_command('git_clear_annotation')
else:
self.view.run_command('git_annotate')
class GitAnnotationListener(sublime_plugin.EventListener):
def on_modified(self, view):
if not view.settings().get('live_git_annotations'):
return
view.run_command('git_annotate')
def on_load(self, view):
s = sublime.load_settings("Git.sublime-settings")
if s.get('annotations'):
view.run_command('git_annotate')
class GitAnnotateCommand(GitTextCommand):
    # Unfortunately, git diff does not support reading the text to compare from
    # stdin, which makes a *live* annotation difficult, so temporary files are
    # used instead.
    # This works as follows:
    # 1. When the command is run for the first time for this file, temporary
    #    files for the HEAD version and the current buffer are allocated.
    # 2. Every run pulls the file's HEAD state from git, writes the current
    #    buffer to the second temporary file, and diffs the two with git diff.
    # The resulting output is then parsed and regions are set accordingly.
may_change_files = False
def run(self, view):
        # If the annotations are already running, we don't have to create a new
        # tmpfile
if not hasattr(self, "git_tmp"):
self.git_tmp = temp_file(self.active_view(), 'head')
self.buffer_tmp = temp_file(self.active_view(), 'buffer')
self.active_view().settings().set('live_git_annotations', True)
root = git_root(self.get_working_dir())
repo_file = os.path.relpath(self.view.file_name(), root).replace('\\', '/') # always unix
self.run_command(['git', 'show', 'HEAD:{0}'.format(repo_file)], show_status=False, no_save=True, callback=self.compare_tmp)
def compare_tmp(self, result, stdout=None):
with open(self.buffer_tmp, 'wb') as f:
contents = self.get_view_contents()
if self.view.encoding() == "UTF-8 with BOM":
f.write(codecs.BOM_UTF8)
f.write(contents)
with open(self.git_tmp, 'wb') as f:
f.write(result.encode())
self.run_command(['git', 'diff', '-u', '--', self.git_tmp, self.buffer_tmp], no_save=True, show_status=False, callback=self.parse_diff)
    # This is where the magic happens. At the moment, only one chunk format is supported. While
    # the unified diff format theoretically supports more, I don't think git diff creates them.
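    # For reference, the chunk header parsed below looks like:
    #   @@ -12,3 +14,4 @@
    # which the regex unpacks into line_before=12, len_before=3,
    # line_after=14 and len_after=4.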
def parse_diff(self, result, stdin=None):
if result.startswith('error:'):
print('Aborted annotations:', result)
return
lines = result.splitlines()
matcher = re.compile(r'^@@ -([0-9]*),([0-9]*) \+([0-9]*),([0-9]*) @@')
diff = []
for line_index in range(0, len(lines)):
line = lines[line_index]
if not line.startswith('@'):
continue
match = matcher.match(line)
if not match:
continue
line_before, len_before, line_after, len_after = [int(match.group(x)) for x in [1, 2, 3, 4]]
chunk_index = line_index + 1
tracked_line_index = line_after - 1
deletion = False
insertion = False
while True:
line = lines[chunk_index]
if line.startswith('@'):
break
elif line.startswith('-'):
if not line.strip() == '-':
deletion = True
tracked_line_index -= 1
elif line.startswith('+'):
if deletion and not line.strip() == '+':
diff.append(['x', tracked_line_index])
insertion = True
elif not deletion:
insertion = True
diff.append(['+', tracked_line_index])
else:
if not insertion and deletion:
diff.append(['-', tracked_line_index])
insertion = deletion = False
tracked_line_index += 1
chunk_index += 1
if chunk_index >= len(lines):
break
self.annotate(diff)
# Once we got all lines with their specific change types (either x, +, or - for
# modified, added, or removed) we can create our regions and do the actual annotation.
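    # e.g. diff = [['x', 10], ['+', 11], ['-', 42]] -- one [change_type, line]
    # pair per changed line, as collected by parse_diff above.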
def annotate(self, diff):
self.view.erase_regions('git.changes.x')
self.view.erase_regions('git.changes.+')
self.view.erase_regions('git.changes.-')
typed_diff = {'x': [], '+': [], '-': []}
for change_type, line in diff:
if change_type == '-':
full_region = self.view.full_line(self.view.text_point(line - 1, 0))
position = full_region.begin()
for i in range(full_region.size()):
typed_diff[change_type].append(sublime.Region(position + i))
else:
point = self.view.text_point(line, 0)
region = self.view.full_line(point)
if change_type == '-':
region = sublime.Region(point, point + 5)
typed_diff[change_type].append(region)
for change in ['x', '+']:
self.view.add_regions("git.changes.{0}".format(change), typed_diff[change], 'git.changes.{0}'.format(change), 'dot', sublime.HIDDEN)
self.view.add_regions("git.changes.-", typed_diff['-'], 'git.changes.-', 'dot', sublime.DRAW_EMPTY_AS_OVERWRITE)
def get_view_contents(self):
region = sublime.Region(0, self.view.size())
try:
contents = self.view.substr(region).encode(self._get_view_encoding())
except UnicodeError:
# Fallback to utf8-encoding
contents = self.view.substr(region).encode('utf-8')
except LookupError:
# May encounter an encoding we don't have a codec for
contents = self.view.substr(region).encode('utf-8')
return contents
# Copied from GitGutter
def _get_view_encoding(self):
# get encoding and clean it for python ex: "Western (ISO 8859-1)"
        # NOTE(maelnor): do we need a regex here?
pattern = re.compile(r'.+\((.*)\)')
encoding = self.view.encoding()
if encoding == "Undefined":
encoding = self.view.settings().get('default_encoding')
if pattern.match(encoding):
encoding = pattern.sub(r'\1', encoding)
encoding = encoding.replace('with BOM', '')
encoding = encoding.replace('Windows', 'cp')
encoding = encoding.replace('-', '_')
encoding = encoding.replace(' ', '')
# work around with ConvertToUTF8 plugin
origin_encoding = self.view.settings().get('origin_encoding')
return origin_encoding or encoding
|
kemayo/sublime-text-git
|
git/annotate.py
|
Python
|
mit
| 7,837 | 0.001786 |
import numpy as np
from spins import goos
from spins.goos import material
from spins.goos import shapes
def test_pixelated_cont_shape():
def init(size):
return np.ones(size)
var, shape = shapes.pixelated_cont_shape(
init, [100, 100, 10], [20, 30, 10],
var_name="var_name",
name="shape_name",
pos=goos.Constant([1, 2, 3]),
material=material.Material(index=1),
material2=material.Material(index=2))
assert var._goos_name == "var_name"
assert shape._goos_name == "shape_name"
def test_pixelated_cont_shape_flow_get_relative_cell_coords():
coords = shapes.PixelatedContShapeFlow.get_relative_cell_coords(
[100, 100, 10], [20, 40, 10])
np.testing.assert_array_equal(coords[0], [-40, -20, 0, 20, 40])
np.testing.assert_array_equal(coords[1], [-35, 0, 35])
assert coords[2] == 0
def test_pixelated_cont_shape_flow_get_relative_cell_coords_decimal():
factor = 2.25
coords = shapes.PixelatedContShapeFlow.get_relative_cell_coords(
np.array([100, 100, 10]) * factor,
np.array([20, 40, 10]) * factor)
np.testing.assert_array_equal(coords[0],
np.array([-40, -20, 0, 20, 40]) * factor)
np.testing.assert_array_equal(coords[1], np.array([-35, 0, 35]) * factor)
assert coords[2] == 0
def test_pixelated_cont_shape_flow_get_relative_edge_coords():
coords = shapes.PixelatedContShapeFlow.get_relative_edge_coords(
[100, 100, 10], [20, 40, 10])
np.testing.assert_array_equal(coords[0], [-50, -30, -10, 10, 30, 50])
np.testing.assert_array_equal(coords[1], [-50, -20, 20, 50])
np.testing.assert_array_equal(coords[2], [-5, 5])
def test_pixelated_cont_shape_flow_get_shape():
extents = [100, 110, 10]
pixel_size = [20, 40, 10]
coords = shapes.PixelatedContShapeFlow.get_relative_cell_coords(
extents, pixel_size)
shape = shapes.PixelatedContShapeFlow.get_shape(extents, pixel_size)
assert shape == [len(coords[0]), len(coords[1]), len(coords[2])]
def make_cuboid_flow_grad(index, priority=0):
extents = [0, 0, 0]
pos = [index, 0, 0]
shape = goos.cuboid(extents=extents, pos=pos, priority=priority)
flow = goos.CuboidFlow(extents=extents, pos=pos, priority=priority)
grad = goos.CuboidFlow.Grad(pos_grad=[index, 0, 0])
return shape, flow, grad
def test_group_shape_1_shape():
with goos.OptimizationPlan() as plan:
shape, shape_flow, shape_grad = make_cuboid_flow_grad(1)
group = goos.GroupShape([shape])
inputs = [shape_flow]
assert group.eval(inputs) == goos.ArrayFlow([shape_flow])
assert group.grad(inputs,
goos.ArrayFlow.Grad([shape_grad])) == [shape_grad]
def test_group_shape_2_shape():
with goos.OptimizationPlan() as plan:
shape1, flow1, grad1 = make_cuboid_flow_grad(1)
shape2, flow2, grad2 = make_cuboid_flow_grad(2)
group = goos.GroupShape([shape1, shape2])
inputs = [flow1, flow2]
assert group.eval(inputs) == goos.ArrayFlow([flow1, flow2])
assert group.grad(inputs,
goos.ArrayFlow.Grad([grad1,
grad2])) == [grad1, grad2]
def test_group_shape_2_shape_priority():
with goos.OptimizationPlan() as plan:
shape1, flow1, grad1 = make_cuboid_flow_grad(1, priority=1)
shape2, flow2, grad2 = make_cuboid_flow_grad(2)
group = goos.GroupShape([shape1, shape2])
inputs = [flow1, flow2]
assert group.eval(inputs) == goos.ArrayFlow([flow2, flow1])
assert group.grad(inputs,
goos.ArrayFlow.Grad([grad2,
grad1])) == [grad1, grad2]
def test_group_shape_3_shape_priority():
with goos.OptimizationPlan() as plan:
shape1, flow1, grad1 = make_cuboid_flow_grad(1, priority=2)
shape2, flow2, grad2 = make_cuboid_flow_grad(2)
shape3, flow3, grad3 = make_cuboid_flow_grad(3, priority=1)
group = goos.GroupShape([shape1, shape2, shape3])
inputs = [flow1, flow2, flow3]
assert group.eval(inputs) == goos.ArrayFlow([flow2, flow3, flow1])
assert group.grad(inputs,
goos.ArrayFlow.Grad([grad2, grad3, grad1
])) == [grad1, grad2, grad3]
def test_group_shape_3_shape_priority_stable_sort():
with goos.OptimizationPlan() as plan:
shape1, flow1, grad1 = make_cuboid_flow_grad(1, priority=2)
shape2, flow2, grad2 = make_cuboid_flow_grad(2, priority=1)
shape3, flow3, grad3 = make_cuboid_flow_grad(3, priority=1)
group = goos.GroupShape([shape1, shape2, shape3])
inputs = [flow1, flow2, flow3]
assert group.eval(inputs) == goos.ArrayFlow([flow2, flow3, flow1])
assert group.grad(inputs,
goos.ArrayFlow.Grad([grad2, grad3, grad1
])) == [grad1, grad2, grad3]
def test_group_shape_array():
with goos.OptimizationPlan() as plan:
shape1, flow1, grad1 = make_cuboid_flow_grad(1)
shape2, flow2, grad2 = make_cuboid_flow_grad(2)
shape3, flow3, grad3 = make_cuboid_flow_grad(3)
group = goos.GroupShape([shape1, goos.GroupShape([shape2, shape3])])
inputs = [flow1, goos.ArrayFlow([flow2, flow3])]
assert group.eval(inputs) == goos.ArrayFlow([flow1, flow2, flow3])
assert (group.grad(inputs, goos.ArrayFlow.Grad(
[grad1, grad2,
grad3])) == [grad1, goos.ArrayFlow.Grad([grad2, grad3])])
def test_group_shape_array_priority():
with goos.OptimizationPlan() as plan:
shape1, flow1, grad1 = make_cuboid_flow_grad(1, priority=2)
shape2, flow2, grad2 = make_cuboid_flow_grad(2)
shape3, flow3, grad3 = make_cuboid_flow_grad(3, priority=1)
shape4, flow4, grad4 = make_cuboid_flow_grad(4)
group = goos.GroupShape([
goos.GroupShape([shape1, shape2]), shape3,
goos.GroupShape([shape4])
])
inputs = [
goos.ArrayFlow([flow1, flow2]), flow3,
goos.ArrayFlow([flow4])
]
assert group.eval(inputs) == goos.ArrayFlow(
[flow2, flow4, flow3, flow1])
assert (group.grad(inputs,
goos.ArrayFlow.Grad(
[grad2, grad4, grad3, grad1])) == [
goos.ArrayFlow.Grad([grad1, grad2]), grad3,
goos.ArrayFlow.Grad([grad4])
])
|
stanfordnqp/spins-b
|
spins/goos/test_shapes.py
|
Python
|
gpl-3.0
| 6,692 | 0.000299 |
from setuptools import setup, find_packages
import os
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
CLASSIFIERS = [
#'Development Status :: 1 - ',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: All',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Open Data',
'Topic :: Elections',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.11',
]
INSTALL_REQUIREMENTS = [
'Django==1.11.3'
]
setup(
author='Stefan Kasberger',
author_email='info@offenewahlen.at',
name='offenewahlen_api',
version='0.1',
#version=cms.__version__,
description='Open Election Data API from Austria.',
long_description=README,
url='https://offenewahlen.at/',
license='MIT License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIREMENTS,
packages=find_packages(exclude=['project', 'project.*']),
include_package_data=True,
zip_safe=False,
#test_suite='runtests.main',
)
|
OKFNat/offenewahlen-nrw17
|
setup.py
|
Python
|
mit
| 1,452 | 0.002066 |
# -*- coding: utf-8 -*-
import factory
from data.tests.factories import DepartmentFactory
from ..models import Tourist, TouristCard
class TouristFactory(factory.DjangoModelFactory):
class Meta:
model = Tourist
first_name = 'Dave'
last_name = 'Greel'
email = 'greel@musicians.com'
class TouristCardFactory(factory.DjangoModelFactory):
class Meta:
model = TouristCard
tourist = factory.SubFactory(TouristFactory)
current_department = factory.SubFactory(DepartmentFactory)
|
notfier/touristique
|
tourists/tests/factories.py
|
Python
|
mit
| 524 | 0 |
# -*- coding: utf-8 -*-
"""
Plugin that logs the current optimum to standard output.
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# First Party
from metaopt.plugin.plugin import Plugin
class OptimumPrintPlugin(Plugin):
"""
Logs new optima for all return values to the standard output on result.
For example::
Minimum for value: f(a=0.1, b=0.2) = 0.7
"""
def __init__(self):
self._optima = dict() # holds a tuple of args and result for each name
def on_invoke(self, invocation):
# There are no new optima in invokes, so do nothing.
pass
def on_result(self, invocation):
"""
Logs new optima to the standard output.
"""
for return_value in invocation.function.return_spec.return_values:
name = return_value['name']
if name not in self._optima.keys() or \
self._optima[name][1] > invocation.current_result:
self._optima[name] = (tuple(invocation.fargs),
invocation.current_result)
print("%s for %s: f%s = %s" % \
("Minimum" if return_value['minimize'] else "Maximum",
name,
self._optima[name][0], self._optima[name][1]))
def on_error(self, invocation):
# There are no new optima in errors, so do nothing.
pass
|
cigroup-ol/metaopt
|
metaopt/plugin/print/optimum.py
|
Python
|
bsd-3-clause
| 1,486 | 0.000673 |
import argparse
import subprocess
import struct
import json
import shutil
import os
import collections
argparser = argparse.ArgumentParser(description='Reduce the logfile to make suitable for online destribution.')
argparser.add_argument('js_file', help='the js file to parse')
argparser.add_argument('output_name', help='the name of the output (without the .js)')
argparser.add_argument('--no-corrections', action='store_true', help='don\'t compute the corrections files')
args = argparser.parse_args()
corrections = not args.no_corrections
jsfile = args.js_file;
if jsfile[0] != "/":
jsfile = os.getcwd() + "/" + jsfile;
output = args.output_name;
pwd = os.path.dirname(os.path.realpath(__file__))
datapwd = os.path.dirname(jsfile)
print "Get data information"
fp = open(jsfile, "r")
data = json.load(fp)
fp.close()
TreeItem = collections.namedtuple('TreeItem', ['id', 'start', 'stop', 'textId', 'children', 'nextId'])
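# Each tree item is stored as one packed big-endian record ('!QQII'): two 64-bit
# unsigned fields (start, stop), a 32-bit field holding textId shifted left by
# one with the low bit flagging whether the item has children, and a 32-bit
# nextId pointing to the next sibling record (0 means no further sibling).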
class TreeReader(object):
def __init__(self, fp):
self.fp = fp
def readItem(self, offset):
struct_fmt = '!QQII'
struct_len = struct.calcsize(struct_fmt)
struct_unpack = struct.Struct(struct_fmt).unpack_from
self.fp.seek(offset * struct_len)
s = self.fp.read(struct_len)
if not s:
return
s = struct_unpack(s)
return TreeItem(offset, s[0], s[1], s[2] >> 1, s[2] & 0x1, s[3])
def writeItem(self, item):
struct_fmt = '!QQII'
struct_len = struct.calcsize(struct_fmt)
struct_pack = struct.Struct(struct_fmt).pack
self.fp.seek(item.id * struct_len)
s = struct_pack(item.start, item.stop, item.textId * 2 + item.children, item.nextId)
self.fp.write(s)
def getStop(self):
parentItem = self.readItem(0)
if parentItem.stop is not 0:
return parentItem.stop
# If there are no children. Still use parentItem.stop
if parentItem.children is 0:
return parentItem.stop
# The parent item doesn't contain the stop information.
# Get the last tree item for the stop information.
itemId = 1
while True:
item = self.readItem(itemId)
if item.nextId is 0:
return item.stop
itemId = item.nextId
class CreateDataTree(TreeReader):
def __init__(self, fp, start, stop):
TreeReader.__init__(self, fp)
self.writeItem(TreeItem(0, start, stop, 0, 0, 0))
self.newId = 1
def addChild(self, parent, oldItem):
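        # Append oldItem as the last child of 'parent': either walk the existing
        # sibling chain via nextId and link the new item at its end, or mark the
        # parent as having children when this is its first child (which must then
        # be written at offset parent + 1).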
parentItem = self.readItem(parent)
if parentItem.children is 1:
lastChildItem = self.readItem(parent + 1)
while lastChildItem.nextId is not 0:
lastChildItem = self.readItem(lastChildItem.nextId)
self.writeItem(lastChildItem._replace(nextId = self.newId))
else:
assert self.newId == parent + 1
self.writeItem(parentItem._replace(children = 1))
self.writeItem(TreeItem(self.newId, oldItem.start, oldItem.stop, oldItem.textId, 0, 0))
newId = self.newId
self.newId += 1
return newId
class Overview:
def __init__(self, tree, dic):
self.tree = tree
self.dic = dic
self.engineOverview = {}
self.scriptOverview = {}
self.scriptTimes = {}
def isScriptInfo(self, tag):
return tag[0:6] == "script";
def clearScriptInfo(self, tag):
return tag == "G" or tag == "g";
def calc(self):
self.processTreeItem("", self.tree.readItem(0))
def processTreeItem(self, script, item):
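        # 'time' starts out as the item's total duration; each direct child's
        # duration is subtracted below so that only the self-time of this item
        # is attributed to its engine/script overview entries.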
time = item.stop - item.start
info = self.dic[item.textId]
if self.clearScriptInfo(info):
script = ""
elif self.isScriptInfo(info):
script = info
if item.children is 1:
childItem = self.tree.readItem(item.id + 1)
while childItem:
time -= childItem.stop - childItem.start
self.processTreeItem(script, childItem)
if childItem.nextId is 0:
break
childItem = self.tree.readItem(childItem.nextId)
if item.id == 0:
return
if script is "":
return
if time > 0 and not self.isScriptInfo(info):
if info not in self.engineOverview:
self.engineOverview[info] = 0
self.engineOverview[info] += time
if script is not "":
if script not in self.scriptTimes:
self.scriptTimes[script] = {}
if info not in self.scriptTimes[script]:
self.scriptTimes[script][info] = 0;
self.scriptTimes[script][info] += 1;
if script not in self.scriptOverview:
self.scriptOverview[script] = {}
if info not in self.scriptOverview[script]:
self.scriptOverview[script][info] = 0
self.scriptOverview[script][info] += time;
def visitItem(oldTree, newTree, parent, oldItem):
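    # Copy oldItem (and, recursively, its children) into the new tree, but only
    # when its duration is at least the display threshold.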
if oldItem.stop - oldItem.start >= threshold:
newId = newTree.addChild(parent, oldItem)
if oldItem.children is 0:
return
childItem = oldTree.readItem(oldItem.id + 1)
while childItem:
visitItem(oldTree, newTree, newId, childItem)
if childItem.nextId is 0:
break
childItem = oldTree.readItem(childItem.nextId)
ndata = []
for j in range(len(data)):
fp = open(datapwd+"/"+data[j]["tree"], "rb")
wp = open(output+'.tree.'+str(j)+'.tl', 'w+b')
oldTree = TreeReader(fp)
parentItem = oldTree.readItem(0)
start = parentItem.start
stop = oldTree.getStop()
newTree = CreateDataTree(wp, start, stop)
    # accuracy of 0.1px when the graph is shown on a 1600px-wide display (1600*400)
threshold = (stop - start) / 640000
if parentItem.children is 1:
childItem = oldTree.readItem(1)
while childItem:
visitItem(oldTree, newTree, 0, childItem)
if childItem.nextId is 0:
break
childItem = oldTree.readItem(childItem.nextId)
if corrections:
fp = open(datapwd+"/"+data[j]["dict"], "r")
dic = json.load(fp)
fp.close()
fullOverview = Overview(oldTree, dic)
fullOverview.calc()
partOverview = Overview(newTree, dic)
partOverview.calc()
correction = {
"engineOverview": {},
"scriptTimes": {},
"scriptOverview": {}
}
for i in fullOverview.engineOverview:
correction["engineOverview"][i] = fullOverview.engineOverview[i]
if i in partOverview.engineOverview:
correction["engineOverview"][i] -= partOverview.engineOverview[i]
for script in fullOverview.scriptTimes:
correction["scriptTimes"][script] = {}
for part in fullOverview.scriptTimes[script]:
correction["scriptTimes"][script][part] = fullOverview.scriptTimes[script][part]
if script in partOverview.scriptTimes and part in partOverview.scriptTimes[script]:
correction["scriptTimes"][script][part] -= partOverview.scriptTimes[script][part]
for script in fullOverview.scriptOverview:
correction["scriptOverview"][script] = {}
for part in fullOverview.scriptOverview[script]:
correction["scriptOverview"][script][part] = fullOverview.scriptOverview[script][part]
if script in partOverview.scriptOverview and part in partOverview.scriptOverview[script]:
correction["scriptOverview"][script][part] -= partOverview.scriptOverview[script][part]
corrFile = open(output+'.corrections.'+str(j)+'.js', 'wb')
json.dump(correction, corrFile)
corrFile.close()
print "copy textmap"
shutil.copyfile(datapwd+"/"+data[j]["dict"], output+".dict."+str(j)+".js")
ndata.append({
"tree": os.path.basename(output)+'.tree.'+str(j)+'.tl',
"dict": os.path.basename(output)+'.dict.'+str(j)+'.js'
})
if corrections:
ndata[-1]["corrections"] = os.path.basename(output)+'.corrections.'+str(j)+'.js'
print "writing js file"
fp = open(output+".json", "w")
json.dump(ndata, fp);
fp.close()
|
tschneidereit/shumway
|
traceLogging/reduce.py
|
Python
|
apache-2.0
| 8,311 | 0.006858 |
from urlparse import urljoin
from django import template
from django.template.base import Node
from django.utils.encoding import iri_to_uri
register = template.Library()
class PrefixNode(template.Node):
def __repr__(self):
return "<PrefixNode for %r>" % self.name
def __init__(self, varname=None, name=None):
if name is None:
raise template.TemplateSyntaxError(
"Prefix nodes must be given a name to return.")
self.varname = varname
self.name = name
@classmethod
def handle_token(cls, parser, token, name):
"""
Class method to parse prefix node and return a Node.
"""
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != 'as':
raise template.TemplateSyntaxError(
"First argument in '%s' must be 'as'" % tokens[0])
if len(tokens) > 1:
varname = tokens[2]
else:
varname = None
return cls(varname, name)
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, name, ''))
return prefix
def render(self, context):
prefix = self.handle_simple(self.name)
if self.varname is None:
return prefix
context[self.varname] = prefix
return ''
@register.tag
def get_static_prefix(parser, token):
"""
Populates a template variable with the static prefix,
``settings.STATIC_URL``.
Usage::
{% get_static_prefix [as varname] %}
Examples::
{% get_static_prefix %}
{% get_static_prefix as static_prefix %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
"""
Populates a template variable with the media prefix,
``settings.MEDIA_URL``.
Usage::
{% get_media_prefix [as varname] %}
Examples::
{% get_media_prefix %}
{% get_media_prefix as media_prefix %}
"""
return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(Node):
def __init__(self, varname=None, path=None):
if path is None:
raise template.TemplateSyntaxError(
"Static template nodes must be given a path to return.")
self.path = path
self.varname = varname
def url(self, context):
path = self.path.resolve(context)
return self.handle_simple(path)
def render(self, context):
url = self.url(context)
if self.varname is None:
return url
context[self.varname] = url
return ''
@classmethod
def handle_simple(cls, path):
return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse prefix node and return a Node.
"""
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError(
"'%s' takes at least one argument (path to file)" % bits[0])
path = parser.compile_filter(bits[1])
if len(bits) >= 2 and bits[-2] == 'as':
varname = bits[3]
else:
varname = None
return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
"""
Joins the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
return StaticNode.handle_token(parser, token)
def static(path):
return StaticNode.handle_simple(path)
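
# Illustrative usage sketch (added for clarity, not part of the original
# module). Assuming settings.STATIC_URL = "/static/" and
# settings.MEDIA_URL = "/media/":
#
#   static("myapp/css/base.css")             # -> "/static/myapp/css/base.css"
#   PrefixNode.handle_simple("MEDIA_URL")    # -> "/media/"
#
# and the equivalent template usage:
#
#   {% load static %}
#   <link rel="stylesheet" href="{% static "myapp/css/base.css" %}">
#   {% get_media_prefix as media_prefix %}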
|
rebost/django
|
django/templatetags/static.py
|
Python
|
bsd-3-clause
| 3,940 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.CommitmentAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.CommitmentAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.CommitmentAggregatedList],
request: compute.AggregatedListRegionCommitmentsRequest,
response: compute.CommitmentAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListRegionCommitmentsRequest):
The initial request object.
response (google.cloud.compute_v1.types.CommitmentAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.AggregatedListRegionCommitmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[compute.CommitmentAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[Tuple[str, compute.CommitmentsScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.CommitmentsScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.CommitmentList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.CommitmentList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.CommitmentList],
request: compute.ListRegionCommitmentsRequest,
response: compute.CommitmentList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListRegionCommitmentsRequest):
The initial request object.
response (google.cloud.compute_v1.types.CommitmentList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListRegionCommitmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[compute.CommitmentList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[compute.Commitment]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
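
# Illustrative usage sketch (added for clarity, not part of the generated
# module). The client construction and argument values below are assumptions
# for the example only; in practice these pagers are returned by
# RegionCommitmentsClient rather than instantiated directly.
#
#   from google.cloud import compute_v1
#
#   client = compute_v1.RegionCommitmentsClient()
#
#   # ListPager: iterates compute.Commitment items across all pages.
#   for commitment in client.list(project="my-project", region="us-central1"):
#       print(commitment.name)
#
#   # AggregatedListPager: iterates (scope, CommitmentsScopedList) pairs.
#   for scope, scoped_list in client.aggregated_list(project="my-project"):
#       print(scope, len(scoped_list.commitments))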
|
googleapis/python-compute
|
google/cloud/compute_v1/services/region_commitments/pagers.py
|
Python
|
apache-2.0
| 5,692 | 0.000878 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for aggregator module."""
import unittest2 as unittest
from nupic.data import aggregator
class AggregatorTest(unittest.TestCase):
"""Unit tests for misc. aggregator functions."""
def testFixAggregationDict(self):
# Simplest case.
result = aggregator._aggr_weighted_mean((1.0, 1.0), (1, 1))
self.assertAlmostEqual(result, 1.0, places=7)
# Simple non-uniform case.
result = aggregator._aggr_weighted_mean((1.0, 2.0), (1, 2))
self.assertAlmostEqual(result, 5.0/3.0, places=7)
# Make sure it handles integer values as integers.
result = aggregator._aggr_weighted_mean((1, 2), (1, 2))
self.assertAlmostEqual(result, 1, places=7)
# More-than-two case.
result = aggregator._aggr_weighted_mean((1.0, 2.0, 3.0), (1, 2, 3))
self.assertAlmostEqual(result, 14.0/6.0, places=7)
# Handle zeros.
result = aggregator._aggr_weighted_mean((1.0, 0.0, 3.0), (1, 2, 3))
self.assertAlmostEqual(result, 10.0/6.0, places=7)
# Handle negative numbers.
result = aggregator._aggr_weighted_mean((1.0, -2.0, 3.0), (1, 2, 3))
self.assertAlmostEqual(result, 1.0, places=7)
if __name__ == '__main__':
unittest.main()
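
# Note added for clarity (not part of the original test module): the expected
# values above follow the weighted-mean formula
#     mean = sum(value_i * weight_i) / sum(weight_i)
# e.g. values (1.0, 2.0) with weights (1, 2) give (1*1.0 + 2*2.0) / (1 + 2)
# = 5/3, and the all-integer case (1, 2) with weights (1, 2) yields 1, i.e.
# 5/3 truncated, since integer inputs are kept as integers (see the comment
# in testFixAggregationDict).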
|
0x0all/nupic
|
tests/unit/py2/nupic/data/aggregator_test.py
|
Python
|
gpl-3.0
| 2,186 | 0.001372 |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run YCSB against MongoDB.
YCSB is a load generator for many 'cloud' databases. MongoDB is a NoSQL
database.
MongoDB homepage: http://www.mongodb.org/
YCSB homepage: https://github.com/brianfrankcooper/YCSB/wiki
"""
import functools
import random
import string
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_benchmarks import mongodb_ycsb_benchmark
from perfkitbenchmarker.linux_packages import ycsb
FLAGS = flags.FLAGS
flags.DEFINE_string('kubernetes_mongodb_cpu_request', '7.1',
'CPU request of mongodb.')
flags.DEFINE_string('kubernetes_mongodb_memory_request', '16Gi',
'Memory request of mongodb.')
flags.DEFINE_string('kubernetes_mongodb_cpu_limit', '7.6',
'CPU limit of mongodb, should be bigger than CPU request')
flags.DEFINE_string(
'kubernetes_mongodb_memory_limit', '32Gi',
'Memory limit of mongodb, should be bigger than memory request')
flags.DEFINE_string('kubernetes_mongodb_disk_size', '200Gi',
'Disk size used by mongodb')
# TODO(user): Use GetStorageClass function, once available.
STORAGE_CLASS = flags.DEFINE_string(
'kubernetes_mongodb_storage_class',
None,
'storageClassType of data disk. Defaults to provider specific storage '
'class.')
BENCHMARK_NAME = 'kubernetes_mongodb'
BENCHMARK_CONFIG = """
kubernetes_mongodb:
description: Benchmarks MongoDB server performance.
container_cluster:
cloud: GCP
type: Kubernetes
vm_count: 1
vm_spec: *default_single_core
nodepools:
mongodb:
vm_count: 1
vm_spec:
GCP:
machine_type: n2-standard-8
zone: us-central1-a
Azure:
zone: westus
machine_type: Standard_D3_v2
AWS:
zone: us-east-1a
machine_type: c5.xlarge
clients:
vm_count: 1
vm_spec: *default_single_core
vm_groups:
clients:
vm_spec: *default_single_core
vm_count: null
"""
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['ycsb_client_vms'].present:
config['container_cluster']['nodepools']['mongodb']['vm_count'] = (
FLAGS.ycsb_client_vms)
return config
def _PrepareClient(vm):
"""Install YCSB on the client VM."""
vm.Install('ycsb')
# Disable logging for MongoDB driver, which is otherwise quite verbose.
log_config = """<configuration><root level="WARN"/></configuration>"""
vm.RemoteCommand("echo '{0}' > {1}/logback.xml".format(
log_config, ycsb.YCSB_DIR))
def _PrepareDeployment(benchmark_spec):
"""Deploys MongoDB Operator and instance on the cluster."""
cluster = benchmark_spec.container_cluster
admin_password = ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(20))
storage_class = STORAGE_CLASS.value or cluster.GetDefaultStorageClass()
cluster.ApplyManifest(
'container/kubernetes_mongodb/kubernetes_mongodb_crd.yaml')
cluster.ApplyManifest(
'container/kubernetes_mongodb/kubernetes_mongodb_operator.yaml.j2',
cpu_request=FLAGS.kubernetes_mongodb_cpu_request,
cpu_limit=FLAGS.kubernetes_mongodb_cpu_limit,
memory_request=FLAGS.kubernetes_mongodb_memory_request,
memory_limit=FLAGS.kubernetes_mongodb_memory_limit,
disk_size=FLAGS.kubernetes_mongodb_disk_size,
storage_class=storage_class,
admin_password=admin_password)
time.sleep(60)
benchmark_spec.container_cluster.WaitForResource('pod/mongodb-0', 'Ready')
mongodb_cluster_ip = benchmark_spec.container_cluster.GetClusterIP(
'mongodb-service')
benchmark_spec.mongodb_url = 'mongodb://ycsb:{password}@{ip_address}:27017/ycsb?authSource=ycsb'.format(
password=admin_password, ip_address=mongodb_cluster_ip)
def Prepare(benchmark_spec):
"""Install MongoDB on one VM and YCSB on another.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
server_partials = [functools.partial(_PrepareDeployment, benchmark_spec)]
client_partials = [
functools.partial(_PrepareClient, client)
for client in benchmark_spec.vm_groups['clients']
]
vm_util.RunThreaded((lambda f: f()), server_partials + client_partials)
benchmark_spec.executor = ycsb.YCSBExecutor('mongodb', cp=ycsb.YCSB_DIR)
def Run(benchmark_spec):
return mongodb_ycsb_benchmark.Run(benchmark_spec)
def Cleanup(benchmark_spec):
"""Remove MongoDB and YCSB.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
del benchmark_spec
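
# Illustrative invocation sketch (added for clarity, not part of the original
# module). Flag names match the definitions above; the storage class and
# client VM count values are assumptions for the example only.
#
#   ./pkb.py --benchmarks=kubernetes_mongodb --cloud=GCP \
#       --kubernetes_mongodb_disk_size=500Gi \
#       --kubernetes_mongodb_storage_class=premium-rwo \
#       --ycsb_client_vms=2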
|
GoogleCloudPlatform/PerfKitBenchmarker
|
perfkitbenchmarker/linux_benchmarks/kubernetes_mongodb_ycsb_benchmark.py
|
Python
|
apache-2.0
| 5,544 | 0.005051 |