text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='hqueue',
version='0.2.0',
license='BSD',
description='asyncio.Queue with history',
long_description='%s\n\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Henry S. Harrison',
author_email='henry.schafer.harrison@gmail.com',
url='https://github.com/hsharrison/history-queue',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=True,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
],
keywords=[
'asyncio', 'deque', 'queue', 'history',
],
extras_require={
'test': ['pytest', 'pytest-cov', 'hypothesis', 'toolz'],
}
)
| hsharrison/history-queue | setup.py | Python | bsd-2-clause | 1,610 | 0.001242 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import DataBoxManagementClientConfiguration
from .operations import Operations
from .operations import JobsOperations
from .operations import ServiceOperations
from . import models
class DataBoxManagementClient(object):
"""The DataBox Client.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.databox.operations.Operations
:ivar jobs: JobsOperations operations
:vartype jobs: azure.mgmt.databox.operations.JobsOperations
:ivar service: ServiceOperations operations
:vartype service: azure.mgmt.databox.operations.ServiceOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Subscription Id.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = DataBoxManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.jobs = JobsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service = ServiceOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> DataBoxManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
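# Minimal usage sketch (hypothetical, not part of the generated client): it assumes
# the azure-identity package for the TokenCredential and a placeholder subscription id;
# the operation groups used below are the ones declared in the docstring above.
#
#     from azure.identity import DefaultAzureCredential
#
#     with DataBoxManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         for operation in client.operations.list():
#             print(operation)
#         # client.jobs and client.service expose the remaining operation groups.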
| Azure/azure-sdk-for-python | sdk/databox/azure-mgmt-databox/azure/mgmt/databox/v2020_11_01/_data_box_management_client.py | Python | mit | 3,173 | 0.001891 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import re
import tablemanager.models
import django.utils.timezone
import borg_utils.resource_status
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('tablemanager', '0020_workspace_auth_level'),
]
operations = [
migrations.CreateModel(
name='Style',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.SlugField(help_text='Name of Publish', max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^[a-z0-9_]+$'), 'Slug can only contain lowercase letters, numbers and underscores', 'invalid')])),
('description', models.CharField(max_length=512, null=True, blank=True)),
('status', models.CharField(default=b'Enabled', max_length=32, choices=[(b'Enabled', b'Enabled'), (b'Disabled', b'Disabled')])),
('sld', tablemanager.models.XMLField(help_text='Styled Layer Descriptor', null=True, blank=True)),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('publish', models.ForeignKey(to='tablemanager.Publish')),
],
options={
'ordering': ('publish', 'name'),
},
bases=(models.Model, borg_utils.resource_status.ResourceStatusMixin),
),
migrations.AlterUniqueTogether(
name='style',
unique_together=set([('publish', 'name')]),
),
migrations.AddField(
model_name='publish',
name='default_style',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to='tablemanager.Style', null=True),
preserve_default=True,
),
]
| parksandwildlife/borgcollector | tablemanager/migrations/0021_auto_20160219_0803.py | Python | bsd-3-clause | 2,007 | 0.003986 |
class HostCommands(object):
def __new__(cls, host):
if host.is_ol6:
return HostCommandOL6(host)
if host.is_ol7:
return HostCommandOL7(host)
class HostBaseCommands(object):
def __init__(self, host):
self.host = host
self.infra = host.infra
self.engine_name = host.infra.engine.name
def exec_service_command(self, service_name, action, no_output=False):
cmd = self.command_tmpl.format(
service_name=service_name,
action=action
)
if no_output:
cmd += ' > /dev/null'
return cmd
def init_database_script(self, action, instances, no_output=True):
script = ''
for instance in instances:
script += "{};".format(self.database(
action=action
))
if instance.is_sentinel:
script += "{};".format(self.secondary_service(
action=action,
no_output=True
))
return script
def secondary_service(self, action, no_output=True):
return self.exec_service_command(
service_name=self.SECONDARY_SERVICE_NAME_BY_ENGINE[
self.engine_name
],
action=action,
no_output=no_output
)
def database(self, action, no_output=True):
return self.exec_service_command(
service_name=self.PRIMARY_SERVICE_NAME_BY_ENGINE[self.engine_name],
action=action,
no_output=no_output
)
def monit_script(self, action='start'):
return """
echo ""; echo $(date "+%Y-%m-%d %T") "- Monit"
{}
""".format(
self.exec_service_command(
service_name='monit',
action=action
)
)
def rsyslog(self, action, no_output=False):
return self.exec_service_command(
service_name='rsyslog',
action=action,
no_output=no_output
)
def telegraf(self, action, no_output=False):
return self.exec_service_command(
service_name='telegraf',
action=action,
no_output=no_output
)
def httpd(self, action, no_output=False):
return self.exec_service_command(
service_name='httpd',
action=action,
no_output=no_output
)
def heartbeat(self, action, no_output=False):
return self.exec_service_command(
service_name='pt-heartbeat',
action=action,
no_output=no_output
)
@property
def command_tmpl(self):
raise NotImplementedError()
class HostCommandOL6(HostBaseCommands):
PRIMARY_SERVICE_NAME_BY_ENGINE = {
'mongodb': 'mongodb',
'redis': 'redis',
'mysql': 'mysql',
'mysql_percona': 'mysql'
}
SECONDARY_SERVICE_NAME_BY_ENGINE = {
'mongodb': 'mongodb',
'redis': 'sentinel',
'mysql': '',
'mysql_percona': ''
}
command_tmpl = '/etc/init.d/{service_name} {action}'
class HostCommandOL7(HostBaseCommands):
PRIMARY_SERVICE_NAME_BY_ENGINE = {
'mongodb': 'mongodb',
'redis': 'redis',
'mysql': 'mysql',
'mysql_percona': 'mysql'
}
SECONDARY_SERVICE_NAME_BY_ENGINE = {
'mongodb': 'mongodb',
'redis': 'sentinel',
'mysql': '',
'mysql_percona': ''
}
command_tmpl = 'sudo systemctl {action} {service_name}.service'
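# Illustrative sketch (the `host` object here is hypothetical): the same logical
# action renders differently because only `command_tmpl` changes between OS flavours.
#
#     HostCommandOL6(host).exec_service_command('mysql', 'restart')
#     # -> '/etc/init.d/mysql restart'
#     HostCommandOL7(host).exec_service_command('mysql', 'restart')
#     # -> 'sudo systemctl restart mysql.service'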
| globocom/database-as-a-service | dbaas/physical/commands.py | Python | bsd-3-clause | 3,573 | 0 |
# --- job 1/8 & 2/8 : check_etablissements & extract_etablissements
DISTINCT_DEPARTEMENTS_HAVING_OFFICES = 15
# --- job 5/8 : compute_scores
SCORE_COEFFICIENT_OF_VARIATION_MAX = 8
MINIMUM_OFFICES_REQUIRED_TO_TRAIN_MODEL = 0
MAXIMUM_COMPUTE_SCORE_JOB_FAILURES = 94 # 96 departements == 2 successes + 94 failures
RMSE_MAX = 300
# --- job 6/8 : validate_scores
SCORE_REDUCING_MINIMUM_THRESHOLD = 0
# SCORE_ALTERNANCE_REDUCING_MINIMUM_THRESHOLD = 0
DEPARTEMENTS_TO_BE_SANITY_CHECKED = ['14', '69']
| StartupsPoleEmploi/labonneboite | labonneboite/importer/conf/development.py | Python | agpl-3.0 | 497 | 0.002012 |
from django import forms
from ..models import BaseDemographic
class BaseDemographicForm(forms.ModelForm):
class Meta:
model = BaseDemographic
fields = ['first_name','last_name','phone','dob']
| rdespoiu/QTitan | QTitan/QTSurvey/Controllers/BaseDemographicForm.py | Python | gpl-3.0 | 214 | 0.023364 |
# All the code samples below have one parameter, which is where the protection level name
# for that storage type will be inserted, e.g. NSDataWritingFileProtectionCompleteUnlessOpen
CODE_SAMPLE_CORE_DATA = """
- (NSPersistentStoreCoordinator *)persistentStoreCoordinator {
if (persistentStoreCoordinator_ != nil) {
return persistentStoreCoordinator_;
}
persistentStoreCoordinator_ = [[NSPersistentStoreCoordinator alloc]
initWithManagedObjectModel:[self managedObjectModel]];
NSURL *storeURL = [NSURL fileURLWithPath:
[[self applicationDocumentsDirectory] stringByAppendingPathComponent: @"MyStore.sqlite"]];
[persistentStoreCoordinator_ addPersistentStoreWithType:NSSQLiteStoreType
configuration:nil URL:storeURL options:nil error:&error];
NSDictionary *fileAttributes = [NSDictionary
dictionaryWithObject:%s
forKey:NSFileProtectionKey];
[[NSFileManager defaultManager] setAttributes:fileAttributes
ofItemAtPath:[storeURL path] error: &error];
return persistentStoreCoordinator_;
}"""
CODE_SAMPLE_SQL = """
int flags = SQLITE_OPEN_CREATE |
SQLITE_OPEN_READWRITE |
%s;
sqlite3_open_v2(path, &database, flags, NULL)
// Or, if you prefer FMDB:
FMDatabase *database = [FMDatabase databaseWithPath:dbPath];
[database openWithFlags:flags]
"""
CODE_SAMPLE_RAW_DATA = """
NSData *contents = [@"secret file contents" dataUsingEncoding:NSUTF8StringEncoding];
[contents writeToFile:path
options:%s
error:&error];
"""
CODE_SAMPLE_KEYCHAIN = """
// Note that metadata, like the account name, is not encrypted.
NSDictionary *item = @{
(__bridge id)kSecAttrAccount: account,
(__bridge id)kSecClass: (__bridge id)kSecClassGenericPassword,
(__bridge id)kSecAttrAccessible: (__bridge id)%s,
(__bridge id)kSecValueData: data,
};
OSStatus error = SecItemAdd((__bridge CFDictionaryRef)item, NULL);
"""
| erikr/howtostoreiosdata | howtostoreiosdata/wizard/code_samples.py | Python | mit | 1,995 | 0.002506 |
import __init__
import math
import numpy as np
class Point:
'''A point in 3D space, with some helper methods. '''
def __init__ (self, x=0.0, y=0.0 ,z=0.0):
self.x=x
self.y=y
self.z=z
def coors(self):
coor=(self.x,self.y,self.z)
return coor
def dist_to(self,apoint):
return math.sqrt(math.pow(self.x - apoint.x,2) + math.pow(self.y - apoint.y,2) + math.pow(self.z - apoint.z,2))
def CopyOf(self):
return Point(self.x, self.y, self.z)
def average_with(self, other_point):
return Point((self.x + other_point.x) / 2.0, (self.y + other_point.y) / 2.0, (self.z + other_point.z) / 2.0)
def dot_product_with(self, other_point):
return self.x * other_point.x + self.y * other_point.y + self.z * other_point.z
def length(self):
return self.dist_to(Point(0.0,0.0,0.0))
def minus(self, other_point):
return Point(self.x - other_point.x, self.y - other_point.y, self.z - other_point.z)
def CreatePDBLine(self):
#if len(self.atomname) > 1: self.atomname = self.atomname[:1].upper() + self.atomname[1:].lower()
output = "ATOM "
#output = output + str(index).rjust(6) + self.atomname.rjust(5) + self.residue.rjust(4)
output = output + "5".rjust(6) + "X".rjust(5) + "XXX".rjust(4)
output = output + ("%.3f" % self.x).rjust(18)
output = output + ("%.3f" % self.y).rjust(8)
output = output + ("%.3f" % self.z).rjust(8)
output = output + "X".rjust(24) # + " " + str(uniqueID) #This last part must be removed
return output
#######basic function is put here#####
def coors_to_point(coors):
point=Point(coors[0],coors[1],coors[2])
return point
# -------------------------------- Vertor -------------------------
class Vector:
pass
# -------------------------------- Vertex -------------------------
# -------------------------------- Edge ---------------------------
# -------------------------------- Face ---------------------------
# -------------------------------- Box ----------------------------
# -------------------------------- Cubic --------------------------
# -------------------------------- Circle -------------------------
# -------------------------------- Ball ---------------------------
# -------------------------------- Grid ---------------------------
# ----------------------------- MultiGrid -------------------------
# -------------------------- High Dimension ? ---------------------
| platinhom/CADDHom | python/basic/HHGeometry.py | Python | gpl-2.0 | 2,547 | 0.016097 |
from collections import namedtuple
from cairo import LINE_JOIN_ROUND
from zorro.di import di, dependency, has_dependencies
from tilenol.groups import GroupManager
from tilenol.commands import CommandDispatcher
from .base import Widget
from tilenol.theme import Theme
from tilenol.window import Window
GroupState = namedtuple(
'GroupState',
('name', 'empty', 'active', 'visible', 'urgent')
)
@has_dependencies
class State(object):
commander = dependency(CommandDispatcher, 'commander')
gman = dependency(GroupManager, 'group-manager')
def __init__(self):
self._state = None
@property
def dirty(self):
return self._state != self._read()
def update(self):
nval = self._read()
if nval != self._state:
self._state = nval
return True
def _read(self):
cur = self.commander.get('group')
visgr = self.gman.current_groups.values()
return tuple(GroupState(g.name, g.empty, g is cur, g in visgr,
g.has_urgent_windows)
for g in self.gman.groups)
@property
def groups(self):
return self._state
@has_dependencies
class Groupbox(Widget):
theme = dependency(Theme, 'theme')
def __init__(self, *, filled=False, first_letter=False, right=False):
super().__init__(right=right)
self.filled = filled
self.first_letter = first_letter
def __zorro_di_done__(self):
self.state = di(self).inject(State())
bar = self.theme.bar
self.font = bar.font
self.inactive_color = bar.dim_color_pat
self.urgent_color = bar.bright_color_pat
self.active_color = bar.text_color_pat
self.selected_color = bar.active_border_pat
self.subactive_color = bar.subactive_border_pat
self.padding = bar.text_padding
self.border_width = bar.border_width
self.state.gman.group_changed.listen(self.bar.redraw.emit)
Window.any_window_changed.listen(self.check_state)
def check_state(self):
if self.state.dirty:
self.bar.redraw.emit()
def draw(self, canvas, l, r):
self.state.update()
assert not self.right, "Sorry, right not implemented"
self.font.apply(canvas)
canvas.set_line_join(LINE_JOIN_ROUND)
canvas.set_line_width(self.border_width)
x = l
between = self.padding.right + self.padding.left
for gs in self.state.groups:
gname = gs.name
if self.first_letter:
gname = gname[0]
sx, sy, w, h, ax, ay = canvas.text_extents(gname)
if gs.active:
canvas.set_source(self.selected_color)
if self.filled:
canvas.rectangle(x, 0, ax + between, self.height)
canvas.fill()
else:
canvas.rectangle(
x + 2, 2, ax + between - 4, self.height - 4
)
canvas.stroke()
elif gs.visible:
canvas.set_source(self.subactive_color)
if self.filled:
canvas.rectangle(x, 0, ax + between, self.height)
canvas.fill()
else:
canvas.rectangle(
x + 2, 2, ax + between - 4, self.height - 4
)
canvas.stroke()
if gs.urgent:
canvas.set_source(self.urgent_color)
elif gs.empty:
canvas.set_source(self.inactive_color)
else:
canvas.set_source(self.active_color)
canvas.move_to(x + self.padding.left,
self.height - self.padding.bottom)
canvas.show_text(gname)
x += ax + between
return x, r
| tailhook/tilenol | tilenol/widgets/groupbox.py | Python | mit | 3,880 | 0.000258 |
import os
import sys
import golem
from golem.core import (utils, session, suite as suite_module, test,
settings_manager, test_directory)
from golem.core.project import Project, create_project
from golem.gui import gui_start
from golem.test_runner.execution_runner import ExecutionRunner
from golem.test_runner import interactive as interactive_module
from golem.gui.user_management import Users
from . import messages
def command_dispatcher(args):
if args.help:
display_help(args.help, args.command)
elif args.command == 'run':
run_command(args.project, args.test_query, args.browsers, args.processes,
args.environments, args.interactive, args.timestamp, args.report,
args.report_folder, args.report_name, args.tags, args.cli_log_level)
elif args.command == 'gui':
gui_command(args.host, args.port, args.debug)
elif args.command == 'createproject':
createproject_command(args.project)
elif args.command == 'createtest':
createtest_command(args.project, args.test)
elif args.command == 'createsuite':
createsuite_command(args.project, args.suite)
elif args.command == 'createuser':
createuser_command()
elif args.command == 'createsuperuser':
createsuperuser_command(args.username, args.email, args.password, args.noinput)
elif args.command is None:
if args.version:
display_version()
else:
print(messages.USAGE_MSG)
def display_help(help, command):
if help == 'run' or command == 'run':
print(messages.RUN_USAGE_MSG)
elif help == 'gui' or command == 'gui':
print(messages.GUI_USAGE_MSG)
elif help == 'createproject' or command == 'createproject':
print(messages.CREATEPROJECT_USAGE_MSG)
elif help == 'createtest' or command == 'createtest':
print(messages.CREATETEST_USAGE_MSG)
elif help == 'createsuite' or command == 'createsuite':
print(messages.CREATESUITE_USAGE_MSG)
elif help == 'createsuperuser' or command == 'createsuperuser':
print(messages.CREATESUPERUSER_USAGE_MSG)
else:
print(messages.USAGE_MSG)
def run_command(project='', test_query='', browsers=None, processes=1,
environments=None, interactive=False, timestamp=None,
reports=None, report_folder=None, report_name=None,
tags=None, cli_log_level=None):
execution_runner = ExecutionRunner(browsers, processes, environments, interactive,
timestamp, reports, report_folder, report_name, tags)
if project:
if test_directory.project_exists(project):
execution_runner.project = project
session.settings = settings_manager.get_project_settings(project)
# add --interactive value to settings to make
# it available from inside a test
session.settings['interactive'] = interactive
# override cli_log_level setting if provided by the CLI
if cli_log_level:
session.settings['cli_log_level'] = cli_log_level.upper()
if test_query:
norm_query = utils.normalize_query(test_query)
if suite_module.Suite(project, norm_query).exists:
execution_runner.run_suite(norm_query)
elif test.Test(project, norm_query).exists:
execution_runner.run_test(norm_query)
else:
if test_query == '.':
test_query = ''
path = os.path.join(session.testdir, 'projects',
project, 'tests', test_query)
if os.path.isdir(path):
execution_runner.run_directory(test_query)
else:
msg = ('golem run: error: the value {} does not match '
'an existing test, suite or directory'.format(test_query))
sys.exit(msg)
else:
print(messages.RUN_USAGE_MSG)
tests = Project(project).test_tree
print('Tests:')
utils.display_tree_structure_command_line(tests['sub_elements'])
suites = Project(project).suite_tree
print('\nTest Suites:')
# TODO print suites in structure
for suite in suites['sub_elements']:
print(' ' + suite['name'])
else:
msg = ('golem run: error: the project {} does not exist'.format(project))
sys.exit(msg)
elif interactive:
interactive_module.interactive(session.settings, browsers)
else:
print(messages.RUN_USAGE_MSG)
print('Projects:')
for project in test_directory.get_projects():
print(' {}'.format(project))
def gui_command(host=None, port=5000, debug=False):
gui_start.run_gui(host, port, debug)
def createproject_command(project):
if test_directory.project_exists(project):
msg = ('golem createproject: error: a project with name \'{}\' already exists'
.format(project))
sys.exit(msg)
else:
create_project(project)
def createtest_command(project, test_name):
if not test_directory.project_exists(project):
msg = ('golem createtest: error: a project with name {} '
'does not exist'.format(project))
sys.exit(msg)
test_name = test_name.replace(os.sep, '.')
errors = test.create_test(project, test_name)
if errors:
sys.exit('golem createtest: error: {}'.format(' '.join(errors)))
def createsuite_command(project, suite_name):
if not test_directory.project_exists(project):
msg = ('golem createsuite: error: a project with name {} '
'does not exist'.format(project))
sys.exit(msg)
errors = suite_module.create_suite(project, suite_name)
if errors:
sys.exit('golem createsuite: error: {}'.format(' '.join(errors)))
# TODO deprecated
def createuser_command():
sys.exit('Error: createuser command is deprecated. Use createsuperuser instead.')
def createsuperuser_command(username, email, password, no_input=False):
if no_input:
if username is None or password is None:
sys.exit('Error: --username and --password are required for --noinput.')
else:
try:
while True:
username = input('Username: ').strip()
if username:
break
while True:
email = input('Email address (optional): ').strip()
if email and not utils.validate_email(email):
print('Error: Enter a valid email address.')
else:
break
while True:
password = input('Password: ')
repeat_password = input('Password (again): ')
if not len(password):
print('Error: Blank passwords are not allowed.')
elif password != repeat_password:
print('Error: The passwords did not match.')
else:
break
except KeyboardInterrupt:
sys.exit('Cancelled.')
errors = Users.create_super_user(username, password, email)
if errors:
for error in errors:
print('Error: {}'.format(error))
exit(1)
else:
print('Superuser {} was created successfully.'.format(username))
def createdirectory_command(dir_name, no_confirm=False):
"""Create a new Golem test directory
dir_name must be an absolute or relative path.
If the path exists and is not empty and no_confirm
is False the user will be prompted to continue.
"""
abspath = os.path.abspath(dir_name)
if os.path.exists(abspath) and os.listdir(abspath):
# directory is not empty
if not no_confirm:
msg = 'Directory {} is not empty, continue? [Y/n]'.format(dir_name)
if not utils.prompt_yes_no(msg):
return
if os.path.isfile(os.path.join(abspath, '.golem')):
sys.exit('Error: target directory is already an existing Golem test directory')
session.testdir = abspath
test_directory.create_test_directory(abspath)
print('New golem test directory created at {}'.format(abspath))
print('Use credentials to access the GUI module:')
print('user: admin')
print('password: admin')
def display_version():
print('Golem version {}'.format(golem.__version__))
| lucianopuccio/golem | golem/cli/commands.py | Python | mit | 8,682 | 0.001497 |
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
- LeNet5 did not use fully-connected convolutions at the second layer
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
dataset='mnist.pkl.gz',
nkerns=[20, 50], batch_size=500):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: path to the dataset used for training /testing (MNIST here)
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
"""
rng = numpy.random.RandomState(23455)
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_valid_batches /= batch_size
n_test_batches /= batch_size
print test_set_x.get_value()[0].shape
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (28, 28) is the size of MNIST images.
layer0_input = x.reshape((batch_size, 1, 28, 28))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
# maxpooling reduces this further to (24/2, 24/2) = (12, 12)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
# maxpooling reduces this further to (8/2, 8/2) = (4, 4)
# 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
# or (500, 50 * 4 * 4) = (500, 800) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-1
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print 'training @ iter = ', iter
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in xrange(n_test_batches)
]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
evaluate_lenet5()
def experiment(state, channel):
evaluate_lenet5(state.learning_rate, dataset=state.dataset)
| webeng/DeepLearningTutorials | code/convolutional_mlp.py | Python | bsd-3-clause | 12,771 | 0.001175 |
#!/usr/bin/env python
# 2015 Copyright (C) White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from white.orm import Backend
from white.model import Pair
from flask.json import dumps
import re
class StorageService(object):
def __init__(self):
self.pair_repo = Backend('storage')
def site_meta(self):
return self.pair_repo.find('system')
def update_site_meta(self, sitename, description, site_page,
posts_per_page, auto_published_comments, comment_moderation_keys):
meta = self.site_meta()
config = meta.json_value()
try:
sitename = sitename and sitename.strip()
if sitename:
config['sitename'] = sitename
description = description and description.strip()
if description:
config['description'] = description
site_page = int(site_page)
if site_page >= 0:
config['site_page'] = site_page
posts_per_page = int(posts_per_page)
if posts_per_page:
config['posts_per_page'] = posts_per_page
auto_published_comments = bool(auto_published_comments)
config['auto_published_comments'] = auto_published_comments
if comment_moderation_keys is not None:
keys = [key.strip() for key in re.split(' +', comment_moderation_keys) if key.strip()]
config['comment_moderation_keys'] = keys
meta.value = dumps(config)
self.pair_repo.update(meta)
return True
except:
return False
| 7anshuai/white | white/domain/storage.py | Python | gpl-2.0 | 2,170 | 0.001382 |
#!/usr/bin/python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import time
import unittest
import node
LEADER = 1
ED = 2
class Cert_6_4_1_LinkLocal(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,3):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ED].set_panid(0xface)
self.nodes[ED].set_mode('rsn')
self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED].enable_whitelist()
def tearDown(self):
for node in self.nodes.itervalues():
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ED].start()
time.sleep(3)
self.assertEqual(self.nodes[ED].get_state(), 'child')
addrs = self.nodes[ED].get_addrs()
for addr in addrs:
if addr[0:4] == 'fe80':
self.nodes[LEADER].ping(addr, size=256)
self.nodes[LEADER].ping(addr)
self.nodes[LEADER].ping('ff02::1', size=256)
self.nodes[LEADER].ping('ff02::1')
if __name__ == '__main__':
unittest.main()
| JiahuiZHONG/Internship_Thread | tests/scripts/thread-cert/Cert_6_4_01_LinkLocal.py | Python | bsd-3-clause | 2,986 | 0.001005 |
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
from pyglet.window.key import user_key
from pyglet.window.mouse import LEFT as MOUSE_LEFT, RIGHT as MOUSE_RIGHT
from mmfparser.player.common import PlayerChild
from mmfparser.player.eventdispatcher import EventDispatcher
DIRECTIONAL_CONTROLS = ('Up', 'Down', 'Left', 'Right')
KEY_LIST = ('Up', 'Down', 'Left', 'Right', 'Button1', 'Button2', 'Button3',
'Button4')
UP, DOWN, LEFT, RIGHT, BUTTON1, BUTTON2, BUTTON3, BUTTON4 = xrange(8)
class Player(PlayerChild, EventDispatcher):
name = ''
keys = None
keyList = None
keyNames = None
pressed_keys = None
lives = None
score = None
controls_ignored = False
use_mouse = False
def initialize(self, control):
controlType = control.getControlType()
if controlType != 'Keyboard':
raise NotImplementedError(
'control type %r unsupported' % controlType)
keys = control.keys
convert = self.player.keyboard.convert
self.keyList = keyList = []
self.keyNames = keyNames = []
for key in (keys.up, keys.down, keys.left, keys.right, keys.button1,
keys.button2, keys.button3, keys.button4):
keyList.append(convert(key.getValue()))
keyNames.append(key.getName())
self.keys = keys = {}
for index, key in enumerate(KEY_LIST):
keys[key] = keyList[index]
self.symbol_to_key = dict([(v, k) for k, v in keys.iteritems()])
self.reset()
self.player.window.push_handlers(
on_key_press = self.key_pressed,
on_key_release = self.key_released
)
self.player.mouse.push_handlers(
on_mouse_press = self.mouse_pressed,
on_mouse_release = self.mouse_released
)
def mouse_pressed(self, x, y, symbol, modifiers):
if self.controls_ignored or not self.use_mouse:
return
if symbol == MOUSE_LEFT:
self.dispatch_event('player_key_pressed', 'Button1')
elif symbol == MOUSE_RIGHT:
self.dispatch_event('player_key_pressed', 'Button2')
def mouse_released(self, x, y, symbol, modifiers):
if self.controls_ignored or not self.use_mouse:
return
if symbol == MOUSE_LEFT:
self.dispatch_event('player_key_released', 'Button1')
elif symbol == MOUSE_RIGHT:
self.dispatch_event('player_key_released', 'Button2')
def key_pressed(self, symbol, modifiers):
if self.controls_ignored:
return
try:
key = self.symbol_to_key[symbol]
if self.use_mouse and key in ('Button1', 'Button2'):
return
self.dispatch_event('player_key_pressed', key)
except KeyError:
pass
def key_released(self, symbol, modifiers):
if self.controls_ignored:
return
try:
key = self.symbol_to_key[symbol]
if self.use_mouse and key in ('Button1', 'Button2'):
return
self.dispatch_event('player_key_released', key)
except KeyError:
pass
def is_down(self, key):
if self.controls_ignored:
return False
if self.use_mouse:
if key == 'Button1':
return self.player.mouse.left
elif key == 'Button2':
return self.player.mouse.right
return self.player.keyboard[self.keys[key]]
def is_down_index(self, value):
if self.controls_ignored:
return False
if self.use_mouse:
if value == BUTTON1:
return self.player.mouse.left
elif value == BUTTON2:
return self.player.mouse.right
return self.player.keyboard[self.keyList[value]]
def set_key(self, index, key):
code = self.player.keyboard.convert(key.getValue())
name_key = KEY_LIST[index]
name = key.getName()
self.keyList[index] = code
self.keyNames[index] = name
self.keys[name_key] = code
self.symbol_to_key[code] = name_key
def set_score(self, value):
self.score = value
self.dispatch_event('score_changed', value)
def set_lives(self, value):
self.lives = value
self.dispatch_event('lives_changed', value)
def reset(self, frame = False):
self.controls_ignored = False
if frame:
return
header = self.player.gameData.header
self.lives = header.initialLives
self.score = header.initialScore
Player.register_event_type('player_key_pressed')
Player.register_event_type('player_key_released')
Player.register_event_type('score_changed')
Player.register_event_type('lives_changed')
class Players(PlayerChild):
items = None
def initialize(self):
header = self.player.gameData.header
self.items = items = []
for control in header.controls.items:
player = self.new(Player)
player.initialize(control)
items.append(player)
def reset(self, frame = False):
for player in self.items:
player.reset(frame)
| joaormatos/anaconda | mmfparser/player/players.py | Python | gpl-3.0 | 6,005 | 0.006828 |
import inspect
import pytest
def parametrize(tests, arity=None):
'''Helper for parametrizing pytest tests.
Expects a list of lambdas, one per test. Each lambda must return
the parameters for its respective test.
Test identifiers will be automatically generated, from the test
number and its lambda definition line (1.10, 2.12, 3.20, ...).
If arity is None, the arguments being parametrized will be automatically
set from the function's last arguments, according to the number of
parameters each test returns.
'''
ids = []
argvalues = []
for n, t in enumerate(tests):
line = inspect.getsourcelines(t)[1]
ids.append('%u:%u' % (n+1, line))
argvalues.append(t())
if arity is None:
arity = len(argvalues[0])
assert arity > 0
def decorator(fn):
argnames = list(
parameter.name
for parameter in inspect.signature(fn).parameters.values()
if parameter.default is inspect.Parameter.empty
)[-arity:]
if arity == 1:
argnames = argnames[0]
return pytest.mark.parametrize(argnames, argvalues, ids=ids)(fn)
return decorator
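# Usage sketch (the test function below is hypothetical; this module only provides
# the decorator): each lambda returns one test's parameters, and ids encode the test
# number and the lambda's definition line, as described in the docstring above.
#
#     @parametrize([
#         lambda: ('2 + 2', 4),
#         lambda: ('10 // 3', 3),
#     ])
#     def test_eval(expression, expected):
#         assert eval(expression) == expected
#
# With arity left as None, the last two parameters of test_eval are parametrized.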
| stanographer/plover | plover_build_utils/testing/parametrize.py | Python | gpl-2.0 | 1,189 | 0.000841 |
#!/usr/bin/python2.7
# cuon_server install
import os, sys, platform
import subprocess, shlex, shutil
import commands
import locale
import pwd, grp
from gi.repository import Gtk
import ConfigParser
class cssi():
def __init__(self, user=None):
self.user = user
self.win = None
self.grid = None
self.CalendarDir="/usr/local/iCal"
self.program_names = ["Postgres", "Subversion", "ssh"]
self.programs = []
self.programs_gentoo = ["/usr/bin/postmaster", "/usr/bin/svn", "/usr/bin/ssh-keygen"]
self.programs_ubuntu = ["/usr/bin/pg_ctlcluster", "/usr/bin/svn", "/usr/bin/ssh-keygen"]
self.programs_debian = ["/usr/bin/pg_ctlcluster", "/usr/bin/svn", "/usr/bin/ssh-keygen"]
self.program_installer_gentoo = [{"Postgres":["dev-db/postgresql-server", "app-admin/eselect-postgresql", "dev-db/postgresql-docs"]}, {"Subversion":["dev-vcs/subversion"]}, {"ssh":["virtual/ssh"]}]
self.program_installer_ubuntu = [{"Postgres":["postgresql-9.1", "postgresql-client-9.1"]},{"Subversion":["subversion"]}, {"ssh":["ssh"]}]
self.program_installer_debian = [{"Postgres":["postgresql-9.1", "postgresql-client-9.1"]},{"ssh":["ssh"]} ]
self.program_installer = []
self.programs_exist = []
self.python_modules = ["PIL", "reportlab", "twisted.web", "twisted.words", "pygments", "webkit", "pg"]
self.python_modules_exist = []
self.python_installer = []
self.python_installer_gentoo = [{"PIL":["dev-python/imaging"]}, {"reportlab":["dev-python/reportlab"]}, {"twisted.web":["dev-python/twisted-web"]}, {"twisted.words":[]}, {"pygments":[]}, {"webkit":["dev-python/pywebkitgtk"]},{"pg":[]} ]
self.python_installer_ubuntu = [{"PIL":["python-imaging", "python-imaging-sane"]}, {"reportlab":["python-reportlab"]} , {"twisted.web":["python-twisted-web" ]}, {"twisted.words":["python-twisted-words"]}, {"pygments":["python-pygments"]}, {"webkit":["python-webkit"]},{"pg":["python-pygresql"]} ]
self.python_installer_debian = []
self.OS_Installer = None
self.OS = None
self.Sudo = "" # or gksu
self.Terminals = ["gnome-terminal", "konsole", "xfce4-terminal", "terminal", "xterm"]
self.Terminal = None
self.cpServer = ConfigParser.ConfigParser()
self.CUON_FS = "/etc/cuon"
self.dia = MessageDialogWindow()
def checkOS(self):
print "start check OS"
if sys.platform.startswith('linux'):
# Linux-specific code here...
os_desc = os.uname()[1].upper()
os_name = os.uname()[2].upper()
os_machine = os.uname()[3].upper()
os_type = os.uname()[4].upper()
os_dist = platform.linux_distribution()[0].upper()
print os_dist, os_name, os_machine, os_type
if os_dist.find("GENTOO")>= 0:
if os.path.exists("/usr/bin/emerge"):
self.OS = "GENTOO"
self.program_installer = self.program_installer_gentoo
self.python_installer = self.python_installer_gentoo
self.programs = self.programs_gentoo
print 'Check1', self.programs , self.programs_gentoo
self.OS_Installer = "/usr/bin/emerge "
elif os_dist.find("UBUNTU")>= 0:
if os.path.exists("/usr/bin/apt-get"):
self.OS = "UBUNTU"
self.program_installer = self.program_installer_ubuntu
self.python_installer = self.python_installer_ubuntu
self.programs = self.programs_ubuntu
self.OS_Installer = "/usr/bin/apt-get install "
elif os_dist.find("DEBIAN")>= 0:
if os.path.exists("/usr/bin/apt-get"):
self.OS = "DEBIAN"
self.program_installer = self.program_installer_debian
self.python_installer = self.python_installer_debian
self.programs = self.programs_debian
self.OS_Installer = "/usr/bin/apt-get install "
print "OS = ", self.OS
for j in self.Terminals:
if os.path.exists("/usr/bin/" + j):
self.Terminal = "/usr/bin/" + j
print "Terminal = " + self.Terminal
break
def checkEnvironment(self):
self.programs_exist = []
self.python_modules_exist = []
print 'programs = ', self.programs
for program in self.programs:
print program
if os.path.exists(program):
self.programs_exist.append(True)
else:
self.programs_exist.append(False)
print 'Exist 8', self.programs, self.programs_exist
for python_module in self.python_modules:
try:
print python_module
if python_module == "webkit":
if os.path.exists("/usr/lib/python2.7/site-packages/webkit/webkit.so"):
self.python_modules_exist.append(True)
elif os.path.exists("/usr/lib/python2.7/dist-packages/webkit/webkit.so"):
self.python_modules_exist.append(True)
else:
self.python_modules_exist.append(False)
else:
if __import__(python_module):
self.python_modules_exist.append(True)
except ImportError:
self.python_modules_exist.append(False)
except:
self.python_modules_exist.append(False)
print 'Exist 9', self.python_modules, self.python_modules_exist
def on_button_clicked(self, widget):
print "Hello World"
self.dia.wrong_requirement()
def start(self, again=False):
print 'again', again
self.checkOS()
self.checkEnvironment()
if not again:
self.win = Gtk.Window()
self.win.connect("delete-event", Gtk.main_quit)
self.button = Gtk.Button(label="Next")
self.button.connect("clicked", self.on_check_missing)
if again:
self.win.remove(self.grid)
self.grid = Gtk.Table(10, 4, True)
z= 0
print self.programs_exist
for name in self.program_names:
print z, self.programs_exist[z]
self.grid.attach(Gtk.Label(name), 0, 1, z, z+1)
self.grid.attach(Gtk.Label(`self.programs_exist[z]`), 1, 2, z,z+1)
z += 1
z = 0
for pName in self.python_modules:
l1 = Gtk.Label(pName)
l1.set_justify(Gtk.Justification.LEFT)
self.grid.attach(l1, 3, 4, z,z+1, 0, 0.5, 0, 0.5)
self.grid.attach(Gtk.Label(`self.python_modules_exist[z]`), 4, 5,z, z+1, 0, 0.5, 0, 0.5)
z += 1
if not again:
self.grid.attach(self.button, 4, 5, 9 , 10)
self.win.add(self.grid)
self.win.show_all()
self.dia.warn(self.user)
if self.dia.Q2 == False:
sys.exit(0)
if not again:
Gtk.main()
def on_check_missing(self, widget, again=False):
if again:
self.start(again)
if not self.Terminal:
self.dia.AbortInfo1()
sys.exit(0)
if False in self.python_modules_exist or False in self.programs_exist:
if again:
self.dia.error1()
sys.exit(0)
self.dia.wrong_requirement()
print 'q1', self.dia.Q1
if self.dia.Q1:
self.try_install_missing_programs()
else:
# All is ok, next step ssh
self.configure_ssh()
def try_install_missing_programs(self):
s = ""
for i in range(len(self.programs_exist)):
if not self.programs_exist[i]:
print self.programs_exist
print self.program_installer
print self.program_names
print self.program_names[i]
for j in self.program_installer[i][self.program_names[i]]:
s += j + " "
if s:
s = self.Terminal + ' -e "' + self.Sudo + " " + self.OS_Installer +" " + s +'"'
print s
#shellcommand = shlex.split('"' + s + '"')
liStatus = subprocess.call(args = s, shell = True)
s = ""
for i in range(len(self.python_modules_exist)):
if not self.python_modules_exist[i]:
try:
print i, self.python_modules[i], self.python_installer[i]
print self.python_installer[i][self.python_modules[i]]
for j in self.python_installer[i][self.python_modules[i]]:
s += j + " "
except:
pass
if s:
s = self.Terminal + ' -e "' + self.Sudo + " " +self.OS_Installer +' ' + s +'"'
print "start Terminal with " + s
#shellcommand = shlex.split(s )
#print shellcommand
liStatus = subprocess.call(args=s, shell=True )
print liStatus
self.checkEnvironment()
self.on_check_missing(None, again=True)
def configure_ssh(self):
self.dia.sshInfo1()
print 'open ssh terminal 1'
# generate key
s = self.Terminal + ' -e "' + " /usr/bin/ssh-keygen -t rsa -f /root/cuon_server_key" + '"'
liStatus = subprocess.call(args=s, shell=True )
print 'ok, done', s
#copy to user .ssh as id_rsa
s = self.Terminal + ' -e "' + self.Sudo +" mkdir /home/" + self.user + "/.ssh ; mv /root/cuon_server_key /home/" + self.user + "/.ssh/id_rsa ; mv /root/cuon_server_key.pub /home/" + self.user + "/.ssh/id_rsa.pub ; chown " + self.user + ":" + self.user + " /home/" + self.user + "/.ssh/id_rsa* ; mkdir /root/.ssh " + '"'
print s
liStatus = subprocess.call(args=s, shell=True )
# insert them to the authorized_keys
s = self.Terminal + ' -e "' + self.Sudo +" cat /home/" + self.user + "/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys " + '"'
print s
liStatus = subprocess.call(args=s, shell=True )
# next Step postgres
self.configure_postgres()
def configure_postgres(self):
# insert line at pg_hba.conf
# at a line to the pg_hba.conf
# check different locations, very bad (
h = None
z=0
for j in ["/etc/postgresql/9.1/main/pg_hba.conf", "/etc/postgresql-9.1/pg_hba.conf", "/etc/postgresql-9.1/pg_hba.conf", "/etc/postgresql-9.0/pg_hba.conf"]:
if os.path.exists(j):
h= j
break
z+= 1
if h:
f = open(h, 'r')
s = f.readline()
newConf = ""
while s:
#print s
#print s[0:5]
if s[0:5]== "local":
print "local found", s, s.find("postgres")
if s.find("postgres") > 0:
print "postgres in local found"
s = s.replace("peer", "trust")
print "replaced = ", s
newConf += s
s = f.readline()
#print newConf
f.close()
f = open(h, 'w')
f.write(newConf)
f.close()
f = open(h, 'a')
f.write("# NEW generated Line for the cuon Database \nlocal cuon all trust\n")
f.close()
#s = self.Terminal + ' -e ' + self.Sudo + ' echo "# NEW generated Line for the cuon Database \nlocal cuon all trust \n" >> ' + h
#print s
#iStatus = subprocess.call(args=s, shell=True )
s = None
if self.OS in ["DEBIAN", "UBUNTU"]:
#s = self.Terminal + ' -e ' + "/etc/init.d/postgresql restart "
s = self.Terminal + ' -e ' + '"/etc/init.d/postgresql restart"'
if self.OS == "GENTOO":
if z == 2:
s = self.Terminal + ' -e ' + '"/etc/init.d/postgresql-9.1 restart" '
elif z == 3:
s = self.Terminal + ' -e ' + '"/etc/init.d/postgresql-9.0 restart " '
if s:
print s
iStatus = subprocess.call(args=s, shell=True )
else:
sys.exit(0)
#ok, create database and user
#set the path
pa = ""
for j in ["/usr/lib/postgresql-9.1/bin", "/usr/lib/postgresql-9.0/bin","/usr/lib/postgresql/9.0/bin", "/usr/lib/postgresql/9.1/bin" ]:
if os.path.exists(j):
pa = j + "/"
break
s = self.Terminal + ' -e ' + pa + '"createdb -Upostgres -E utf-8 cuon" '
print "create database = " , s
liStatus = subprocess.call(args=s, shell=True )
s = self.Terminal + ' -e ' + pa + '"createlang -Upostgres -d cuon plpgsql"'
liStatus = subprocess.call(args=s, shell=True )
s = self.Terminal + ' -e ' + pa + '"createuser -Upostgres -d -s cuon_admin"'
liStatus = subprocess.call(args=s, shell=True )
s = self.Terminal + ' -e ' + pa + '"createuser -Upostgres -D -S -R zope"'
liStatus = subprocess.call(args=s, shell=True )
self.configure_cuon()
def configure_cuon(self):
# version 83
setupDir = "/home/" + self.user+ "/Projekte/"
setupStartDir = setupDir + "cuon/cuon_client"
try:
sLocale = locale.getdefaultlocale()[0].split("_")[0]
except Exception, params:
print Exception, params
sLocale = "us"
if not os.path.exists(setupDir):
os.mkdir(setupDir)
os.chdir(setupDir)
s = self.Terminal + ' -e ' + '"svn co -r 83 https://cuon.svn.sourceforge.net/svnroot/cuon cuon "'
print s
liStatus = subprocess.call(args=s, shell=True )
print "get svn ", liStatus
# now write the setup.ini
os.chdir(setupStartDir)
f = open("cuon_setup.ini", "w")
s = "[local]\nxmlrpc_port = 7080\nprotocol = http\ndescription = Install on Localhost\nssh_port = 22\nip = 127.0.0.1\ndefault = True\nlocale = " + sLocale + "\ncuonadmin = cuon_admin"
f.write(s)
f.close()
os.chdir(setupDir +"cuon/LGPL")
s = self.Terminal + ' -e ' + '"tar -xvzf iCalendar-0.11.tgz ; cd iCalendar ; python setup.py install"'
print s
liStatus = subprocess.call(args=s, shell=True )
dirList=os.listdir(setupDir)
os.chown(setupDir, pwd.getpwnam(self.user).pw_uid, grp.getgrnam(self.user).gr_gid)
for fname in dirList:
os.chown(os.path.join(setupDir, fname), pwd.getpwnam(self.user).pw_uid, grp.getgrnam(self.user).gr_gid)
# now write install config files
# Now write the config files
server_ini = self.CUON_FS + "/server.ini"
if not os.path.exists(server_ini):
shutil.copy(setupDir +"cuon/cuon_server/examples/server.ini", self.CUON_FS )
shutil.copy(setupDir +"cuon/cuon_server/examples/user_cfg", self.CUON_FS )
shutil.copy(setupDir +"cuon/cuon_server/examples/clients.ini", self.CUON_FS )
shutil.copy(setupDir +"cuon/cuon_server/examples/menus.ini", self.CUON_FS )
try:
self.cpServer, f = self.getParser(self.CUON_FS + "/server.ini")
# Instances
value = self.getConfigOption('INSTANCES','XMLRPC')
if value:
self.XMLRPC_INSTANCES = int(value)
except:
pass
class MessageDialogWindow(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self)
        self.Q1 = False
        self.Q2 = False
def AbortInfo1(self):
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,
Gtk.ButtonsType.OK, "A valid terminal is missing, we must abort, sorry.")
dialog.format_secondary_text(
"Install a Terminal Emulator like Gnome-Terminal, Konsole, terminal or similar")
dialog.run()
print "INFO dialog closed"
dialog.destroy()
def sshInfo1(self):
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,
Gtk.ButtonsType.OK, "We install now a pgp key for root access to the cuon-server")
dialog.format_secondary_text(
"Please, press ONLY Enter in the terminal window !! No Passphrase is allowed !!")
dialog.run()
print "INFO dialog closed"
dialog.destroy()
def sshInfo2(self):
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,
Gtk.ButtonsType.OK, "We install now a pgp key at the authorized_keys file")
dialog.format_secondary_text(
"Perhaps you must enter a password for su or sudo.")
dialog.run()
print "INFO dialog closed"
dialog.destroy()
def error1(self):
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,
Gtk.ButtonsType.CANCEL, "There are again missing program files")
dialog.format_secondary_text(
"Sorry, you shall try manually install the missing files")
dialog.run()
print "ERROR dialog closed"
dialog.destroy()
def warn(self, user ):
self.Q2 = False
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,
Gtk.ButtonsType.OK_CANCEL, "WARNING Warning WARNING")
dialog.format_secondary_text(
"This setup install a cuon-server on this computer. To do this, the user " + user + " get lasting root access\n Please, press cancel if you are not sure that you want this !!! PLEASE !!!!")
response = dialog.run()
if response == Gtk.ResponseType.OK:
print "WARN dialog closed by clicking OK button"
self.Q2 = True
elif response == Gtk.ResponseType.CANCEL:
print "WARN dialog closed by clicking CANCEL button"
dialog.destroy()
def wrong_requirement(self):
self.Q1 = False
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.QUESTION,
Gtk.ButtonsType.YES_NO, "Some Programs or Python Module are missing!")
dialog.format_secondary_text(
"Shall I try to install them ?")
response = dialog.run()
if response == Gtk.ResponseType.YES:
print "QUESTION dialog closed by clicking YES button"
self.Q1 = True
elif response == Gtk.ResponseType.NO:
print "QUESTION dialog closed by clicking NO button"
dialog.destroy()
#
#if [ -d $CalendarDir ]; then
# echo "dir iCal ok"
# cd $CalendarDir/iCalendar
# sudo python ./setup.py install
#
# ## create database
# #sudo su postgres
# #createdb -E utf-8 cuon
# #createlang -d cuon plpgsql
# #echo "now creating the user "zope" with no Rights"
# #createuser zope
# #echo "and this is your cuonadmin user with superrights"
# #createuser cuonadmin
#
#else
# echo " No Calendar found, something wrong! We stop it."
#fi
#
def getConfigOption(self, section, option, configParser = None):
value = None
if configParser:
cps = configParser
else:
cps = self.cpServer
if cps.has_option(section,option):
try:
value = cps.get(section, option).strip()
#print 'options = ',option, value
except:
value = None
#print 'getConfigOption', section + ', ' + option + ' = ' + value
return value
def getParser(self, sFile):
cpParser = ConfigParser.ConfigParser()
f = open(sFile)
#print 'f1 = ', f
cpParser.readfp(f)
#print 'cpp', cpParser
return cpParser, f
print sys.argv
if len(sys.argv) > 1:
print sys.argv[1]
t1 = cssi(user=sys.argv[1])
t1.start()
| CuonDeveloper/cuon | Distributionen/CuonServer/cuon-simple-server-install.py | Python | gpl-3.0 | 21,083 | 0.014846 |
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.exos import exos_config
from units.modules.utils import set_module_args
from .exos_module import TestExosModule, load_fixture
class TestExosConfigModule(TestExosModule):
module = exos_config
def setUp(self):
super(TestExosConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.exos.exos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.exos.exos_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.exos.exos_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestExosConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
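        # every test starts from the same fixture as the device running-config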
config_file = 'exos_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_exos_config_unchanged(self):
src = load_fixture('exos_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_exos_config_src(self):
src = load_fixture('exos_config_src.cfg')
set_module_args(dict(src=src))
commands = ['configure ports 1 description-string "IDS"',
'configure snmp sysName "marble"']
self.execute_module(changed=True, commands=commands)
def test_exos_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_exos_config_save_always(self):
self.run_commands.return_value = 'configure snmp sysName "marble"'
set_module_args(dict(save_when='always'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 1)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
args = self.run_commands.call_args[0][1]
self.assertIn('save configuration', args['command'])
def test_exos_config_save_changed_true(self):
src = load_fixture('exos_config_src.cfg')
set_module_args(dict(src=src, save_when='changed'))
commands = ['configure ports 1 description-string "IDS"',
'configure snmp sysName "marble"']
self.execute_module(changed=True, commands=commands)
self.assertEqual(self.run_commands.call_count, 1)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
args = self.run_commands.call_args[0][1]
self.assertIn('save configuration', args['command'])
def test_exos_config_save_changed_true_check_mode(self):
src = load_fixture('exos_config_src.cfg')
set_module_args(dict(src=src, save_when='changed', _ansible_check_mode=True))
commands = ['configure ports 1 description-string "IDS"',
'configure snmp sysName "marble"']
self.execute_module(changed=True, commands=commands)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 0)
def test_exos_config_save_changed_false(self):
set_module_args(dict(save_when='changed'))
self.execute_module(changed=False)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
def test_exos_config_save_modified_false(self):
mock_get_startup_config_text = patch('ansible.modules.network.exos.exos_config.get_startup_config_text')
get_startup_config_text = mock_get_startup_config_text.start()
get_startup_config_text.return_value = load_fixture('exos_config_config.cfg')
set_module_args(dict(save_when='modified'))
self.execute_module(changed=False)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(get_startup_config_text.call_count, 1)
self.assertEqual(self.load_config.call_count, 0)
mock_get_startup_config_text.stop()
def test_exos_config_save_modified_true(self):
mock_get_startup_config_text = patch('ansible.modules.network.exos.exos_config.get_startup_config_text')
get_startup_config_text = mock_get_startup_config_text.start()
get_startup_config_text.return_value = load_fixture('exos_config_modified.cfg')
set_module_args(dict(save_when='modified'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 1)
self.assertTrue(self.get_config.call_count > 0)
self.assertEqual(get_startup_config_text.call_count, 1)
self.assertEqual(self.load_config.call_count, 0)
mock_get_startup_config_text.stop()
def test_exos_config_lines(self):
set_module_args(dict(lines=['configure snmp sysName "marble"']))
commands = ['configure snmp sysName "marble"']
self.execute_module(changed=True, commands=commands)
def test_exos_config_before(self):
set_module_args(dict(lines=['configure snmp sysName "marble"'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'configure snmp sysName "marble"']
self.execute_module(changed=True, commands=commands, sort=False)
def test_exos_config_after(self):
set_module_args(dict(lines=['configure snmp sysName "marble"'], after=['test1', 'test2']))
commands = ['configure snmp sysName "marble"', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False)
def test_exos_config_before_after_no_change(self):
set_module_args(dict(lines=['configure snmp sysName "x870"'],
before=['test1', 'test2'],
after=['test3', 'test4']))
self.execute_module()
def test_exos_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['configure snmp sysName "x870"'], config=config))
commands = ['configure snmp sysName "x870"']
self.execute_module(changed=True, commands=commands)
def test_exos_config_match_none(self):
lines = ['configure snmp sysName "x870"']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines)
def test_exos_config_src_and_lines_fails(self):
args = dict(src='foo', lines='foo')
set_module_args(args)
self.execute_module(failed=True)
def test_exos_config_match_exact_requires_lines(self):
args = dict(match='exact')
set_module_args(args)
self.execute_module(failed=True)
def test_exos_config_match_strict_requires_lines(self):
args = dict(match='strict')
set_module_args(args)
self.execute_module(failed=True)
def test_exos_config_replace_block_requires_lines(self):
args = dict(replace='block')
set_module_args(args)
self.execute_module(failed=True)
def test_exos_config_replace_config_requires_src(self):
args = dict(replace='config')
set_module_args(args)
self.execute_module(failed=True)
def test_exos_diff_running_unchanged(self):
args = dict(diff_against='running', _ansible_diff=True)
set_module_args(args)
self.execute_module(changed=False)
def test_exos_diff_running_unchanged_check(self):
args = dict(diff_against='running',
_ansible_diff=True,
_ansible_check_mode=True)
set_module_args(args)
self.execute_module(changed=False)
def test_exos_diff_startup_unchanged(self):
mock_get_startup_config_text = patch('ansible.modules.network.exos.exos_config.get_startup_config_text')
get_startup_config_text = mock_get_startup_config_text.start()
get_startup_config_text.return_value = load_fixture('exos_config_config.cfg')
args = dict(diff_against='startup', _ansible_diff=True)
set_module_args(args)
self.execute_module(changed=False)
self.assertEqual(get_startup_config_text.call_count, 1)
mock_get_startup_config_text.stop()
def test_exos_diff_startup_changed(self):
mock_get_startup_config_text = patch('ansible.modules.network.exos.exos_config.get_startup_config_text')
get_startup_config_text = mock_get_startup_config_text.start()
get_startup_config_text.return_value = load_fixture('exos_config_modified.cfg')
args = dict(diff_against='startup', _ansible_diff=True)
set_module_args(args)
self.execute_module(changed=True)
self.assertEqual(get_startup_config_text.call_count, 1)
mock_get_startup_config_text.stop()
def test_exos_diff_intended_unchanged(self):
args = dict(diff_against='intended',
intended_config=load_fixture('exos_config_config.cfg'),
_ansible_diff=True)
set_module_args(args)
self.execute_module(changed=False)
def test_exos_diff_intended_modified(self):
args = dict(diff_against='intended',
intended_config=load_fixture('exos_config_modified.cfg'),
_ansible_diff=True)
set_module_args(args)
self.execute_module(changed=True)
| maartenq/ansible | test/units/modules/network/exos/test_exos_config.py | Python | gpl-3.0 | 10,664 | 0.001407 |
from __future__ import unicode_literals
import datetime
import types
import os
from importlib import import_module
from django.utils import six
from django.db import models
from django.db.models.loading import cache
from django.db.migrations.loader import MigrationLoader
from django.utils.encoding import force_text
from django.utils.functional import Promise
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"dependencies": repr(self.migration.dependencies),
}
imports = set()
# Deconstruct operations
operation_strings = []
for operation in self.migration.operations:
name, args, kwargs = operation.deconstruct()
arg_strings = []
for arg in args:
arg_string, arg_imports = self.serialize(arg)
arg_strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in kwargs.items():
arg_string, arg_imports = self.serialize(arg)
imports.update(arg_imports)
arg_strings.append("%s = %s" % (kw, arg_string))
operation_strings.append("migrations.%s(%s\n )" % (name, "".join("\n %s," % arg for arg in arg_strings)))
items["operations"] = "[%s\n ]" % "".join("\n %s," % s for s in operation_strings)
# Format imports nicely
imports.discard("from django.db import models")
if not imports:
items["imports"] = ""
else:
items["imports"] = "\n".join(imports) + "\n"
return (MIGRATION_TEMPLATE % items).encode("utf8")
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
migrations_module_name = MigrationLoader.migrations_module(self.migration.app_label)
app_module = cache.get_app(self.migration.app_label)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_module_name)
basedir = os.path.dirname(migrations_module.__file__)
except ImportError:
# Alright, see if it's a direct submodule of the app
oneup = ".".join(migrations_module_name.split(".")[:-1])
app_oneup = ".".join(app_module.__name__.split(".")[:-1])
if oneup == app_oneup:
basedir = os.path.join(os.path.dirname(app_module.__file__), migrations_module_name.split(".")[-1])
else:
raise ImportError("Cannot open migrations module %s for app %s" % (migrations_module_name, self.migration.app_label))
return os.path.join(basedir, self.filename)
@classmethod
def serialize_deconstructed(cls, path, args, kwargs):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = set(["from django.db import models"])
name = "models.%s" % name
else:
imports = set(["import %s" % module])
name = path
arg_strings = []
for arg in args:
arg_string, arg_imports = cls.serialize(arg)
arg_strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in kwargs.items():
arg_string, arg_imports = cls.serialize(arg)
imports.update(arg_imports)
arg_strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(arg_strings)), imports
@classmethod
def serialize(cls, value):
"""
Serializes the value to a string that's parsable by Python, along
with any needed imports to make that string work.
More advanced than repr() as it can encode things
like datetime.datetime.now.
"""
# Sequences
if isinstance(value, (list, set, tuple)):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
if isinstance(value, set):
format = "set([%s])"
elif isinstance(value, tuple):
format = "(%s,)"
else:
format = "[%s]"
return format % (", ".join(strings)), imports
# Dictionaries
elif isinstance(value, dict):
imports = set()
strings = []
for k, v in value.items():
k_string, k_imports = cls.serialize(k)
v_string, v_imports = cls.serialize(v)
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
# Datetimes
elif isinstance(value, (datetime.datetime, datetime.date)):
return repr(value), set(["import datetime"])
# Simple types
elif isinstance(value, six.integer_types + (float, six.binary_type, six.text_type, bool, type(None))):
return repr(value), set()
# Promise
elif isinstance(value, Promise):
return repr(force_text(value)), set()
# Django fields
elif isinstance(value, models.Field):
attr_name, path, args, kwargs = value.deconstruct()
return cls.serialize_deconstructed(path, args, kwargs)
# Functions
elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
# Special-cases, as these don't have im_class
special_cases = [
(datetime.datetime.now, "datetime.datetime.now", ["import datetime"]),
(datetime.datetime.utcnow, "datetime.datetime.utcnow", ["import datetime"]),
(datetime.date.today, "datetime.date.today", ["import datetime"]),
]
for func, string, imports in special_cases:
if func == value: # For some reason "utcnow is not utcnow"
return string, set(imports)
# Method?
if hasattr(value, "im_class"):
klass = value.im_class
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, value.__name__), set(["import %s" % module])
elif hasattr(value, 'deconstruct'):
return cls.serialize_deconstructed(*value.deconstruct())
elif value.__name__ == '<lambda>':
raise ValueError("Cannot serialize function: lambda")
elif value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % value)
else:
module = value.__module__
return "%s.%s" % (module, value.__name__), set(["import %s" % module])
# Classes
elif isinstance(value, type):
special_cases = [
(models.Model, "models.Model", []),
]
for case, string, imports in special_cases:
if case is value:
return string, set(imports)
if hasattr(value, "__module__"):
module = value.__module__
return "%s.%s" % (module, value.__name__), set(["import %s" % module])
# Uh oh.
else:
raise ValueError("Cannot serialize: %r" % value)
MIGRATION_TEMPLATE = """# encoding: utf8
from django.db import models, migrations
%(imports)s
class Migration(migrations.Migration):
dependencies = %(dependencies)s
operations = %(operations)s
"""
| ZhaoCJ/django | django/db/migrations/writer.py | Python | bsd-3-clause | 7,931 | 0.001891 |
from __future__ import absolute_import
from mock import patch
from datadog.util.hostname import get_hostname
from sentry.metrics.datadog import DatadogMetricsBackend
from sentry.testutils import TestCase
class DatadogMetricsBackendTest(TestCase):
def setUp(self):
self.backend = DatadogMetricsBackend(prefix='sentrytest.')
@patch('datadog.threadstats.base.ThreadStats.increment')
def test_incr(self, mock_incr):
self.backend.incr('foo', instance='bar')
mock_incr.assert_called_once_with(
'sentrytest.foo', 1,
tags=['instance:bar'],
host=get_hostname(),
)
@patch('datadog.threadstats.base.ThreadStats.timing')
def test_timing(self, mock_timing):
self.backend.timing('foo', 30, instance='bar')
mock_timing.assert_called_once_with(
'sentrytest.foo', 30,
sample_rate=1,
tags=['instance:bar'],
host=get_hostname(),
)
| Kryz/sentry | tests/sentry/metrics/test_datadog.py | Python | bsd-3-clause | 979 | 0 |
# -*- coding: utf-8 -*-
# A WordPress compiler plugin for Nikola
#
# Copyright (C) 2014-2015 by Felix Fontein
# Copyright (C) by the WordPress contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import unicode_literals
import os
import io
import json
import re
import sys
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, write_metadata
from nikola.utils import get_logger, STDERR_HANDLER
from . import default_filters, php, plugin_interface, shortcodes
_LOGGER = get_logger('compile_wordpress', STDERR_HANDLER)
class Context(object):
id = None
def __init__(self, id, name=None, additional_data=None):
self.id = id
self.name = name
self.__file_deps_fragment = set()
self.__file_deps_page = set()
self.__uptodate_deps_fragment = list()
self.__uptodate_deps_page = list()
self.__additional_data = additional_data or {}
self.__plugin_data = {}
def get_name(self):
return "(unknown:{0})".format(self.id) if self.name is None else self.name
def add_file_dependency(self, filename, add='both'):
if add not in {'fragment', 'page', 'both'}:
raise Exception("Add parameter is '{0}', but must be either 'fragment', 'page', or 'both'.".format(add))
if add == 'fragment' or add == 'both':
self.__file_deps_fragment.add(filename)
if add == 'page' or add == 'both':
self.__file_deps_page.add(filename)
def add_uptodate_dependency(self, uptodate_dependency, add='both'):
if add not in {'fragment', 'page', 'both'}:
raise Exception("Add parameter is '{0}', but must be either 'fragment', 'page', or 'both'.".format(add))
if add == 'fragment' or add == 'both':
self.__uptodate_deps_fragment.append(uptodate_dependency)
if add == 'page' or add == 'both':
self.__uptodate_deps_page.append(uptodate_dependency)
def has_dependencies(self):
return (len(self.__file_deps_fragment) > 0 or len(self.__file_deps_page) > 0 or
len(self.__uptodate_deps_fragment) > 0 or len(self.__uptodate_deps_page) > 0)
def get_file_dependencies_fragment(self):
return sorted(list(self.__file_deps_fragment))
def get_file_dependencies_page(self):
return sorted(list(self.__file_deps_page))
def get_uptodate_dependencies_fragment(self):
return self.__uptodate_deps_fragment
def get_uptodate_dependencies_page(self):
return self.__uptodate_deps_page
def get_additional_data(self, name):
return self.__additional_data.get(name)
def store_plugin_data(self, plugin_name, key, data):
if plugin_name not in self.__plugin_data:
self.__plugin_data[plugin_name] = {}
self.__plugin_data[plugin_name][key] = data
def get_plugin_data(self, plugin_name, key, default_value=None):
plugin_data = self.__plugin_data.get(plugin_name)
return default_value if plugin_data is None else plugin_data.get(key, default_value)
def inc_plugin_counter(self, plugin_name, key):
counter = self.get_plugin_data(plugin_name, key, 0)
counter += 1
self.store_plugin_data(plugin_name, key, counter)
return counter
def __str__(self):
return "Context<" + str(self.id) + ">(" + str(self.__file_deps_fragment) + ", " + str(self.__file_deps_page) + ", " + str(self.__uptodate_deps_fragment) + ", " + str(self.__uptodate_deps_page) + ")"
class CompileWordpress(PageCompiler):
"""Compiles a subset of Wordpress into HTML."""
name = "wordpress"
demote_headers = True
site = None
def __init__(self):
super(CompileWordpress, self).__init__()
self.__filters = dict()
self.__shortcodes = shortcodes.ShortCodes()
self.__default_wordpress_filters = default_filters.DefaultWordpressFilters(self.__shortcodes)
self.add_filter("the_content", lambda data, context: self.__default_wordpress_filters.wptexturize(data))
self.add_filter("the_content", lambda data, context: self.__default_wordpress_filters.convert_smilies(data))
self.add_filter("the_content", lambda data, context: self.__default_wordpress_filters.convert_chars(data))
self.add_filter("the_content", lambda data, context: self.__default_wordpress_filters.wpautop(data))
self.add_filter("the_content", lambda data, context: self.__default_wordpress_filters.shortcode_unautop(data))
self.add_filter('the_content', lambda data, context: self.__shortcodes.do_shortcode(data, context), 11) # AFTER wpautop()
def _register_plugins(self):
# collect plugins
count = 0
modules = {
'default_filters': default_filters,
'php': php,
'plugin_interface': plugin_interface,
'shortcodes': shortcodes,
'wordpress': sys.modules[__name__]
}
for plugin in self.get_compiler_extensions():
_LOGGER.info("Registered WordPress plugin {0}".format(plugin.name))
plugin.plugin_object.register(self, modules)
count += 1
_LOGGER.info("Registered {0} WordPress plugin{1}".format(count, "s" if count != 1 else ""))
def register_head_code(self, head_function):
# FIXME: implement
# (not even sure if it's really implementable...)
raise NotImplementedError()
def add_filter(self, tag, filter_function, priority=10):
if tag not in self.__filters:
self.__filters[tag] = list()
f = self.__filters[tag]
# find where to insert priority
i = 0
while i < len(f) and f[i][0] < priority:
i += 1
if i < len(f) and f[i][0] > priority:
f.insert(i, (priority, list()))
elif i == len(f):
f.append((priority, list()))
f[i][1].append(filter_function)
def filter(self, tag, data, context):
if tag not in self.__filters:
return data
for prio, fs in self.__filters[tag]:
for f in fs:
data = f(data, context)
return data
def register_shortcode(self, tag, function):
self.__shortcodes.register_shortcode(tag, function)
def unregister_shortcode(self, tag):
self.__shortcodes.unregister_shortcode(tag)
def do_shortcode(self, data):
return self.__shortcodes.do_shortcode(data)
def set_site(self, site):
super(CompileWordpress, self).set_site(site)
self._register_plugins()
def __formatData(self, data, context, source=None):
output = self.filter("the_content", data, context)
left_shortcodes = self.__shortcodes.get_containing_shortcodes_set(output)
if len(left_shortcodes) > 0 and source is not None:
_LOGGER.warning("The post '" + source + "' still contains shortcodes: " + str(left_shortcodes))
return output
def compile_to_string(self, source_data, name=None, additional_data=None):
context = Context(hash(source_data), name=name, additional_data=additional_data)
return self.__formatData(source_data, context)
def _read_extra_deps(self, post):
dep_path = post.base_path + '.dep'
if os.path.isfile(dep_path):
with io.open(dep_path, 'rb') as file:
result = json.loads(file.read().decode('utf-8'))
if type(result) == list and len(result) == 4:
return result
return ([], [], [], [])
def register_extra_dependencies(self, post):
post.add_dependency(lambda: self._read_extra_deps(post)[0], 'fragment')
post.add_dependency(lambda: self._read_extra_deps(post)[1], 'page')
post.add_dependency_uptodate(lambda: self._read_extra_deps(post)[2], True, 'fragment')
post.add_dependency_uptodate(lambda: self._read_extra_deps(post)[3], True, 'page')
def _write_deps(self, context, dest):
deps_path = dest + '.dep'
if context.has_dependencies():
data = (context.get_file_dependencies_fragment(), context.get_file_dependencies_page(),
context.get_uptodate_dependencies_fragment(), context.get_uptodate_dependencies_page())
with io.open(deps_path, "wb") as file:
file.write(json.dumps(data).encode('utf-8'))
else:
if os.path.isfile(deps_path):
os.unlink(deps_path)
def _read_similar_file(self, source, suffix):
path, filename = os.path.split(source)
filename_parts = filename.split('.')
for i in range(len(filename_parts), 0, -1):
candidate = os.path.join(path, '.'.join(filename_parts[:i]) + suffix)
try:
with open(candidate, "rb") as in_file:
# _LOGGER.info("Found file {0} for {1}.".format(candidate, source))
return in_file.read(), candidate
except:
pass
return None, None
def load_additional_data(self, source):
result = {}
dependent_files = set()
attachments, filename = self._read_similar_file(source, ".attachments.json")
if attachments is not None:
try:
attachments = json.loads(attachments.decode('utf-8'))
result['attachments'] = attachments
dependent_files.add(filename)
except Exception as e:
_LOGGER.error("Could not load attachments for {0}! (Exception: {1})".format(source, e))
return result, dependent_files
def compile_html(self, source, dest, is_two_file=False):
makedirs(os.path.dirname(dest))
with io.open(dest, "w+", encoding="utf8") as out_file:
# Read post
with io.open(source, "r", encoding="utf8") as in_file:
data = in_file.read()
if not is_two_file:
data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
# Read additional data
additional_data, dependent_files = self.load_additional_data(source)
# Process post
context = Context(hash(data), name=source, additional_data=additional_data)
for filename in dependent_files:
context.add_file_dependency(filename, 'fragment')
output = self.__formatData(data, context)
# Write result
out_file.write(output)
self._write_deps(context, dest)
def create_post(self, path, content=None, onefile=False, is_page=False, **kw):
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with io.open(path, "w+", encoding="utf8") as fd:
if onefile:
fd.write(write_metadata(metadata))
fd.write('\n')
fd.write(content)
| pluser/nikola_plugins | v7/wordpress_compiler/wordpress/wordpress.py | Python | mit | 11,780 | 0.002547 |
from __future__ import division
import math as m
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as pl
from scipy.optimize import fmin, fmin_powell, minimize
from numpy import (any, array, asarray, ones, ones_like, zeros, isfinite, inf, concatenate, arange, unique, delete,
dot, median, abs, std, nan, diag, log, where, identity, s_, sqrt)
from numpy.random import permutation
from matplotlib.pyplot import subplots, setp
from numpy.linalg.linalg import LinAlgError
from .gp import GeorgeGP, SplitGP
from .kernels import BasicKernel
from .utils import medsig
from .dtdata import DtData
class Detrender(object):
def __init__(self, flux, inputs, mask=None, p0=None, kernel=None, splits=[], tr_nrandom=200, tr_bspan=50, tr_nblocks=6):
self.data = DtData(flux, inputs, mask)
self.kernel = kernel or BasicKernel()
self.gp = SplitGP(self.kernel, splits) if splits is not None else GeorgeGP(self.kernel)
self.tr_data = self.data.create_training_set(tr_nrandom, tr_bspan, tr_nblocks)
self.gp.set_inputs(self.tr_data.masked_inputs)
## ======================
## Convenience routines
## ======================
@property
def flux(self):
return self.data.masked_flux
@property
def time(self):
return self.data.masked_time
## =====================
## Detrending routines
## =====================
def covariance_matrix(self, pv=None, inputs=None, separate=False):
inputs = inputs if inputs is not None else self.tr_data.masked_inputs
self.gp.compute(inputs, pv)
return self.gp._covariance_matrix(inputs, separate=separate)
def neglnposterior(self, pv, training=True):
if any(pv < self.kernel.lims[0]) or any(self.kernel.lims[1] < pv):
return inf
ds = self.tr_data if training else self.data
try:
lnlike = self.gp.lnlikelihood(pv, ds.masked_normalised_flux, ds.masked_inputs)
return -(self.kernel.ln_prior(pv) + lnlike)
except LinAlgError:
return inf
def train(self, pv0=None, disp=False):
pv0 = pv0 if pv0 is not None else self.kernel.pv0
mres = minimize(self.neglnposterior, pv0, method='Powell')
self.tr_pv = mres.x.copy()
return self.tr_pv, mres.success
def predict(self, pv, inputs=None, components=False, mean_only=True):
inputs = inputs if inputs is not None else self.data.unmasked_inputs
self.gp.compute(self.data.masked_inputs, pv)
self.gp._compute_alpha(self.data.masked_normalised_flux)
if components:
mu_time, mu_pos = self.gp.predict_components(inputs)
return ((1. + mu_time) * self.data._fm,
(1. + mu_pos) * self.data._fm)
else:
return self.gp.predict(inputs, mean_only=mean_only)
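    def compute_components(self, pv, inputs=None):
        # Assumed convenience wrapper: compute_components is called by the
        # plotting and detrending methods below but is not defined in this
        # fragment; predict() with components=True provides the same result.
        return self.predict(pv, inputs=inputs, components=True)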
def detrend_spatial(self, pv):
mt, mp = self.compute_components(pv)
flux = self.data.unmasked_flux.copy()
flux[self.data.mask] += -mp + median(mp)
flux[~self.data.mask] = nan
return flux
## ===================
## Plotting routines
## ===================
def plot_xy(self, pv=None, ax=None, plot_wireframe=False):
"""Plot the x and y points for the whole dataset and the training set.
"""
if ax is None:
fig,ax = subplots(1,1, figsize=(10,10))
if pv is None:
ax.tripcolor(self.data.mx, self.data.my, ones(self.data.nptm), vmin=0, vmax=1)
if plot_wireframe:
ax.triplot(self.data.mx, self.data.my, color='w')
else:
mt, mp = self.compute_components(pv)
ax.tripcolor(self.data.mx, self.data.my, mp)
ax.plot(self.tr_data.mx, self.tr_data.my, 'o', ms=3, c='k', mec='w')
return ax
def plot_t(self, pv=None, ax=None):
""" Plot the flux as a function of time for the whole dataset and the training set.
"""
if ax is None:
fig, ax = subplots(1,1)
fm = self.data.flux_median
fmin = self.data.masked_flux.min()
fmax = self.data.masked_flux.max()
fptp = self.data.masked_flux.ptp()
ax.plot(self.data.mt, self.data.mf, c='0.75', lw=1)
ax.plot(self.tr_data.ut, self.tr_data.uf, '.k', ms=6)
setp(ax, ylim=(0.999*fmin,1.001*fmax))
if pv is not None:
fd = self.detrend_spatial(pv)
fd += fm - np.nanmedian(fd)
mm = isfinite(fd)
ax.plot(self.data.unmasked_time[mm], fd[mm] - 0.7*fptp, alpha=0.75, lw=1)
setp(ax, ylim=(0.999*(fmin-0.7*fptp), 1.001*fmax))
setp(ax, xlim=self.data.mt[[0,-1]], xlabel='Time', ylabel='Flux')
return ax
def plot_report(self, pv, tid, fname=None, maxpt=350):
lmargin, rmargin = 0.12, 0.03
fig = pl.figure(figsize=(8.3,11.7))
fig.text(0.04, 0.965, 'EPIC {:9d}'.format(tid), va='top', size=24, color='w', weight='bold')
ax = fig.add_axes([0,0,1,1])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_zorder(-1000)
ax.add_patch(pl.Rectangle((0,0.92), 1, 0.08, fill=True))
ax_a = fig.add_axes([lmargin,0.25,1-lmargin-rmargin,0.3])
ax_x = fig.add_axes([lmargin,0.05,1-lmargin-rmargin,0.1])
ax_y = fig.add_axes([lmargin,0.15,1-lmargin-rmargin,0.1])
ax_c = fig.add_axes([0.55,0.6,0.45-rmargin, 0.3])
ax_x.plot(self.data.masked_time, self.data.mx, lw=1)
ax_y.plot(self.data.masked_time, self.data.my, lw=1)
## Compute stuff
## -------------
fm = median(self.data.masked_flux)
fmin = self.data.masked_flux.min()
fmax = self.data.masked_flux.max()
fptp = self.data.masked_flux.ptp()
mt, mp = self.compute_components(pv)
ms = self.data.mask
fd = self.data.unmasked_flux.copy()
fd[ms] += -mp + median(mp)
fd[~ms] = nan
fd += fm - np.nanmedian(fd)
## Plot A
## ------
ax_a.plot(self.data.masked_time, self.data.masked_flux/fm, c='0.75', lw=1)
ax_a.plot(self.tr_data.unmasked_time, self.tr_data.unmasked_flux/fm, '.k', ms=6)
ax_a.plot(*self.data.outliers, ls='', marker='o', ms=6)
ax_a.plot(self.data.unmasked_time[ms], (fd[ms] - 0.7*fptp)/fm, lw=1)
ax_a.plot(self.time, (mp-1.4*fptp)/fm, lw=1)
samples = permutation(self.time.size)[:maxpt]
ax_c.tripcolor(self.data.mx[samples], self.data.my[samples], mp[samples])
ax_c.plot(self.tr_data.mx, self.tr_data.my, '.', ms=3, c='w', alpha=0.8)
ax_c.plot(self.tr_data.mx, self.tr_data.my, '.', ms=1.5, c='k')
setp(ax_a, ylim=(0.999*(fmin-1.4*fptp)/fm, 1.001*fmax/fm))
setp(ax_a.get_xticklabels()+ax_y.get_xticklabels(), visible=False)
setp(ax_x, xlabel='Time', ylabel='X')
setp(ax_c, xlabel='X', ylabel='Y')
setp([ax_a,ax_x,ax_y], xlim=self.time[[0,-1]])
setp(ax_a, ylabel='Normalised flux')
setp(ax_y, ylabel='Y')
if fname:
fig.savefig(fname)
| OxES/k2sc | src/detrender.py | Python | gpl-3.0 | 7,258 | 0.012951 |
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 17 17:19:10 2016
@author: Michael
"""
from PyQt5 import QtWidgets
class AboutWindow(QtWidgets.QTextEdit):
def __init__(self, parent=None):
super().__init__(parent)
self.setReadOnly(True)
self.setHtml(
"""
<h1 id="kano">Kano</h1>
<p>Copyright (c) 2017, Michael Schreier <br>
All rights reserved.</p>
<p>This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.</p>
<p>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.</p>
<p>You should have received a copy of the GNU General Public License along with this program. If not, see <a href="http://www.gnu.org/licenses/">http://www.gnu.org/licenses/</a></p>
<hr>
<p>Kano has been built using the following libraries:</p>
<h3 id="entypo">Entypo+</h3>
<blockquote>
<p>All icons used by Kano are taken from the “Entypo+” library by Daniel Bruce, available under the Creative Commons license CC BY-SA 4.0.</p>
</blockquote>
<h3 id="pyqt5">PyQt5</h3>
<blockquote>
<p>Copyright (c) 2017, Riverbank Computing Limited <br>
All rights reserved.</p>
<p>This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by >the Free Software Foundation, either version 3 of the License, or (at your option) any later version.</p>
<p>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.</p>
<p>You should have received a copy of the GNU General Public License along with this program. If not, see <a href="http://www.gnu.org/licenses/">http://www.gnu.org/licenses/</a></p>
</blockquote>
<h3 id="fuzzywuzzy">FuzzyWuzzy</h3>
<blockquote>
<p>Copyright (c) 2017, SeatGeak <br>
All rights reserved.</p>
<p>This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.</p>
<p>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.</p>
<p>You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA</p>
</blockquote>
<h3 id="pyyaml">PyYAML</h3>
<blockquote>
<p>Copyright (c) 2006, Kirill Simonov <br>
All rights reserved.</p>
<p>Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:</p>
<p>The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.</p>
<p>THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.</p>
</blockquote>
"""
) | MichaelSchreier/Kano | class_UI_aboutWindow.py | Python | gpl-3.0 | 4,429 | 0.029153 |
# -*- coding: utf8 -*-
"""
Physical and Chemical data
"""
from csv import reader as csvreader
from mathics.builtin.base import Builtin
from mathics.core.expression import (Expression, from_python, Symbol, String,
strip_context)
from mathics.settings import ROOT_DIR
def load_element_data():
element_file = open(ROOT_DIR + 'data/element.csv', 'rb')
reader = csvreader(element_file, delimiter='\t')
element_data = []
for row in reader:
element_data.append([value for value in row])
element_file.close()
return element_data
_ELEMENT_DATA = load_element_data()
class ElementData(Builtin):
"""
<dl>
<dt>'ElementData["$name$", "$property$"]
<dd>gives the value of the $property$ for the chemical specified by $name$".
<dt>'ElementData[$n$, "$property$"]
<dd>gives the value of the $property$ for the $n$th chemical element".
</dl>
>> ElementData[74]
= Tungsten
>> ElementData["He", "AbsoluteBoilingPoint"]
= 4.22
>> ElementData["Carbon", "IonizationEnergies"]
= {1086.5, 2352.6, 4620.5, 6222.7, 37831, 47277.}
>> ElementData[16, "ElectronConfigurationString"]
= [Ne] 3s2 3p4
>> ElementData[73, "ElectronConfiguration"]
= {{2}, {2, 6}, {2, 6, 10}, {2, 6, 10, 14}, {2, 6, 3}, {2}}
The number of known elements:
>> Length[ElementData[All]]
= 118
Some properties are not appropriate for certain elements:
>> ElementData["He", "ElectroNegativity"]
= Missing[NotApplicable]
Some data is missing:
>> ElementData["Tc", "SpecificHeat"]
= Missing[NotAvailable]
All the known properties:
>> ElementData["Properties"]
= {Abbreviation, AbsoluteBoilingPoint, AbsoluteMeltingPoint, AtomicNumber, AtomicRadius, AtomicWeight, Block, BoilingPoint, BrinellHardness, BulkModulus, CovalentRadius, CrustAbundance, Density, DiscoveryYear, ElectroNegativity, ElectronAffinity, ElectronConfiguration, ElectronConfigurationString, ElectronShellConfiguration, FusionHeat, Group, IonizationEnergies, LiquidDensity, MeltingPoint, MohsHardness, Name, Period, PoissonRatio, Series, ShearModulus, SpecificHeat, StandardName, ThermalConductivity, VanDerWaalsRadius, VaporizationHeat, VickersHardness, YoungModulus}
>> ListPlot[Table[ElementData[z, "AtomicWeight"], {z, 118}]]
= -Graphics-
"""
rules = {
'ElementData[n_]': 'ElementData[n, "StandardName"]',
'ElementData[]': 'ElementData[All]',
'ElementData["Properties"]': 'ElementData[All, "Properties"]',
}
messages = {
'noent': ('`1` is not a known entity, class, or tag for ElementData. '
'Use ElementData[] for a list of entities.'),
'noprop': ('`1` is not a known property for ElementData. '
'Use ElementData["Properties"] for a list of properties.'),
}
def apply_all(self, evaluation):
'ElementData[All]'
iprop = _ELEMENT_DATA[0].index('StandardName')
return from_python([element[iprop] for element in _ELEMENT_DATA[1:]])
def apply_all_properties(self, evaluation):
'ElementData[All, "Properties"]'
return from_python(sorted(_ELEMENT_DATA[0]))
def apply_name(self, name, prop, evaluation):
"ElementData[name_?StringQ, prop_]"
py_name = name.to_python().strip('"')
names = ['StandardName', 'Name', 'Abbreviation']
iprops = [_ELEMENT_DATA[0].index(s) for s in names]
indx = None
for iprop in iprops:
try:
indx = [element[iprop] for element in
_ELEMENT_DATA[1:]].index(py_name) + 1
except ValueError:
pass
if indx is None:
evaluation.message("ElementData", "noent", name)
return
return self.apply_int(from_python(indx), prop, evaluation)
def apply_int(self, n, prop, evaluation):
"ElementData[n_?IntegerQ, prop_]"
from mathics.core.parser import parse
py_n = n.to_python()
py_prop = prop.to_python()
# Check element specifier n or "name"
if isinstance(py_n, int):
if not 1 <= py_n <= 118:
evaluation.message("ElementData", "noent", n)
return
elif isinstance(py_n, unicode):
pass
else:
evaluation.message("ElementData", "noent", n)
return
# Check property specifier
if isinstance(py_prop, str) or isinstance(py_prop, unicode):
py_prop = str(py_prop)
if py_prop == '"Properties"':
result = []
for i, p in enumerate(_ELEMENT_DATA[py_n]):
if p not in ["NOT_AVAILABLE", "NOT_APPLICABLE", "NOT_KNOWN"]:
result.append(_ELEMENT_DATA[0][i])
return from_python(sorted(result))
if not (isinstance(py_prop, str) and
py_prop[0] == py_prop[-1] == '"' and
py_prop.strip('"') in _ELEMENT_DATA[0]):
evaluation.message("ElementData", "noprop", prop)
return
iprop = _ELEMENT_DATA[0].index(py_prop.strip('"'))
result = _ELEMENT_DATA[py_n][iprop]
if result == "NOT_AVAILABLE":
return Expression("Missing", "NotAvailable")
if result == "NOT_APPLICABLE":
return Expression("Missing", "NotApplicable")
if result == "NOT_KNOWN":
return Expression("Missing", "Unknown")
result = parse(result, evaluation.definitions)
if isinstance(result, Symbol):
result = String(strip_context(result.get_name()))
return result
| benley/Mathics | mathics/builtin/physchemdata.py | Python | gpl-3.0 | 5,698 | 0.000527 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import re
import sys
from ansible import constants as C
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.nxos.nxos import nxos_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
module_name = self._task.action.split('.')[-1]
self._config_module = True if module_name == 'nxos_config' else False
persistent_connection = self._play_context.connection.split('.')[-1]
warnings = []
if (self._play_context.connection in ('httpapi', 'local') or self._task.args.get('provider', {}).get('transport') == 'nxapi') \
and module_name in ('nxos_file_copy', 'nxos_nxapi'):
return {'failed': True, 'msg': "Transport type 'nxapi' is not valid for '%s' module." % (module_name)}
if module_name == 'nxos_file_copy':
self._task.args['host'] = self._play_context.remote_addr
self._task.args['password'] = self._play_context.password
if self._play_context.connection == 'network_cli':
self._task.args['username'] = self._play_context.remote_user
elif self._play_context.connection == 'local':
self._task.args['username'] = self._play_context.connection_user
if module_name == 'nxos_install_os':
connection = self._connection
if connection.transport == 'local':
persistent_command_timeout = C.PERSISTENT_COMMAND_TIMEOUT
persistent_connect_timeout = C.PERSISTENT_CONNECT_TIMEOUT
else:
persistent_command_timeout = connection.get_option('persistent_command_timeout')
persistent_connect_timeout = connection.get_option('persistent_connect_timeout')
display.vvvv('PERSISTENT_COMMAND_TIMEOUT is %s' % str(persistent_command_timeout), self._play_context.remote_addr)
display.vvvv('PERSISTENT_CONNECT_TIMEOUT is %s' % str(persistent_connect_timeout), self._play_context.remote_addr)
if persistent_command_timeout < 600 or persistent_connect_timeout < 600:
msg = 'PERSISTENT_COMMAND_TIMEOUT and PERSISTENT_CONNECT_TIMEOUT'
msg += ' must be set to 600 seconds or higher when using nxos_install_os module.'
msg += ' Current persistent_command_timeout setting:' + str(persistent_command_timeout)
msg += ' Current persistent_connect_timeout setting:' + str(persistent_connect_timeout)
return {'failed': True, 'msg': msg}
if persistent_connection in ('network_cli', 'httpapi'):
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['provider']
if self._task.args.get('transport'):
display.warning('transport is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['transport']
elif self._play_context.connection == 'local':
provider = load_provider(nxos_provider_spec, self._task.args)
transport = provider['transport'] or 'cli'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
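                # build a network_cli play context from the legacy provider
                # dict, open a persistent connection, and hand the resulting
                # socket path to the module via the ansible_socket task var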
pc = copy.deepcopy(self._play_context)
pc.connection = 'ansible.netcommon.network_cli'
pc.network_os = 'cisco.nxos.nxos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
connection = self._shared_loader_obj.connection_loader.get('ansible.netcommon.persistent', pc, sys.stdin,
task_uuid=self._task._uuid)
# TODO: Remove below code after ansible minimal is cut out
if connection is None:
pc.connection = 'network_cli'
pc.network_os = 'nxos'
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
self._task.args['provider'] = ActionModule.nxapi_implementation(provider, self._play_context)
warnings.append(['connection local support for this module is deprecated and will be removed in version 2.14,'
' use connection either httpapi or ansible.netcommon.httpapi (whichever is applicable)'])
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
result = super(ActionModule, self).run(task_vars=task_vars)
if warnings:
if 'warnings' in result:
result['warnings'].extend(warnings)
else:
result['warnings'] = warnings
return result
@staticmethod
def nxapi_implementation(provider, play_context):
provider['transport'] = 'nxapi'
if provider.get('host') is None:
provider['host'] = play_context.remote_addr
if provider.get('port') is None:
if provider.get('use_ssl'):
provider['port'] = 443
else:
provider['port'] = 80
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('username') is None:
provider['username'] = play_context.connection_user
if provider.get('password') is None:
provider['password'] = play_context.password
if provider.get('use_ssl') is None:
provider['use_ssl'] = False
if provider.get('validate_certs') is None:
provider['validate_certs'] = True
return provider
| mikewiebe-ansible/ansible | lib/ansible/plugins/action/nxos.py | Python | gpl-3.0 | 8,307 | 0.003732 |
#
# Implements encrypting functions.
#
# Copyright (c) 2008, F S 3 Consulting Inc.
#
# Maintainer:
# Alec Joseph Rivera (agi<at>fs3.ph)
# refactored by Antony Lesuisse <al<at>openerp.com>
#
import hashlib
import hmac
import logging
from random import sample
from string import ascii_letters, digits
import openerp
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
magic_md5 = '$1$'
magic_sha256 = '$5$'
openerp.addons.base.res.res_users.USER_PRIVATE_FIELDS.append('password_crypt')
def gen_salt(length=8, symbols=None):
if symbols is None:
symbols = ascii_letters + digits
return ''.join(sample(symbols, length))
def md5crypt( raw_pw, salt, magic=magic_md5 ):
""" md5crypt FreeBSD crypt(3) based on but different from md5
The md5crypt is based on Mark Johnson's md5crypt.py, which in turn is
based on FreeBSD src/lib/libcrypt/crypt.c (1.2) by Poul-Henning Kamp.
Mark's port can be found in ActiveState ASPN Python Cookbook. Kudos to
Poul and Mark. -agi
Original license:
* "THE BEER-WARE LICENSE" (Revision 42):
*
* <phk@login.dknet.dk> wrote this file. As long as you retain this
* notice you can do whatever you want with this stuff. If we meet some
* day, and you think this stuff is worth it, you can buy me a beer in
* return.
*
* Poul-Henning Kamp
"""
raw_pw = raw_pw.encode('utf-8')
salt = salt.encode('utf-8')
hash = hashlib.md5()
hash.update( raw_pw + magic + salt )
st = hashlib.md5()
st.update( raw_pw + salt + raw_pw)
stretch = st.digest()
for i in range( 0, len( raw_pw ) ):
hash.update( stretch[i % 16] )
i = len( raw_pw )
while i:
if i & 1:
hash.update('\x00')
else:
hash.update( raw_pw[0] )
i >>= 1
saltedmd5 = hash.digest()
for i in range( 1000 ):
hash = hashlib.md5()
if i & 1:
hash.update( raw_pw )
else:
hash.update( saltedmd5 )
if i % 3:
hash.update( salt )
if i % 7:
hash.update( raw_pw )
if i & 1:
hash.update( saltedmd5 )
else:
hash.update( raw_pw )
saltedmd5 = hash.digest()
itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
rearranged = ''
for a, b, c in ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5)):
v = ord( saltedmd5[a] ) << 16 | ord( saltedmd5[b] ) << 8 | ord( saltedmd5[c] )
for i in range(4):
rearranged += itoa64[v & 0x3f]
v >>= 6
v = ord( saltedmd5[11] )
for i in range( 2 ):
rearranged += itoa64[v & 0x3f]
v >>= 6
return magic + salt + '$' + rearranged
def sh256crypt(password, salt, magic=magic_sha256):
    iterations = 1000
    # see http://en.wikipedia.org/wiki/PBKDF2
    result = password.encode('utf8')
    for i in xrange(iterations):
        result = hmac.HMAC(result, salt, hashlib.sha256).digest() # uses HMAC (RFC 2104) to apply salt
    result = result.encode('base64') # doesn't seem to be crypt(3) compatible
    return '%s%s$%s' % (magic_sha256, salt, result)
class res_users(osv.osv):
_inherit = "res.users"
def init(self, cr):
"""Encrypt all passwords at module installation"""
cr.execute("SELECT id, password FROM res_users WHERE password IS NOT NULL and password != ''")
for user in cr.fetchall():
self._set_encrypted_password(cr, user[0], user[1])
def _set_encrypted_password(self, cr, uid, plain_password):
"""Set an encrypted password for a given user"""
salt = gen_salt()
stored_password_crypt = md5crypt(plain_password, salt)
cr.execute("UPDATE res_users SET password = '', password_crypt = %s WHERE id = %s",
(stored_password_crypt, uid))
def set_pw(self, cr, uid, id, name, value, args, context):
if value:
self._set_encrypted_password(cr, id, value)
del value
def get_pw( self, cr, uid, ids, name, args, context ):
cr.execute('select id, password from res_users where id in %s', (tuple(map(int, ids)),))
stored_pws = cr.fetchall()
res = {}
for id, stored_pw in stored_pws:
res[id] = stored_pw
return res
_columns = {
'password': fields.function(get_pw, fnct_inv=set_pw, type='char', string='Password', invisible=True, store=True),
'password_crypt': fields.char(string='Encrypted Password', invisible=True),
}
def check_credentials(self, cr, uid, password):
# convert to base_crypt if needed
cr.execute('SELECT password, password_crypt FROM res_users WHERE id=%s AND active', (uid,))
if cr.rowcount:
stored_password, stored_password_crypt = cr.fetchone()
if stored_password and not stored_password_crypt:
self._set_encrypted_password(cr, uid, stored_password)
try:
return super(res_users, self).check_credentials(cr, uid, password)
except openerp.exceptions.AccessDenied:
# check md5crypt
if stored_password_crypt:
if stored_password_crypt[:len(magic_md5)] == magic_md5:
salt = stored_password_crypt[len(magic_md5):11]
if stored_password_crypt == md5crypt(password, salt):
return
                    elif stored_password_crypt[:len(magic_sha256)] == magic_sha256:
                        salt = stored_password_crypt[len(magic_sha256):11]
                        if stored_password_crypt == sh256crypt(password, salt):
return
# Reraise password incorrect
raise
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jaggu303619/asylum | openerp/addons/auth_crypt/auth_crypt.py | Python | agpl-3.0 | 5,849 | 0.009574 |
sabor = input()
quantidade = int(input())
if sabor.lower() == "morango" or sabor.lower() == "cereja":
total = quantidade*4.50
elif sabor.lower() == "damasco" or sabor.lower() == "siriguela":
total = quantidade*3.80
else:
total = quantidade*2.75
print("%.2f"%total)
if quantidade > 2:
print ("COM CALDA")
else:
print("SEM CALDA")
| SANDEISON/The-Huxley | Python/Sorveteria Tropical,py.py | Python | gpl-3.0 | 351 | 0.005698 |
from todoman import version # type: ignore
__version__ = version.version
__documentation__ = "https://todoman.rtfd.org/en/latest/"
| pimutils/todoman | todoman/__init__.py | Python | isc | 133 | 0 |
#!/usr/bin/python3
from . tkLEDTable import *
try:
import serial
except:
import sys
print("Install the serial module width '{} -m pip install PySerial'.".format(sys.executable))
raise
import threading
try:
from queue import Queue
except ImportError:
from Queue import Queue
import itertools
def list_serial_ports():
## got the code from
## http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
import os
from serial.tools import list_ports
# Windows
if os.name == 'nt':
# Scan for available ports.
available = []
for i in range(256):
try:
s = serial.Serial(i)
available.append('COM'+str(i + 1))
s.close()
except serial.SerialException:
pass
return available
else:
# Mac / Linux
return [port[0] for port in list_ports.comports()]
class SerialLEDTable(threading.Thread):
COMMAND_CHARACTER = b"!"
def __init__(self, led_table, file):
super(SerialLEDTable, self).__init__()
self.led_table = led_table
self.file = file
def run(self):
for line in self.file:
if not line.startswith(self.COMMAND_CHARACTER):
self.default_line(line)
else:
self.command_line(line[1:])
def default_line(self, line):
if line.endswith(b"\n"):
line = line[:-1]
if line.endswith(b"\r"):
line = line[:-1]
try:
line = line.decode("ASCII")
except UnicodeDecodeError:
pass
print(line)
def command_line(self, line):
command = line.split()
if command[0] == b"p":
height = int(command[1])
self.set_pixels(height)
elif command[0] == b"o":
height = int(command[1])
self.set_pixel_order(height)
else:
print("Invalid Command: {}".format(command))
def set_pixel_order(self, height):
indices = [[(int(i), x, y) for x, i in enumerate(self.file.readline().rstrip().split())]
for y in range(height)]
indices = list(itertools.chain.from_iterable(indices))
indices.sort()
coordinates = [(x, y) for i, x, y in indices]
self.set_pixel_order_on_ledtable(coordinates)
def set_pixel_order_on_ledtable(self, coordinates):
self.led_table.show_pixel_order(coordinates)
def set_pixels(self, height):
for y in range(height):
line = self.file.readline().rstrip()
pixels = line.decode("ASCII").split("#")[1:]
for x, pixel in enumerate(pixels):
self.set_pixel(x, y, pixel)
def set_pixel(self, x, y, color):
if not color:
return # nothing happened
if len(color) > 6:
color = color[-6:]
else:
color = color.rjust(6, "0")
color = "#" + color
self.set_pixel_on_ledtable(x, y, color)
def set_pixel_on_ledtable(self, x, y, color):
if x >= self.led_table.width:
self.led_table.width = x + 1
if y >= self.led_table.height:
self.led_table.height = y + 1
self.led_table.set_pixel_color(x, y, color)
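# Sketch of the serial wire format consumed above (inferred from the parser,
# not a formal spec): a command line starts with '!'; '!p <height>' is
# followed by <height> rows of concatenated '#RRGGBB' pixels, and
# '!o <height>' by <height> rows of whitespace-separated pixel indices.
def _example_pixel_frame(width=2, height=2, color="ff0000"):
    """Build the bytes of one solid-color frame in the assumed '!p' format."""
    row = ("#" + color) * width + "\n"
    return ("!p {}\n".format(height) + row * height).encode("ASCII")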
class TkSerialLEDTable(SerialLEDTable):
def __init__(self, *args, **kw):
super(TkSerialLEDTable, self).__init__(*args, **kw)
self.queue = Queue()
self.led_table.after(1, self.loop)
def set_pixel_on_ledtable(self, *args):
self.queue.put((super(TkSerialLEDTable, self).set_pixel_on_ledtable, args, {}))
def set_pixel_order_on_ledtable(self, *args):
self.queue.put((super(TkSerialLEDTable, self).set_pixel_order_on_ledtable, args, {}))
def loop(self):
while not self.queue.empty():
func, args, kw = self.queue.get(block = False)
func(*args, **kw)
self.led_table.after(1, self.loop)
if __name__ == "__main__":
import sys
if not sys.argv[1:]:
ports = list_serial_ports()
if not ports:
print("No serial ports detected. You should pass one as argument.")
else:
print('The first argument should be a serial port, e.g. "{}"'.format(
"\", \"".join(map(str, ports))))
print("The second argument can be the baud rate of the serial connection.")
print("If there is no second argument we assume 9600.")
exit(1)
port = sys.argv[1]
if len(sys.argv) >= 3:
baudrate = sys.argv[2]
else:
baudrate = 9600
s = serial.serial_for_url(port, baudrate = baudrate)
t = LEDTk()
t.title(port)
ts = TkSerialLEDTable(t, s)
    ts.daemon = True
ts.start()
try:
t.mainloop()
finally:
s.close()
| niccokunzmann/ledtable | python/ledtable/SerialLEDTable.py | Python | mit | 4,859 | 0.004322 |
from Alignment import Alignment
import sys,time,os
import pf,func
from Var import var
from Glitch import Glitch
class Data:
"""All the alignments that you want to work with, in one place.
Initialize this with one of
- nothing (or None),
- a list of Alignment objects, or
- a single Alignment object.
If you initialize with nothing (or None), then all alignments in
var.alignments are used. If you initialize with a list of
alignments, then that is used. You can initialize with an empty
list to get an empty Data object.
"""
#def __del__(self, freeData=pf.freeData, dp_freeData=pf.dp_freeData):
def __del__(self, freeData=pf.freeData):
if self.alignments:
for a in self.alignments:
a.parts = []
self.alignments = None
if self.parts:
#print len(self.parts)
for p in self.parts:
#if p.cPart:
# freePart(p.cPart) # this is not as good as p.__del__(), as it leaves some memory un-freed
p.__del__()
#else:
# print 0
if self.cData:
if self.doDataPart:
                pf.dp_freeData(self.cData)
else:
freeData(self.cData)
self.cData = None
self.parts = None
self.taxNames = None
## ##Ignore
## def wipe(self):
## if self.cData:
## freeData(self.cData)
## self.cData = None
## for p in self.parts:
## if p.cPart:
## pf.freePart(p.cPart)
## p.cPart = None
## del(p)
## self.taxNames = None
## for a in self.alignments:
## del(a)
## self.alignments = None
def __init__(self, alignments=None):
gm = ['Data.__init__()']
self.nParts = 0
self.parts = []
self.alignments = []
self.nTax = 0
self.taxNames = []
self.cData = None
self.unconstrainedLogLikelihood = None
if alignments:
if isinstance(alignments, Alignment):
#Passed in a single alignment object not a list
alignments = [alignments]
else:
if type(alignments) != type([]):
gm.append("The 'alignments' arg should be a list or a single Alignment object.")
raise Glitch, gm
for a in alignments:
if isinstance(a, Alignment):
pass
else:
gm.append("Something in the 'alignments' arg was not an Alignment.")
raise Glitch, gm
self._fill(alignments)
elif alignments == []:
pass
elif var.alignments:
self._fill(var.alignments)
# temporary! Only used in __del__()
self.doDataPart = var.doDataPart
def dump(self):
"""Print rubbish about self."""
print "Data dump"
if self.nParts == 1:
if var.doDataPart:
print " There is 1 dataPart"
else:
print " There is 1 part"
else:
if var.doDataPart:
print " There are %i dataParts" % self.nParts
else:
print " There are %i parts" % self.nParts
for p in self.parts:
print " name=%s, nChar %i, dataType %s, cPart %s" % \
(p.name, p.nChar, p.dataType, p.cPart)
print " There are %i taxa" % self.nTax
if len(self.alignments) == 1:
print " There is 1 alignment"
else:
print " There are %i alignments" % len(self.alignments)
if self.cData:
print " The cData is %s" % self.cData
else:
print " There is no cData"
if self.unconstrainedLogLikelihood:
print " The unconstrainedLogLikelihood is %s" % self.unconstrainedLogLikelihood
else:
pass
def _fill(self, alignments):
# Fill self with Parts from all alignments.
#
# This method is called from __init__(), and it is generally
# not needed on its own. If we get here, we can be fairly sure
# that arg alignments is a non-empty list of Alignment
# objects. This method calls the Alignment method _initParts()
gm = ["Data._fill()"]
self.alignments = alignments
# Make a part out of the first alignment.
if not len(self.alignments):
gm.append("There are no alignments")
raise Glitch, gm
a = self.alignments[0]
if var.doDataPart:
a.initDataParts()
else:
a._initParts()
if not len(a.parts):
gm.append("First alignment failed to make a part")
raise Glitch, gm
self.taxNames = a.taxNames
self.nTax = len(self.taxNames)
for p in a.parts:
self.parts.append(p)
self.nParts = len(self.parts)
# Now do subsequent alignments ...
for aligNum in range(len(self.alignments))[1:]:
a = self.alignments[aligNum]
if self.nTax != len(a.sequences):
gm.append("Additional alignment is not the same size as the first alignment.")
if a.fName:
gm.append('(New alignment from file %s.)' % a.fName)
gm.append("From the first alignment, nTax is %s." % self.nTax)
gm.append("However, (zero-based) alignment %i has %i sequences." % (aligNum, len(a.sequences)))
raise Glitch, gm
if self.nTax != len(a.taxNames):
gm.append("Additional alignment appears to be not the same size as the first alignment.")
if a.fName:
gm.append('(New alignment from file %s.)' % a.fName)
gm.append("From the first alignment, nTax is %s." % self.nTax)
gm.append("However, (zero-based) alignment %i has %i taxNames." % (aligNum, len(a.taxNames)))
raise Glitch, gm
for i in range(self.nTax):
if self.taxNames[i] != a.taxNames[i]:
gm.append("Name mis-match in (zero-based) taxon number %i," % i)
gm.append("in (zero-based) alignment %i." % aligNum)
if a.fName:
gm.append('(New alignment from file %s.)' % a.fName)
gm.append("Newly-added alignment taxname %s is not the" % a.taxNames[i])
gm.append(" same as first alignment taxname %s" % self.taxNames[i])
raise Glitch, gm
if var.doDataPart:
a.initDataParts()
else:
a._initParts()
if not len(a.parts):
gm.append("Additional alignment failed to make a part.")
if a.fName:
gm.append('(New alignment from file %s.)' % a.fName)
raise Glitch, gm
for p in a.parts:
self.parts.append(p)
self.nParts = len(self.parts)
def calcUnconstrainedLogLikelihood1(self):
"""Calculate likelihood under the multinomial model.
This calculates the unconstrained (multinomial) log like
without regard to character partitions. The result is placed
in the data variable unconstrainedLogLikelihood. If there is
more than one partition, it makes a new temporary alignment
and puts all the sequences in one part in that alignment. So
it ultimately only works on one data partition. If there is
more than one alignment, there is possibly more than one
datatype, and so this method will refuse to do it. Note that
the unconstrained log like of the combined data is not the sum
of the unconstrained log likes of the separate partitions.
See also calcUnconstrainedLogLikelihood2
"""
if len(self.alignments) > 1:
gm = ["Data.calcUnconstrainedLogLikelihood()"]
gm.append("This method is not implemented for more than one alignment.")
raise Glitch, gm
if self.nParts == 1: # no problem
self.unconstrainedLogLikelihood = pf.getUnconstrainedLogLike(self.parts[0].cPart)
else:
a = self.alignments[0]
import copy
newAlig = Alignment()
newAlig.dataType = a.dataType
newAlig.symbols = a.symbols
newAlig.dim = a.dim
newAlig.equates = a.equates
newAlig.taxNames = a.taxNames
for s in a.sequences:
newAlig.sequences.append(copy.deepcopy(s))
newAlig.checkLengthsAndTypes()
newAlig._initParts()
#newAlig.dump()
self.unconstrainedLogLikelihood = pf.getUnconstrainedLogLike(newAlig.parts[0].cPart)
del(newAlig)
def calcUnconstrainedLogLikelihood2(self):
"""Calculate likelihood under the multinomial model.
This calculates the unconstrained log like of each data
partition and places the sum in the Data (self) variable
unconstrainedLogLikelihood. Note that the unconstrained log
like of the combined data is not the sum of the unconstrained
log likes of the separate partitions. See also
calcUnconstrainedLogLikelihood1
"""
uncon = 0.0
for p in self.parts:
#print " %i %f" % (p.cPart, pf.getUnconstrainedLogLike(p.cPart))
uncon = uncon + pf.getUnconstrainedLogLike(p.cPart)
self.unconstrainedLogLikelihood = uncon
def _setCStuff(self):
if self.cData:
gm = ["Data._setCStuff()"]
gm.append("This should only be called if self.cData does not exist!")
raise Glitch, gm
else:
if var.doDataPart:
self.cData = pf.dp_newData(self.nTax, self.nParts)
for i in range(self.nParts):
p = self.parts[i]
pf.dp_pokeDataPartInData(p.cPart, self.cData, i)
else:
self.cData = pf.newData(self.nTax, self.nParts)
for i in range(self.nParts):
p = self.parts[i]
pf.pokePartInData(p.cPart, self.cData, i)
#print "Made Data.cData = %s" % self.cData
def writeNexus(self, fName=None, writeDataBlock=0, interleave=0, flat=0, append=0):
"""Write all the alignments in self to a Nexus file.
If writeDataBlock=1, then taxa and characters are written to a
'data' block, rather than the default, which is to write
separate 'taxa' and 'characters' blocks.
Arg 'flat' gives sequences all on one line.
Arg 'append', if 0, writes #NEXUS first. If 1, does not write #NEXUS.
"""
# There may be more than one alignment, and we need to do the first
# one first, because it may or may not be appended, while the remaining
# alignments are appended for sure.
if len(self.alignments):
a = self.alignments[0]
#if a.parts and len(a.parts):
# a.resetSequencesFromParts() # simulate should be responsible for this
a.writeNexus(fName, writeDataBlock, interleave, flat, append)
for a in self.alignments[1:]:
#if a.parts and len(a.parts):
# a.resetSequencesFromParts()
a.writeNexus(fName, writeDataBlock, interleave, flat, append=1)
def resetSequencesFromParts(self):
for a in self.alignments:
if a.parts:
a.resetSequencesFromParts()
else:
raise Glitch, "Alignment has no parts."
def compoSummary(self):
"""A verbose composition summary, one for each data partition."""
print "\n\nData composition summary"
print "========================\n"
# Make a name format (eg '%12s') that is long enough for the longest name
longestNameLen = 7 # to start
for i in self.taxNames:
if len(i) > longestNameLen:
longestNameLen = len(i)
nameFormat = '%' + '%i' % (longestNameLen + 1) + 's'
for i in range(len(self.parts)):
p = self.parts[i]
print "Part %i" % i
print "%s" % (' ' * (longestNameLen + 1)),
for j in range(len(p.symbols)):
print "%10s" % p.symbols[j],
print "%10s" % 'nSites'
#print ''
#cumulativeComps = [0.0] * len(p.symbols)
grandTotalNSites = 0
for k in range(p.nTax):
c = p.composition([k])
#print "tax %s, part.composition() returns %s" % (k, c)
nSites = pf.partSequenceSitesCount(p.cPart, k)
grandTotalNSites = grandTotalNSites + nSites
print nameFormat % self.taxNames[k],
# Usually sum(c) will be 1.0, unless the sequence is
# empty. We don't want to test "if sum(c) == 0.0:" or
# "if sum(c):" cuz of small numbers.
if sum(c) > 0.99:
for j in range(len(p.symbols)):
print "%10.4f" % c[j],
#cumulativeComps[j] = cumulativeComps[j] + (c[j] * nSites)
else: # Empty sequence, all zeros. Write dashes.
for j in range(len(p.symbols)):
print "%10s" % '-',
print "%10s" % nSites
c = p.composition()
print nameFormat % 'mean',
for j in range(len(p.symbols)):
print "%10.4f" % c[j],
#print "%10s" % grandTotalNSites
print "%10.4f" % (float(grandTotalNSites)/self.nTax)
print "\n"
def compoChiSquaredTest(self, verbose=1, skipColumnZeros=0, useConstantSites=1, skipTaxNums=None, getRows=0):
"""A chi square composition test for each data partition.
So you could do, for example::
read('myData.nex')
# Calling Data() with no args tells it to make a Data object
# using all the alignments in var.alignments
d = Data()
# Do the test. By default it is verbose, and prints results.
# Additionally, a list of lists is returned
ret = d.compoChiSquaredTest()
# With verbose on, it might print something like ---
# Part 0: Chi-square = 145.435278, (dof=170) P = 0.913995
print ret
# The list of lists that it returns might be something like ---
# [[145.43527849758556, 170, 0.91399521077908041]]
# which has the same numbers as above, with one
# inner list for each data partition.
If your data has more than one partition::
read('first.nex')
read('second.nex')
d = Data()
d.compoChiSquaredTest()
# Output something like ---
# Part 0: Chi-square = 200.870463, (dof=48) P = 0.000000
# Part 1: Chi-square = 57.794704, (dof=80) P = 0.971059
# [[200.87046313430443, 48, 0.0], [57.794704451018163, 80, 0.97105866938683427]]
where the last line is returned. With *verbose* turned off,
the ``Part N`` lines are not printed.
This method returns a list of lists, one for each data
partition. If *getRows* is off, the default, then it is a
list of 3-item lists, and if *getRows* is turned on then it is
a list of 4-item lists. In each inner list, the first is the
X-squared statistic, the second is the degrees of freedom, and
the third is the probability from chi-squared. (The expected
comes from the data.) If *getRows* is turned on, the 4th item
is a list of X-sq contributions from individual rows (ie
individual taxa), that together sum to the X-sq for the whole
partition as found in the first item. This latter way is the
way that Tree-Puzzle does it.
Note that this ostensibly tests whether the data are
homogeneous in composition, but it does not work on sequences
that are related. That is, testing whether the X^2 stat is
significant using the chi^2 curve has a high probability of
type II error for phylogenetic sequences.
However, the X-squared stat can be used in valid ways. You
can simulate data under the tree and model, and so generate a
valid null distribution of X^2 values from the simulations, by
which to assess the significance of the original X^2. You can
use this method to generate X^2 values.
A problem arises when a composition of a character is zero.
If that happens, we can't calculate X-squared because there
will be a division by zero. If *skipColumnZeros* is set to 1,
then those columns are simply skipped. They are silently
skipped unless verbose is turned on.
        So let's say that your original data have all characters, but
one of them has a very low value. That is reflected in the
model, and when you do simulations based on the model you
occasionally get zeros for that character. Here it is up to
        you: you could say that the data containing the zeros are
validly part of the possibilities and so should be included,
or you could say that the data containing the zeros are not
valid and should be excluded. You choose between these by
setting *skipColumnZeros*. Note that if you do not set
*skipColumnZeros*, and then you analyse a partition that has
column zeros, the result is None for that partition.
Another problem occurs when a partition is completely missing
a sequence. Of course that sequence does not contribute to
the stat. However, in any simulations that you might do, that
sequence *will* be there, and *will* contribute to the stat.
So you will want to skip that sequence when you do your calcs
from the simulation. You can do that with the *skipTaxNums*
arg, which is a list of lists. The outer list is nParts long,
and each inner list is a list of taxNums to exclude.
"""
if not useConstantSites:
newData = Data([])
aligs = []
for a in self.alignments:
#aligs.append(a.removeConstantSites())
aligs.append(a.subsetUsingMask(a.constantMask(), theMaskChar='1', inverse=1))
newData._fill(aligs)
theResult = newData.compoChiSquaredTest(verbose=verbose,
skipColumnZeros=skipColumnZeros,
useConstantSites=1, skipTaxNums=skipTaxNums,
getRows=getRows)
del(newData)
return theResult
gm = ['Data.compoChiSquaredTest()']
nColumnZeros = 0
results = []
# check skipTaxNums
if skipTaxNums != None:
if type(skipTaxNums) != type([]):
gm.append("skipTaxNums should be a list of lists.")
raise Glitch, gm
if len(skipTaxNums) != self.nParts:
gm.append("skipTaxNums should be a list of lists, nParts long.")
raise Glitch, gm
for s in skipTaxNums:
if type(s) != type([]):
gm.append("skipTaxNums should be a list of lists.")
raise Glitch, gm
for i in s:
if type(i) != type(1):
gm.append("skipTaxNums inner list items should be tax numbers.")
gm.append("Got %s" % i)
raise Glitch, gm
# Check for blank sequences. Its a pain to force the user to do this.
hasBlanks = False
blankSeqNums = []
for partNum in range(self.nParts):
p = self.parts[partNum]
partBlankSeqNums = []
for taxNum in range(self.nTax):
if skipTaxNums and skipTaxNums[partNum] and taxNum in skipTaxNums[partNum]:
pass
else:
nSites = pf.partSequenceSitesCount(p.cPart, taxNum) # no gaps, no missings
if not nSites:
partBlankSeqNums.append(taxNum)
if partBlankSeqNums:
hasBlanks = True
blankSeqNums.append(partBlankSeqNums)
if hasBlanks:
gm.append("These sequence numbers were found to be blank. They should be excluded.")
gm.append("%s" % blankSeqNums)
gm.append("Set the arg skipTaxNums to this list.")
raise Glitch, gm
for partNum in range(self.nParts):
gm = ['Data.compoChiSquaredTest() Part %i' % partNum]
p = self.parts[partNum]
comps = []
for taxNum in range(self.nTax):
if skipTaxNums and skipTaxNums[partNum] and taxNum in skipTaxNums[partNum]:
pass
else:
oneComp = p.composition([taxNum])
nSites = pf.partSequenceSitesCount(p.cPart, taxNum) # no gaps, no missings
#print "tax %i, nSites=%i, oneComp=%s" % (taxNum, nSites, oneComp)
if nSites:
for k in range(len(oneComp)):
oneComp[k] = oneComp[k] * nSites
comps.append(oneComp)
else:
gm.append("(Zero-based) sequence %i is blank, and should be excluded." % taxNum)
gm.append("You need to add the number %i to the arg skipTaxNums list of lists." % taxNum)
gm.append("(I could do that automatically, but it is best if *you* do it, explicitly.)")
gm.append("You can use the Alignment method checkForBlankSequences(listSeqNumsOfBlanks=True)")
gm.append("to help you get those inner lists.")
raise Glitch, gm
#print "comps=", comps
# Here we calculate the X^2 stat. But we want to check
# for columns summing to zero. So we can't use
# func.xSquared()
nRows = len(comps)
nCols = len(comps[0])
theSumOfRows = func._sumOfRows(comps) # I could have just kept nSites, above
theSumOfCols = func._sumOfColumns(comps)
#print theSumOfCols
isOk = 1
columnZeros = []
for j in range(len(theSumOfRows)):
if theSumOfRows[j] == 0.0:
gm.append("Zero in a row sum. Programming error.")
raise Glitch, gm
for j in range(len(theSumOfCols)):
if theSumOfCols[j] == 0.0:
if skipColumnZeros:
columnZeros.append(j)
else:
if verbose:
print gm[0]
print " Zero in a column sum."
print " And skipColumnZeros is not set, so I am refusing to do it at all."
isOk = 0
nColumnZeros += 1
theExpected = func._expected(theSumOfRows, theSumOfCols)
#print "theExpected = ", theExpected
#print "columnZeros = ", columnZeros
if isOk:
if getRows:
xSq_rows = []
xSq = 0.0
alreadyGivenZeroWarning = 0
k = 0
for taxNum in range(self.nTax):
if skipTaxNums and skipTaxNums[partNum] and taxNum in skipTaxNums[partNum]:
if getRows:
xSq_rows.append(0.0) # this taxon is not in comps. Add a placeholder
else: # k is the counter for comps and theExpected, taxNum without the skips
xSq_row = 0.0
for j in range(nCols):
if j in columnZeros:
if skipColumnZeros:
if verbose and not alreadyGivenZeroWarning:
print gm[0]
print " Skipping (zero-based) column number(s) %s, which sum to zero." % columnZeros
alreadyGivenZeroWarning = 1
else:
gm.append("Programming error.")
raise Glitch, gm
else:
theDiff = comps[k][j] - theExpected[k][j]
xSq_row += (theDiff * theDiff) / theExpected[k][j]
xSq += xSq_row
if getRows:
xSq_rows.append(xSq_row)
k += 1
#print xSq_rows
dof = (p.dim - len(columnZeros) - 1) * (len(comps) - 1)
prob = pf.chiSquaredProb(xSq, dof)
if verbose:
print "Part %i: Chi-square = %f, (dof=%i) P = %f" % (partNum, xSq, dof, prob)
if getRows:
#print " rows = %s" % xSq_rows
print "%20s %7s %s" % ('taxName', 'xSq_row', 'P (like puzzle)')
for tNum in range(self.nTax):
if not skipTaxNums or tNum not in skipTaxNums[partNum]:
thisProb = pf.chiSquaredProb(xSq_rows[tNum], self.parts[partNum].dim - 1)
print "%20s %7.5f %7.5f" % (self.taxNames[tNum], xSq_rows[tNum], thisProb)
else:
print "%20s --- ---" % self.taxNames[tNum]
if getRows:
results.append([xSq, dof, prob, xSq_rows])
else:
results.append([xSq, dof, prob])
else: # ie not isOk, ie there is a zero in a column sum
results.append(None) # Maybe a bad idea. Maybe it should just die, above.
if nColumnZeros and verbose:
print "There were %i column zeros." % nColumnZeros
return results
def simpleBigXSquared(self):
"""No frills calculation of bigXSquared.
As in :meth:`Data.Data.compoChiSquaredTest`, but with no
options, and hopefully faster. It can't handle gaps or
ambiguities. It should be ok for simulations. It returns a
list of bigXSquared numbers, one for each data partition.
If a character happens to not be there, then a column will be
zero, and so it can't be calculated. In that case -1.0 is
returned for that part.
"""
l = []
for p in self.parts:
l.append(pf.partBigXSquared(p.cPart))
return l
def simpleConstantSitesCount(self):
"""No frills constant sites count.
It can't handle gaps or ambiguities. It should be ok
for simulations. It returns a list of constant sites counts,
one for each data partition.
For each part, of the sites that are not all gaps+ambigs, if
the sites that are not gaps or ambigs are all the same, then
it is considered here to be a constant site.
"""
l = []
for p in self.parts:
l.append(pf.partSimpleConstantSitesCount(p.cPart))
return l
def dupe(self):
"""Copy, making new cParts."""
import copy
aligListCopy = copy.deepcopy(self.alignments)
for alig in aligListCopy:
# We do not want the cPart's, but neither do we want to free the originals.
for p in alig.parts:
p.cPart = None
del(alig.parts)
alig.parts = []
return Data(aligListCopy)
def bootstrap(self, seed=None):
"""Returns a new data object, filled with bootstrapped data.
It is a non-parametric bootstrap. Data partitions are handled
properly, that is if your data has a charpartition, the
bootstrap has the same charpartition, and sites are sampled
only from the appropriate charpartition subset. """
gm = ['Data.bootstrap()']
import copy
aligListCopy = copy.deepcopy(self.alignments)
for alig in aligListCopy:
# We do not want the cPart's, but neither do we want to free the originals.
for p in alig.parts:
p.cPart = None
del(alig.parts)
alig.parts = []
d = Data([])
d._fill(aligListCopy)
if not self.cData:
self._setCStuff()
d._setCStuff()
if 0:
print "\nSELF\n===="
self.dump()
print "\n\nNEW DATA\n========"
d.dump()
raise Glitch
isNewGSL_RNG = 0
if not var.gsl_rng:
var.gsl_rng = pf.get_gsl_rng()
isNewGSL_RNG = 1
#print "got var.gsl_rng = %i" % var.gsl_rng
# Set the GSL random number generator seed, only if it is a new GSL_RNG
if isNewGSL_RNG:
if seed != None:
try:
newSeed = int(seed)
pf.gsl_rng_set(var.gsl_rng, newSeed)
except ValueError:
print gm[0]
print " The seed should be convertable to an integer"
print " Using the process id instead."
pf.gsl_rng_set(var.gsl_rng, os.getpid())
else:
pf.gsl_rng_set(var.gsl_rng, os.getpid())
pf.bootstrapData(self.cData, d.cData, var.gsl_rng)
# Data.resetSequencesFromParts() uses
# Alignment.resetSequencesFromParts(), which uses
# partSeq = pf.symbolSequences(self.parts[i].cPart)
# which uses thePart->sequences
d.resetSequencesFromParts()
return d
def meanNCharsPerSite(self):
"""Mean number of different characters per site, of variable sites only.
Constant sites are ignored. Ambiguities and gaps are ignored.
This is implemented in C, allowing multiple parts. It is also
implemented in pure Python in the Alignment class, for single
parts (which also optionally gives you a distribution in
addition to the mean); see
:meth:`Alignment.Alignment.meanNCharsPerSite`.
"""
l = []
for p in self.parts:
l.append(pf.partMeanNCharsPerSite(p.cPart))
return l
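# Minimal usage sketch, following the examples already given in the
# compoChiSquaredTest() docstring above; it assumes the interactive p4
# environment, where read() is available and fills var.alignments.
def _exampleUsage():
    read('myData.nex')
    d = Data()                    # uses all alignments in var.alignments
    xSqResults = d.compoChiSquaredTest(verbose=0)
    bootstrapped = d.bootstrap()  # new Data object with the same charpartition
    return xSqResults, bootstrapped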
| Linhua-Sun/p4-phylogenetics | p4/Data.py | Python | gpl-2.0 | 31,372 | 0.005355 |
from sys import argv
script, user_name = argv
prompt = '> '
print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)
print "Where do you live %s?" % user_name
lives = raw_input(prompt)
print "What kind of computer do you have?"
computer = raw_input(prompt)
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer)
| Skreex/LPTHW | ex14.py | Python | mit | 529 | 0.00189 |
import copy
def joinHeaders(first, second, joined, on):
joined.headers = first.headers[:]
mappedHeaders = {}
for header in second.headers:
if header == on:
continue
i = 0
newHeader = header
while newHeader in first.headers:
newHeader = '{0}_{1}'.format(newHeader, i)
i += 1
if i > 0:
mappedHeaders[header] = newHeader
joined.headers.append(newHeader)
return mappedHeaders
def mergeRow(row, toMerge, mappedHeaders):
for header in toMerge:
if header in mappedHeaders:
row[mappedHeaders[header]] = toMerge[header]
else:
row[header] = toMerge[header]
def mergeRows(first, second, joined, on, mappedHeaders):
joined.rows = copy.deepcopy(first.rows)
secondRows = copy.deepcopy(second.rows)
for secondRow in secondRows:
pivot = secondRow[on]
for row in joined.rows:
if row[on] == pivot:
mergeRow(row, secondRow, mappedHeaders)
break
else:
newRow = {}
mergeRow(newRow, secondRow, mappedHeaders)
joined.rows.append(newRow)
class Dataset:
def __init__(self, filename = '', separator=',', header=True):
self.headers = []
self.rows = []
try:
infile = file(filename, 'r')
if header:
self.headers = infile.readline().strip().split(separator)
for line in infile:
row = line.strip().split(separator)
if not header and not self.headers:
self.headers = ["V{0}".format(i) for i in range(len(row))]
self.rows.append({self.headers[i]:row[i] for i in range(len(row))})
infile.close()
except IOError:
pass
def export(self, filename):
outfile = file(filename, 'w')
outfile.write(','.join(self.headers))
for row in self.rows:
outfile.write('\n')
outfile.write(','.join([row[x] for x in self.headers]))
outfile.close()
def join(self, other, on):
"""Join self dataset with another dataset, creating a new dataset.
The original datasets remain unchanged.
The third argument is the header on which to join"""
        # check for correct join: the join column must be present in both datasets
        if not (on in self.headers and on in other.headers):
            print "Error: header '{0}' not found in both collections".format(on)
return None
# create new dataset
joined = Dataset()
# fill new dataset with combined data
mappedHeaders = joinHeaders(self, other, joined, on)
mergeRows(self, other, joined, on, mappedHeaders)
joined.ensureFilled()
# return newly created dataset
return joined
def pivot(self):
"""Pivot this dataset into a new one, discarding current headers, using first column as new headers"""
pivoted = Dataset()
for (index, header) in enumerate(self.headers):
for row in self.rows:
if index == 0:
pivoted.headers.append(row[header])
else:
if len(pivoted.rows) < index:
pivoted.rows.extend([{} for x in range(index - len(pivoted.rows))])
pivoted.rows[index - 1][row[self.headers[0]]] = row[header]
return pivoted
def ensureFilled(self):
for row in self.rows:
for header in self.headers:
if not header in row:
row[header] = None
def append(self, other, ensureFilled = True):
"""Append rows of another dataset to this one, leaving the other dataset unchanged"""
self.rows.extend(other.rows)
self.headers.extend([x for x in other.headers if not x in self.headers])
if(ensureFilled):
self.ensureFilled()
return self
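# Minimal usage sketch: join two CSV files on a shared column, then pivot.
# 'a.csv' and 'b.csv' are hypothetical files that both contain an 'id' header.
def _example_usage():
    first = Dataset('a.csv')
    second = Dataset('b.csv')
    joined = first.join(second, 'id')  # new Dataset; the inputs stay unchanged
    if joined is None:
        return None
    joined.export('joined.csv')
    return joined.pivot()              # first column becomes the new headers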
| dwilmer/rcpsp-testing-framework | dataset.py | Python | mit | 3,291 | 0.036159 |
"""Auto-generated file, do not edit by hand. 881 metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_881 = PhoneMetadata(id='001', country_code=881, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[0-36-9]\\d{8}', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='[0-36-9]\\d{8}', example_number='612345678', possible_length=(9,)),
number_format=[NumberFormat(pattern='(\\d)(\\d{3})(\\d{5})', format='\\1 \\2 \\3', leading_digits_pattern=['[0-36-9]'])])
| daviddrysdale/python-phonenumbers | python/phonenumbers/data/region_881.py | Python | apache-2.0 | 569 | 0.008787 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers that are not directly related to course content."""
__author__ = 'Saifu Angto (saifu@google.com)'
import base64
import hmac
import os
import time
import urlparse
import webapp2
import appengine_config
from common import jinja_utils
from models import models
from models import transforms
from models.config import ConfigProperty
from models.config import ConfigPropertyEntity
from models.courses import Course
from models.models import Student
from models.models import StudentProfileDAO
from models.models import TransientStudent
from models.roles import Roles
from google.appengine.api import namespace_manager
from google.appengine.api import users
# The name of the template dict key that stores a course's base location.
COURSE_BASE_KEY = 'gcb_course_base'
# The name of the template dict key that stores data from course.yaml.
COURSE_INFO_KEY = 'course_info'
TRANSIENT_STUDENT = TransientStudent()
XSRF_SECRET_LENGTH = 20
XSRF_SECRET = ConfigProperty(
'gcb_xsrf_secret', str, (
'Text used to encrypt tokens, which help prevent Cross-site request '
'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, '
'preferably using 16-64 characters. Once you change this value, the '
'server rejects all subsequent requests issued using an old value for '
'this variable.'),
'course builder XSRF secret')
# Whether to record page load/unload events in a database.
CAN_PERSIST_PAGE_EVENTS = ConfigProperty(
'gcb_can_persist_page_events', bool, (
'Whether or not to record student page interactions in a '
'datastore. Without event recording, you cannot analyze student '
'page interactions. On the other hand, no event recording reduces '
'the number of datastore operations and minimizes the use of Google '
'App Engine quota. Turn event recording on if you want to analyze '
'this data.'),
False)
# Whether to record tag events in a database.
CAN_PERSIST_TAG_EVENTS = ConfigProperty(
'gcb_can_persist_tag_events', bool, (
'Whether or not to record student tag interactions in a '
'datastore. Without event recording, you cannot analyze student '
'tag interactions. On the other hand, no event recording reduces '
'the number of datastore operations and minimizes the use of Google '
'App Engine quota. Turn event recording on if you want to analyze '
'this data.'),
False)
# Whether to record events in a database.
CAN_PERSIST_ACTIVITY_EVENTS = ConfigProperty(
'gcb_can_persist_activity_events', bool, (
'Whether or not to record student activity interactions in a '
'datastore. Without event recording, you cannot analyze student '
'activity interactions. On the other hand, no event recording reduces '
'the number of datastore operations and minimizes the use of Google '
'App Engine quota. Turn event recording on if you want to analyze '
'this data.'),
False)
# Date format string for displaying datetimes in UTC.
# Example: 2013-03-21 13:00 UTC
HUMAN_READABLE_DATETIME_FORMAT = '%Y-%m-%d, %H:%M UTC'
# Date format string for displaying dates. Example: 2013-03-21
HUMAN_READABLE_DATE_FORMAT = '%Y-%m-%d'
# Time format string for displaying times. Example: 01:16:40 UTC.
HUMAN_READABLE_TIME_FORMAT = '%H:%M:%S UTC'
class PageInitializer(object):
"""Abstract class that defines an interface to initialize page headers."""
@classmethod
def initialize(cls, template_value):
raise NotImplementedError
class DefaultPageInitializer(PageInitializer):
"""Implements default page initializer."""
@classmethod
def initialize(cls, template_value):
pass
class PageInitializerService(object):
"""Installs the appropriate PageInitializer."""
_page_initializer = DefaultPageInitializer
@classmethod
def get(cls):
return cls._page_initializer
@classmethod
def set(cls, page_initializer):
cls._page_initializer = page_initializer
class ReflectiveRequestHandler(object):
"""Uses reflection to handle custom get() and post() requests.
Use this class as a mix-in with any webapp2.RequestHandler to allow request
dispatching to multiple get() and post() methods based on the 'action'
parameter.
Open your existing webapp2.RequestHandler, add this class as a mix-in.
Define the following class variables:
default_action = 'list'
        get_actions = ['list', 'edit']
post_actions = ['save']
Add instance methods named get_list(self), get_edit(self), post_save(self).
These methods will now be called automatically based on the 'action'
GET/POST parameter.
"""
def create_xsrf_token(self, action):
return XsrfTokenManager.create_xsrf_token(action)
def get(self):
"""Handles GET."""
action = self.request.get('action')
if not action:
action = self.default_action
if action not in self.get_actions:
self.error(404)
return
handler = getattr(self, 'get_%s' % action)
if not handler:
self.error(404)
return
return handler()
def post(self):
"""Handles POST."""
action = self.request.get('action')
if not action or action not in self.post_actions:
self.error(404)
return
handler = getattr(self, 'post_%s' % action)
if not handler:
self.error(404)
return
# Each POST request must have valid XSRF token.
xsrf_token = self.request.get('xsrf_token')
if not XsrfTokenManager.is_xsrf_token_valid(xsrf_token, action):
self.error(403)
return
return handler()
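# Illustrative sketch of the mix-in pattern described above (a hypothetical
# handler, not wired to any route): requests are dispatched on the 'action'
# GET/POST parameter to the matching get_<action>/post_<action> method.
class _ExampleReflectiveHandler(ReflectiveRequestHandler, webapp2.RequestHandler):
    default_action = 'list'
    get_actions = ['list', 'edit']
    post_actions = ['save']
    def get_list(self):
        self.response.out.write('list view')
    def get_edit(self):
        self.response.out.write('edit view')
    def post_save(self):
        self.response.out.write('saved')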
class ApplicationHandler(webapp2.RequestHandler):
"""A handler that is aware of the application context."""
@classmethod
def is_absolute(cls, url):
return bool(urlparse.urlparse(url).scheme)
@classmethod
def get_base_href(cls, handler):
"""Computes current course <base> href."""
base = handler.app_context.get_slug()
if not base.endswith('/'):
base = '%s/' % base
# For IE to work with the <base> tag, its href must be an absolute URL.
if not cls.is_absolute(base):
parts = urlparse.urlparse(handler.request.url)
base = urlparse.urlunparse(
(parts.scheme, parts.netloc, base, None, None, None))
return base
def __init__(self, *args, **kwargs):
super(ApplicationHandler, self).__init__(*args, **kwargs)
self.template_value = {}
def get_template(self, template_file, additional_dirs=None):
"""Computes location of template files for the current namespace."""
self.template_value[COURSE_INFO_KEY] = self.app_context.get_environ()
self.template_value['is_course_admin'] = Roles.is_course_admin(
self.app_context)
self.template_value[
'is_read_write_course'] = self.app_context.fs.is_read_write()
self.template_value['is_super_admin'] = Roles.is_super_admin()
self.template_value[COURSE_BASE_KEY] = self.get_base_href(self)
template_environ = self.app_context.get_template_environ(
self.template_value[COURSE_INFO_KEY]['course']['locale'],
additional_dirs
)
template_environ.filters[
'gcb_tags'] = jinja_utils.get_gcb_tags_filter(self)
return template_environ.get_template(template_file)
def canonicalize_url(self, location):
"""Adds the current namespace URL prefix to the relative 'location'."""
is_relative = (
not self.is_absolute(location) and
not location.startswith(self.app_context.get_slug()))
has_slug = (
self.app_context.get_slug() and self.app_context.get_slug() != '/')
if is_relative and has_slug:
location = '%s%s' % (self.app_context.get_slug(), location)
return location
def redirect(self, location, normalize=True):
if normalize:
location = self.canonicalize_url(location)
super(ApplicationHandler, self).redirect(location)
class BaseHandler(ApplicationHandler):
"""Base handler."""
def __init__(self, *args, **kwargs):
super(BaseHandler, self).__init__(*args, **kwargs)
self.course = None
def get_course(self):
if not self.course:
self.course = Course(self)
return self.course
def find_unit_by_id(self, unit_id):
"""Gets a unit with a specific id or fails with an exception."""
return self.get_course().find_unit_by_id(unit_id)
def get_units(self):
"""Gets all units in the course."""
return self.get_course().get_units()
def get_lessons(self, unit_id):
"""Gets all lessons (in order) in the specific course unit."""
return self.get_course().get_lessons(unit_id)
def get_progress_tracker(self):
"""Gets the progress tracker for the course."""
return self.get_course().get_progress_tracker()
def get_user(self):
"""Get the current user."""
return users.get_current_user()
def personalize_page_and_get_user(self):
"""If the user exists, add personalized fields to the navbar."""
user = self.get_user()
PageInitializerService.get().initialize(self.template_value)
if hasattr(self, 'app_context'):
self.template_value['can_register'] = self.app_context.get_environ(
)['reg_form']['can_register']
if user:
self.template_value['email'] = user.email()
self.template_value['logoutUrl'] = (
users.create_logout_url(self.request.uri))
self.template_value['transient_student'] = False
# configure page events
self.template_value['record_tag_events'] = (
CAN_PERSIST_TAG_EVENTS.value)
self.template_value['record_page_events'] = (
CAN_PERSIST_PAGE_EVENTS.value)
self.template_value['record_events'] = (
CAN_PERSIST_ACTIVITY_EVENTS.value)
self.template_value['event_xsrf_token'] = (
XsrfTokenManager.create_xsrf_token('event-post'))
else:
self.template_value['loginUrl'] = users.create_login_url(
self.request.uri)
self.template_value['transient_student'] = True
return None
return user
def personalize_page_and_get_enrolled(
self, supports_transient_student=False):
"""If the user is enrolled, add personalized fields to the navbar."""
user = self.personalize_page_and_get_user()
if user is None:
student = TRANSIENT_STUDENT
else:
student = Student.get_enrolled_student_by_email(user.email())
if not student:
self.template_value['transient_student'] = True
student = TRANSIENT_STUDENT
if student.is_transient:
if supports_transient_student and (
self.app_context.get_environ()['course']['browsable']):
return TRANSIENT_STUDENT
elif user is None:
self.redirect(
users.create_login_url(self.request.uri), normalize=False
)
return None
else:
self.redirect('/preview')
return None
# Patch Student models which (for legacy reasons) do not have a user_id
# attribute set.
if not student.user_id:
student.user_id = user.user_id()
student.put()
return student
def assert_xsrf_token_or_fail(self, request, action):
"""Asserts the current request has proper XSRF token or fails."""
token = request.get('xsrf_token')
if not token or not XsrfTokenManager.is_xsrf_token_valid(token, action):
self.error(403)
return False
return True
def render(self, template_file):
"""Renders a template."""
template = self.get_template(template_file)
self.response.out.write(template.render(self.template_value))
class BaseRESTHandler(BaseHandler):
"""Base REST handler."""
def assert_xsrf_token_or_fail(self, token_dict, action, args_dict):
"""Asserts that current request has proper XSRF token or fails."""
token = token_dict.get('xsrf_token')
if not token or not XsrfTokenManager.is_xsrf_token_valid(token, action):
transforms.send_json_response(
self, 403,
'Bad XSRF token. Please reload the page and try again',
args_dict)
return False
return True
def validation_error(self, message, key=None):
"""Deliver a validation message."""
if key:
transforms.send_json_response(
self, 412, message, payload_dict={'key': key})
else:
transforms.send_json_response(self, 412, message)
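# Illustrative sketch of a JSON handler built on BaseRESTHandler (hypothetical,
# not registered anywhere): the posted XSRF token must match the action name,
# otherwise assert_xsrf_token_or_fail() sends back a 403 JSON response.
class _ExampleRESTHandler(BaseRESTHandler):
    ACTION = 'example-rest-save'
    def post(self):
        token_dict = {'xsrf_token': self.request.get('xsrf_token')}
        if not self.assert_xsrf_token_or_fail(token_dict, self.ACTION, {}):
            return
        transforms.send_json_response(self, 200, 'Saved.', payload_dict={})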
class PreviewHandler(BaseHandler):
"""Handler for viewing course preview."""
def get(self):
"""Handles GET requests."""
user = self.personalize_page_and_get_user()
if user is None:
student = TRANSIENT_STUDENT
else:
student = Student.get_enrolled_student_by_email(user.email())
if not student:
student = TRANSIENT_STUDENT
# If the course is browsable, or the student is logged in and
# registered, redirect to the main course page.
if ((student and not student.is_transient) or
self.app_context.get_environ()['course']['browsable']):
self.redirect('/course')
return
self.template_value['transient_student'] = True
self.template_value['can_register'] = self.app_context.get_environ(
)['reg_form']['can_register']
self.template_value['navbar'] = {'course': True}
self.template_value['units'] = self.get_units()
self.template_value['show_registration_page'] = True
course = self.app_context.get_environ()['course']
self.template_value['video_exists'] = bool(
'main_video' in course and
'url' in course['main_video'] and
course['main_video']['url'])
self.template_value['image_exists'] = bool(
'main_image' in course and
'url' in course['main_image'] and
course['main_image']['url'])
if user:
profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
additional_registration_fields = self.app_context.get_environ(
)['reg_form']['additional_registration_fields']
if profile is not None and not additional_registration_fields:
self.template_value['show_registration_page'] = False
self.template_value['register_xsrf_token'] = (
XsrfTokenManager.create_xsrf_token('register-post'))
self.render('preview.html')
class RegisterHandler(BaseHandler):
"""Handler for course registration."""
def get(self):
"""Handles GET request."""
user = self.personalize_page_and_get_user()
if not user:
self.redirect(
users.create_login_url(self.request.uri), normalize=False)
return
student = Student.get_enrolled_student_by_email(user.email())
if student:
self.redirect('/course')
return
can_register = self.app_context.get_environ(
)['reg_form']['can_register']
if not can_register:
self.redirect('/course#registration_closed')
return
# pre-fill nick name from the profile if available
self.template_value['current_name'] = ''
profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
if profile and profile.nick_name:
self.template_value['current_name'] = profile.nick_name
self.template_value['navbar'] = {}
self.template_value['transient_student'] = True
self.template_value['register_xsrf_token'] = (
XsrfTokenManager.create_xsrf_token('register-post'))
self.render('register.html')
def post(self):
"""Handles POST requests."""
user = self.personalize_page_and_get_user()
if not user:
self.redirect(
users.create_login_url(self.request.uri), normalize=False)
return
if not self.assert_xsrf_token_or_fail(self.request, 'register-post'):
return
can_register = self.app_context.get_environ(
)['reg_form']['can_register']
if not can_register:
self.redirect('/course#registration_closed')
return
if 'name_from_profile' in self.request.POST.keys():
profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
name = profile.nick_name
else:
name = self.request.get('form01')
Student.add_new_student_for_current_user(
name, transforms.dumps(self.request.POST.items()))
# Render registration confirmation page
self.redirect('/course#registration_confirmation')
class ForumHandler(BaseHandler):
"""Handler for forum page."""
def get(self):
"""Handles GET requests."""
student = self.personalize_page_and_get_enrolled(
supports_transient_student=True)
if not student:
return
self.template_value['navbar'] = {'forum': True}
self.render('forum.html')
class AnuncioHandler(BaseHandler):
"""Handler for forum page."""
def get(self):
"""Handles GET requests."""
student = self.personalize_page_and_get_enrolled(
supports_transient_student=True)
if not student:
return
self.template_value['navbar'] = {'plataforma': True}
self.render('plataform.html')
class CursosHandler(BaseHandler):
"""Handler for forum page."""
def get(self):
"""Handles GET requests."""
student = self.personalize_page_and_get_enrolled(
supports_transient_student=True)
if not student:
return
self.template_value['navbar'] = {'plataforma': True}
self.render('home.html')
class StudentProfileHandler(BaseHandler):
"""Handles the click to 'Progress' link in the nav bar."""
def get(self):
"""Handles GET requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
course = self.get_course()
name = student.name
profile = student.profile
if profile:
name = profile.nick_name
self.template_value['navbar'] = {'progress': True}
self.template_value['student'] = student
self.template_value['student_name'] = name
self.template_value['date_enrolled'] = student.enrolled_on.strftime(
HUMAN_READABLE_DATE_FORMAT)
self.template_value['score_list'] = course.get_all_scores(student)
self.template_value['overall_score'] = course.get_overall_score(student)
self.template_value['student_edit_xsrf_token'] = (
XsrfTokenManager.create_xsrf_token('student-edit'))
self.template_value['can_edit_name'] = (
not models.CAN_SHARE_STUDENT_PROFILE.value)
self.render('student_profile.html')
class StudentEditStudentHandler(BaseHandler):
"""Handles edits to student records by students."""
def post(self):
"""Handles POST requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
if not self.assert_xsrf_token_or_fail(self.request, 'student-edit'):
return
Student.rename_current(self.request.get('name'))
self.redirect('/student/home')
class StudentUnenrollHandler(BaseHandler):
"""Handler for students to unenroll themselves."""
def get(self):
"""Handles GET requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
self.template_value['student'] = student
self.template_value['navbar'] = {}
self.template_value['student_unenroll_xsrf_token'] = (
XsrfTokenManager.create_xsrf_token('student-unenroll'))
self.render('unenroll_confirmation_check.html')
def post(self):
"""Handles POST requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
if not self.assert_xsrf_token_or_fail(self.request, 'student-unenroll'):
return
Student.set_enrollment_status_for_current(False)
self.template_value['navbar'] = {}
self.template_value['transient_student'] = True
self.render('unenroll_confirmation.html')
class XsrfTokenManager(object):
"""Provides XSRF protection by managing action/user tokens in memcache."""
# Max age of the token (4 hours).
XSRF_TOKEN_AGE_SECS = 60 * 60 * 4
# Token delimiters.
DELIMITER_PRIVATE = ':'
DELIMITER_PUBLIC = '/'
    # Default user id to use when there is no logged-in user.
USER_ID_DEFAULT = 'default'
@classmethod
def init_xsrf_secret_if_none(cls):
"""Verifies that non-default XSRF secret exists; creates one if not."""
# Any non-default value is fine.
if XSRF_SECRET.value and XSRF_SECRET.value != XSRF_SECRET.default_value:
return
# All property manipulations must run in the default namespace.
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(
appengine_config.DEFAULT_NAMESPACE_NAME)
# Look in the datastore directly.
entity = ConfigPropertyEntity.get_by_key_name(XSRF_SECRET.name)
if not entity:
entity = ConfigPropertyEntity(key_name=XSRF_SECRET.name)
# Any non-default non-None value is fine.
if (entity.value and not entity.is_draft and
(str(entity.value) != str(XSRF_SECRET.default_value))):
return
# Initialize to random value.
entity.value = base64.urlsafe_b64encode(
os.urandom(XSRF_SECRET_LENGTH))
entity.is_draft = False
entity.put()
finally:
namespace_manager.set_namespace(old_namespace)
@classmethod
def _create_token(cls, action_id, issued_on):
"""Creates a string representation (digest) of a token."""
cls.init_xsrf_secret_if_none()
        # We use transient, stateless tokens to avoid datastore costs. The
        # public token has two parts: the plain-text issue time and a base64
        # HMAC digest computed over the user id, the action and the issue time.
# Lookup user id.
user = users.get_current_user()
if user:
user_id = user.user_id()
else:
user_id = cls.USER_ID_DEFAULT
# Round time to seconds.
issued_on = long(issued_on)
digester = hmac.new(str(XSRF_SECRET.value))
digester.update(str(user_id))
digester.update(cls.DELIMITER_PRIVATE)
digester.update(str(action_id))
digester.update(cls.DELIMITER_PRIVATE)
digester.update(str(issued_on))
digest = digester.digest()
token = '%s%s%s' % (
issued_on, cls.DELIMITER_PUBLIC, base64.urlsafe_b64encode(digest))
return token
@classmethod
def create_xsrf_token(cls, action):
return cls._create_token(action, time.time())
@classmethod
def is_xsrf_token_valid(cls, token, action):
"""Validate a given XSRF token by retrieving it from memcache."""
try:
parts = token.split(cls.DELIMITER_PUBLIC)
if len(parts) != 2:
return False
issued_on = long(parts[0])
age = time.time() - issued_on
if age > cls.XSRF_TOKEN_AGE_SECS:
return False
authentic_token = cls._create_token(action, issued_on)
if authentic_token == token:
return True
return False
except Exception: # pylint: disable=broad-except
return False
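# Minimal round-trip sketch for the token manager above (hypothetical action
# name; requires the App Engine APIs and the config system to be available):
# a token validates only for the action it was created for, until it expires.
def _example_xsrf_round_trip():
    token = XsrfTokenManager.create_xsrf_token('example-action')
    assert XsrfTokenManager.is_xsrf_token_valid(token, 'example-action')
    assert not XsrfTokenManager.is_xsrf_token_valid(token, 'other-action')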
| chessco/cursus | controllers/utils.py | Python | apache-2.0 | 25,293 | 0.000474 |
#!/usr/bin/env python
import re,urllib2
class Get_public_ip:
def getip(self):
try:
myip = self.visit("http://www.whereismyip.com/")
except:
try:
myip = self.visit("http://www.ip138.com/ip2city.asp")
except:
myip = "So sorry!!!"
return myip
def visit(self,url):
opener = urllib2.urlopen(url)
if url == opener.geturl():
str = opener.read()
return re.search('\d+\.\d+\.\d+\.\d+',str).group(0)
if __name__ == "__main__":
getmyip = Get_public_ip()
print getmyip.getip()
| ielnehc/ltmh | tools/get_public_ip.py | Python | mit | 610 | 0.02623 |
"""
Copyright (C) 2017-2021 Vanessa Sochat.
This Source Code Form is subject to the terms of the
Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from django.contrib.sitemaps import Sitemap
from shub.apps.main.models import Container, Collection
class BaseSitemap(Sitemap):
priority = 0.5
def location(self, obj):
return obj.get_absolute_url()
class ContainerSitemap(BaseSitemap):
changefreq = "weekly"
def lastmod(self, obj):
return obj.build_date
def items(self):
return [x for x in Container.objects.all() if x.collection.private is False]
class CollectionSitemap(BaseSitemap):
changefreq = "weekly"
def lastmod(self, obj):
return obj.modify_date
def items(self):
return [x for x in Collection.objects.all() if x.private is False]
| singularityhub/sregistry | shub/apps/base/sitemap.py | Python | mpl-2.0 | 917 | 0.001091 |
'''This folder contains various example scripts demonstrating MAVLink functionality.'''
| AndreasAntener/mavlink | pymavlink/examples/__init__.py | Python | lgpl-3.0 | 88 | 0.011364 |
from NVDAObjects.IAccessible import IAccessible
import nvwave
import speech
import os
class AutocompleteList(IAccessible):
def event_selection(self):
speech.cancelSpeech()
speech.speakText(self.name)
| ThomasStivers/nvda-notepadPlusPlus | addon/appModules/notepad++/autocomplete.py | Python | gpl-2.0 | 210 | 0.033333 |
"""file_parser.py reads text file and parse the item into a list."""
def file_to_list(input_file):
data_list_trim = []
try:
with open(input_file) as in_put:
input_data = in_put.readlines()
if len(input_data) == 1:
print()
data_list = input_data[0].replace('"', '').strip()
data_list_trim = data_list.split(',')
elif len(input_data) > 1:
print()
for row in input_data:
row_list = row.replace('"', '').strip()
row_list_trim = row_list.split(',')
data_list_trim = data_list_trim + row_list_trim
else:
print('no content is the file')
except OSError as err:
print('Failed to open file', err)
return data_list_trim
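# Minimal usage sketch: for a hypothetical 'input.txt' whose lines look like
# "alpha","beta","gamma"
# file_to_list returns ['alpha', 'beta', 'gamma'] -- quotes stripped, items
# trimmed, and multiple lines concatenated into one flat list.
if __name__ == '__main__':
    print(file_to_list('input.txt'))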
| roy-boy/python_scripts | file_parser.py | Python | gpl-3.0 | 847 | 0 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBigmemorySri(RPackage):
"""This package provides a shared resource interface
for the bigmemory and synchronicity packages."""
homepage = "https://cloud.r-project.org/web/packages/bigmemory.sri/index.html"
url = "https://cloud.r-project.org/src/contrib/bigmemory.sri_0.1.3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/bigmemory.sri"
version('0.1.3', sha256='55403252d8bae9627476d1f553236ea5dc7aa6e54da6980526a6cdc66924e155')
| iulian787/spack | var/spack/repos/builtin/packages/r-bigmemory-sri/package.py | Python | lgpl-2.1 | 708 | 0.00565 |
import pytest
from .utils import digit_float
import numpy as np
vowel_data_y_dimension = 11
@pytest.fixture
def vowel_data():
from esl_model.datasets import VowelDataSet
data = VowelDataSet()
return data.return_all()
@pytest.fixture
def SAHeart_data():
from esl_model.datasets import SAHeartDataSet
data = SAHeartDataSet()
return data.return_all()
def test_vowel_data():
from esl_model.datasets import VowelDataSet
data = VowelDataSet()
assert list(data.train_y[:5]) == list(range(1, 6))
data.select_features = data.feature_names[:2]
assert np.array_equal(data.train_x[:1], data._train_x.iloc[:1, :2].values)
ft = list(range(3))
data.select_features = ft
assert np.array_equal(data.train_x[:1], data._train_x.iloc[:1, ft].values)
def test_indicator_matrix(vowel_data):
from esl_model.ch4.models import LinearRegressionIndicatorMatrix
train_x, train_y, test_x, test_y, features = vowel_data
lrm = LinearRegressionIndicatorMatrix(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension)
lrm.pre_processing()
lrm.train()
print(lrm.error_rate)
test_result = lrm.test(test_x, test_y)
print(test_result.error_rate)
assert digit_float(lrm.error_rate) == 0.477
assert digit_float(test_result.error_rate) == 0.667
def test_LDA(vowel_data):
from esl_model.ch4.models import LDAModel
train_x, train_y, test_x, test_y, features = vowel_data
lda = LDAModel(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension)
lda.pre_processing()
lda.train()
print(lda.y_hat[:10])
print(lda.error_rate)
te = lda.test(test_x, test_y)
print(te.error_rate)
assert digit_float(lda.error_rate) == 0.316
assert digit_float(te.error_rate) == 0.556
def test_QDA(vowel_data):
from esl_model.ch4.models import QDAModel
train_x, train_y, test_x, test_y, features = vowel_data
qda = QDAModel(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension)
qda.pre_processing()
qda.train()
print(qda.y_hat[:10])
print(qda.error_rate)
te = qda.test(test_x, test_y).error_rate
print(te)
assert digit_float(qda.error_rate) == 0.011
assert digit_float(te) == 0.528
def test_RDA(vowel_data):
from esl_model.ch4.models import RDAModel
train_x, train_y, test_x, test_y, features = vowel_data
# http://waxworksmath.com/Authors/G_M/Hastie/WriteUp/weatherwax_epstein_hastie_solutions_manual.pdf
# pp 60
model = RDAModel(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension, alpha=0.969697)
model.pre_processing()
model.train()
print(model.error_rate)
te = model.test(test_x, test_y)
print(te.error_rate)
assert digit_float(te.error_rate) == 0.478
def test_LDA_computation(vowel_data):
from esl_model.ch4.models import LDAForComputation
train_x, train_y, test_x, test_y, features = vowel_data
model = LDAForComputation(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension)
model.pre_processing()
model.train()
from esl_model.ch4.models import LDAModel
lda = LDAModel(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension)
lda.pre_processing()
lda.train()
print(model.error_rate)
assert np.isclose(model.error_rate, lda.error_rate)
assert np.isclose(model.test(test_x, test_y).error_rate, lda.test(test_x, test_y).error_rate)
def test_RRLDA(vowel_data):
from esl_model.ch4.models import ReducedRankLDAModel
train_x, train_y, test_x, test_y, features = vowel_data
model = ReducedRankLDAModel(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension, L=2)
model.pre_processing()
model.train()
print(model.y_hat[:5])
print(model.error_rate)
te = model.test(test_x, test_y)
print(te.error_rate)
assert digit_float(model.error_rate) == 0.350
assert digit_float(te.error_rate) == 0.491
def test_SAHeart_data_set(SAHeart_data):
x, y, *_ = SAHeart_data
assert x[1, 2] == 4.41
assert list(y[:4]) == [1, 1, 0, 1]
def test_binary_logistic_regression(SAHeart_data):
from esl_model.datasets import SAHeartDataSet
data = SAHeartDataSet(select_features=[1, 2, 4, 8])
from esl_model.ch4.models import BinaryLogisticRegression
train_x = data.train_x
train_y = data.train_y
model = BinaryLogisticRegression(train_x=train_x, train_y=train_y, n_class=2, do_standardization=False)
model.pre_processing()
model.train()
print(model.beta_hat)
print(model.error_rate)
print('yhat', model.y_hat[:5])
print(repr(model.std_err))
print('z score', model.z_score)
eq_beta_hat = np.array([[-4.20427542],
[0.08070059],
[0.16758415],
[0.92411669],
[0.04404247]])
eq_std_err = np.array([0.498348, 0.02551477, 0.05418979, 0.22318295, 0.00974321])
assert np.allclose(model.beta_hat, eq_beta_hat)
assert digit_float(model.error_rate) == 0.268
assert np.allclose(model.std_err, eq_std_err)
data = SAHeartDataSet(select_features=[0, 1, 2, 4, 6, 7, 8])
train_x = data.train_x
train_y = data.train_y
model = BinaryLogisticRegression(train_x=train_x, train_y=train_y, n_class=2, do_standardization=False)
model.pre_processing()
model.train()
assert digit_float(model.error_rate) == 0.271
| littlezz/ESL-Model | tests/test_ch4.py | Python | mit | 5,437 | 0.002391 |
import os
import sys
from pathlib import Path
class Setup:
CONFIGURATION_FILE = os.path.join(Path(__file__).parents[1], "config", "server.cfg")
VLC_DEFAULT_COMMAND = "vlc -f"
POSIX = 'posix' in sys.builtin_module_names
VLC_PLAYLIST_END = "vlc://quit"
| danfr/RemoteTV | Server/bin/Setup.py | Python | mit | 269 | 0.003717 |
"""
Proposal Target Operator selects foreground and background roi and assigns label, bbox_transform to them.
"""
from __future__ import print_function
import mxnet as mx
import numpy as np
from distutils.util import strtobool
from rcnn.io.rcnn import sample_rois
DEBUG = False
class ProposalTargetOperator(mx.operator.CustomOp):
def __init__(self, num_classes, batch_images, batch_rois, fg_fraction):
super(ProposalTargetOperator, self).__init__()
self._num_classes = num_classes
self._batch_images = batch_images
self._batch_rois = batch_rois
self._fg_fraction = fg_fraction
if DEBUG:
self._count = 0
self._fg_num = 0
self._bg_num = 0
def forward(self, is_train, req, in_data, out_data, aux):
assert self._batch_rois % self._batch_images == 0, \
            'BATCH_IMAGES {} must divide BATCH_ROIS {}'.format(self._batch_images, self._batch_rois)
rois_per_image = self._batch_rois / self._batch_images
fg_rois_per_image = np.round(self._fg_fraction * rois_per_image).astype(int)
all_rois = in_data[0].asnumpy()
gt_boxes = in_data[1].asnumpy()
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack((all_rois, np.hstack((zeros, gt_boxes[:, :-1]))))
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), 'Only single item batches are supported'
rois, labels, bbox_targets, bbox_weights = \
sample_rois(all_rois, fg_rois_per_image, rois_per_image, self._num_classes, gt_boxes=gt_boxes)
if DEBUG:
print("labels=", labels)
print('num fg: {}'.format((labels > 0).sum()))
print('num bg: {}'.format((labels == 0).sum()))
self._count += 1
self._fg_num += (labels > 0).sum()
self._bg_num += (labels == 0).sum()
print("self._count=", self._count)
print('num fg avg: {}'.format(self._fg_num / self._count))
print('num bg avg: {}'.format(self._bg_num / self._count))
print('ratio: {:.3f}'.format(float(self._fg_num) / float(self._bg_num)))
for ind, val in enumerate([rois, labels, bbox_targets, bbox_weights]):
self.assign(out_data[ind], req[ind], val)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
self.assign(in_grad[1], req[1], 0)
@mx.operator.register('proposal_target')
class ProposalTargetProp(mx.operator.CustomOpProp):
def __init__(self, num_classes, batch_images, batch_rois, fg_fraction='0.25'):
super(ProposalTargetProp, self).__init__(need_top_grad=False)
self._num_classes = int(num_classes)
self._batch_images = int(batch_images)
self._batch_rois = int(batch_rois)
self._fg_fraction = float(fg_fraction)
def list_arguments(self):
return ['rois', 'gt_boxes']
def list_outputs(self):
return ['rois_output', 'label', 'bbox_target', 'bbox_weight']
def infer_shape(self, in_shape):
rpn_rois_shape = in_shape[0]
gt_boxes_shape = in_shape[1]
output_rois_shape = (self._batch_rois, 5)
label_shape = (self._batch_rois, )
bbox_target_shape = (self._batch_rois, self._num_classes * 4)
bbox_weight_shape = (self._batch_rois, self._num_classes * 4)
return [rpn_rois_shape, gt_boxes_shape], \
[output_rois_shape, label_shape, bbox_target_shape, bbox_weight_shape]
def create_operator(self, ctx, shapes, dtypes):
return ProposalTargetOperator(self._num_classes, self._batch_images, self._batch_rois, self._fg_fraction)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
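# A minimal symbolic usage sketch. The hyper-parameter values below are
# illustrative assumptions, not values taken from this repository:
if __name__ == '__main__':
    rois = mx.symbol.Variable('rois')
    gt_boxes = mx.symbol.Variable('gt_boxes')
    group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes, op_type='proposal_target',
                             num_classes=21, batch_images=1, batch_rois=128,
                             fg_fraction='0.25')
    # outputs: rois_output, label, bbox_target, bbox_weight
    print(group.list_outputs())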
| likelyzhao/mxnet | example/rcnn/rcnn/symbol/proposal_target.py | Python | apache-2.0 | 3,893 | 0.002312 |
"""
Tests for structural time series models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import warnings
from statsmodels.datasets import macrodata
from statsmodels.tsa.statespace import structural
from statsmodels.tsa.statespace.structural import UnobservedComponents
from .results import results_structural
from statsmodels.tools import add_constant
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
dta = macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-07-01', freq='QS')
def run_ucm(name):
true = getattr(results_structural, name)
for model in true['models']:
kwargs = model.copy()
kwargs.update(true['kwargs'])
# Make a copy of the data
values = dta.copy()
freq = kwargs.pop('freq', None)
if freq is not None:
values.index = pd.date_range(start='1959-01-01', periods=len(dta),
freq=freq)
# Test pandas exog
if 'exog' in kwargs:
# Default value here is pd.Series object
exog = np.log(values['realgdp'])
# Also allow a check with a 1-dim numpy array
if kwargs['exog'] == 'numpy':
exog = exog.values.squeeze()
kwargs['exog'] = exog
# Create the model
mod = UnobservedComponents(values['unemp'], **kwargs)
# Smoke test for starting parameters, untransform, transform
# Also test that transform and untransform are inverses
mod.start_params
assert_allclose(mod.start_params, mod.transform_params(mod.untransform_params(mod.start_params)))
# Fit the model at the true parameters
res_true = mod.filter(true['params'])
# Check that the cycle bounds were computed correctly
freqstr = freq[0] if freq is not None else values.index.freqstr[0]
if freqstr == 'A':
cycle_period_bounds = (1.5, 12)
elif freqstr == 'Q':
cycle_period_bounds = (1.5*4, 12*4)
elif freqstr == 'M':
cycle_period_bounds = (1.5*12, 12*12)
else:
# If we have no information on data frequency, require the
# cycle frequency to be between 0 and pi
cycle_period_bounds = (2, np.inf)
# Test that the cycle frequency bound is correct
assert_equal(mod.cycle_frequency_bound,
(2*np.pi / cycle_period_bounds[1],
2*np.pi / cycle_period_bounds[0])
)
# Test that the likelihood is correct
rtol = true.get('rtol', 1e-7)
atol = true.get('atol', 0)
assert_allclose(res_true.llf, true['llf'], rtol=rtol, atol=atol)
# Smoke test for plot_components
if have_matplotlib:
fig = res_true.plot_components()
plt.close(fig)
# Now fit the model via MLE
with warnings.catch_warnings(record=True) as w:
res = mod.fit(disp=-1)
# If we found a higher likelihood, no problem; otherwise check
# that we're very close to that found by R
if res.llf <= true['llf']:
assert_allclose(res.llf, true['llf'], rtol=1e-4)
# Smoke test for summary
res.summary()
def test_irregular():
run_ucm('irregular')
def test_fixed_intercept():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_intercept')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_constant():
run_ucm('deterministic_constant')
def test_random_walk():
run_ucm('random_walk')
def test_local_level():
run_ucm('local_level')
def test_fixed_slope():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_slope')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_trend():
run_ucm('deterministic_trend')
def test_random_walk_with_drift():
run_ucm('random_walk_with_drift')
def test_local_linear_deterministic_trend():
run_ucm('local_linear_deterministic_trend')
def test_local_linear_trend():
run_ucm('local_linear_trend')
def test_smooth_trend():
run_ucm('smooth_trend')
def test_random_trend():
run_ucm('random_trend')
def test_cycle():
run_ucm('cycle')
def test_seasonal():
run_ucm('seasonal')
def test_reg():
run_ucm('reg')
def test_rtrend_ar1():
run_ucm('rtrend_ar1')
def test_lltrend_cycle_seasonal_reg_ar1():
run_ucm('lltrend_cycle_seasonal_reg_ar1')
def test_mle_reg():
endog = np.arange(100)*1.0
exog = endog*2
# Make the fit not-quite-perfect
endog[::2] += 0.01
endog[1::2] -= 0.01
with warnings.catch_warnings(record=True) as w:
mod1 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=False)
res1 = mod1.fit(disp=-1)
mod2 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=True)
res2 = mod2.fit(disp=-1)
assert_allclose(res1.regression_coefficients.filtered[0, -1], 0.5, atol=1e-5)
assert_allclose(res2.params[1], 0.5, atol=1e-5)
def test_specifications():
endog = [1, 2]
# Test that when nothing specified, a warning is issued and the model that
# is fit is one with irregular=True and nothing else.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog)
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'irregular')
# Test an invalid string trend specification
assert_raises(ValueError, UnobservedComponents, endog, 'invalid spec')
# Test that if a trend component is specified without a level component,
# a warning is issued and a deterministic level component is added
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog, trend=True, irregular=True)
message = ("Trend component specified without level component;"
" deterministic level component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'deterministic trend')
# Test that if a string specification is provided, a warning is issued if
# the boolean attributes are also specified
trend_attributes = ['irregular', 'trend', 'stochastic_level',
'stochastic_trend']
for attribute in trend_attributes:
with warnings.catch_warnings(record=True) as w:
kwargs = {attribute: True}
mod = UnobservedComponents(endog, 'deterministic trend', **kwargs)
message = ("Value of `%s` may be overridden when the trend"
" component is specified using a model string."
% attribute)
assert_equal(str(w[0].message), message)
# Test that a seasonal with period less than two is invalid
assert_raises(ValueError, UnobservedComponents, endog, seasonal=1)
def test_start_params():
# Test that the behavior is correct for multiple exogenous and / or
# autoregressive components
# Parameters
nobs = int(1e4)
beta = np.r_[10, -2]
phi = np.r_[0.5, 0.1]
# Generate data
np.random.seed(1234)
exog = np.c_[np.ones(nobs), np.arange(nobs)*1.0]
eps = np.random.normal(size=nobs)
endog = np.zeros(nobs+2)
for t in range(1, nobs):
endog[t+1] = phi[0] * endog[t] + phi[1] * endog[t-1] + eps[t]
endog = endog[2:]
endog += np.dot(exog, beta)
# Now just test that the starting parameters are approximately what they
# ought to be (could make this arbitrarily precise by increasing nobs,
# but that would slow down the test for no real gain)
mod = UnobservedComponents(endog, exog=exog, autoregressive=2)
assert_allclose(mod.start_params, [1., 0.5, 0.1, 10, -2], atol=1e-1)
def test_forecast():
endog = np.arange(50) + 10
exog = np.arange(50)
mod = UnobservedComponents(endog, exog=exog, level='dconstant')
res = mod.smooth([1e-15, 1])
actual = res.forecast(10, exog=np.arange(50,60)[:,np.newaxis])
desired = np.arange(50,60) + 10
assert_allclose(actual, desired)
| saketkc/statsmodels | statsmodels/tsa/statespace/tests/test_structural.py | Python | bsd-3-clause | 9,003 | 0.001666 |
from __future__ import absolute_import
from .base import Filter
from six.moves.urllib.parse import urlparse
from sentry.utils.data_filters import FilterStatKeys
LOCAL_IPS = frozenset(['127.0.0.1', '::1'])
LOCAL_DOMAINS = frozenset(['127.0.0.1', 'localhost'])
class LocalhostFilter(Filter):
id = FilterStatKeys.LOCALHOST
name = 'Filter out events coming from localhost'
description = 'This applies to both IPv4 (``127.0.0.1``) and IPv6 (``::1``) addresses.'
def get_ip_address(self, data):
try:
return data['sentry.interfaces.User']['ip_address']
except KeyError:
return ''
def get_url(self, data):
try:
return data['sentry.interfaces.Http']['url'] or ''
except KeyError:
return ''
def get_domain(self, data):
return urlparse(self.get_url(data)).hostname
def test(self, data):
return self.get_ip_address(data) in LOCAL_IPS or self.get_domain(data) in LOCAL_DOMAINS
| ifduyue/sentry | src/sentry/filters/localhost.py | Python | bsd-3-clause | 996 | 0.002008 |
'''
Puck: FreeBSD virtualization guest configuration server
Copyright (C) 2011 The Hotel Communication Network inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os.path
import cherrypy
from libs.controller import *
import models
from models import Users
class RootController(Controller):
crumbs = [Crumb("/", "Home")]
def __init__(self, lookup):
Controller.__init__(self, lookup)
self._lookup = lookup
self._routes = {}
@cherrypy.expose
@cherrypy.tools.myauth()
def index(self):
return self.render("index.html", self.crumbs[:-1])
@cherrypy.expose
def login(self, **post):
if post:
self._login(post)
return self.render("login.html", self.crumbs[:-1])
@cherrypy.expose
def logout(self, **post):
cherrypy.session.delete()
raise cherrypy.HTTPRedirect("/login")
def add(self, route, cls):
self._routes[route] = cls
def load(self):
[setattr(self, route, self._routes[route](self._lookup)) for route in self._routes]
def _login(self, post):
fields = ['user.username', 'user.password']
for f in fields:
if not f in post:
cherrypy.session['flash'] = "Invalid form data."
return False
hash_password = Users.hash_password(post['user.password'])
user = Users.first(username=post['user.username'], password=hash_password)
if not user:
cherrypy.session['flash'] = 'Invalid username or password.'
return False
creds = user.generate_auth()
cherrypy.session['user.id'] = user.id
cherrypy.session['user.group'] = user.user_group
cherrypy.session['credentials'] = creds
raise cherrypy.HTTPRedirect('/index')
| masom/Puck | server/controllers/root.py | Python | lgpl-3.0 | 2,450 | 0.002041 |
# -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2014 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
from amoco.arch.avr.env import *
from amoco.cas.mapper import mapper
# ------------------------------------------------------------------------------
# low level functions :
def _push_(fmap, x):
fmap[sp] = fmap[sp] - x.length
fmap[mem(sp, x.size)] = x
def _pop_(fmap, _l):
fmap[_l] = fmap(mem(sp, _l.size))
fmap[sp] = fmap[sp] + _l.length
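# __pc decorates each instruction semantic below: it advances pc by the
# instruction length and, when a skip condition has been queued (by
# CPSE/SBRC/SBRS), re-applies the wrapped semantics conditionally through
# tst() so that a skipped instruction has no unconditional effect.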
def __pc(f):
def pcnpc(i, fmap):
fmap[pc] = fmap[pc] + i.length
if len(fmap.conds) > 0:
cond = fmap.conds.pop()
m = mapper()
f(i, m)
for l, v in m:
fmap[l] = tst(cond, v, fmap(l))
else:
f(i, fmap)
return pcnpc
def __nopc(f):
return f.__closure__[0].cell_contents
# flags for arithmetic operations:
def __setflags__A(i, fmap, a, b, x, neg=False):
fmap[zf] = x == 0
fmap[nf] = x.bit(7)
if neg:
a, x = ~a, ~x
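    # for subtraction (neg=True) the first operand and the result are
    # complemented above so the same carry/overflow bit expressions as for
    # addition can be reused below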
fmap[cf] = (
((a.bit(7)) & (b.bit(7)))
| ((b.bit(7)) & (~x.bit(7)))
| ((a.bit(7)) & (~x.bit(7)))
)
fmap[vf] = ((a.bit(7)) & (b.bit(7)) & (~x.bit(7))) | (
(~a.bit(7)) & (~b.bit(7)) & (x.bit(7))
)
fmap[sf] = fmap[nf] ^ fmap[vf]
fmap[hf] = (
((a.bit(3)) & (b.bit(3)))
| ((b.bit(3)) & (~x.bit(3)))
| ((a.bit(3)) & (~x.bit(3)))
)
# flags for logical operations:
def __setflags__L(i, fmap, a, b, x):
fmap[zf] = x == 0
fmap[nf] = x.bit(7)
fmap[vf] = bit0
fmap[sf] = fmap[nf] ^ fmap[vf]
# flags for shift operations:
def __setflags__S(i, fmap, a, x):
# cf must be set before calling this function.
fmap[zf] = x == 0
fmap[nf] = x.bit(7)
fmap[vf] = fmap[nf] ^ fmap[cf]
fmap[sf] = fmap[nf] ^ fmap[vf]
# ixxx is the translation of AVR instruction xxx.
# ------------------------------------------------------------------------------
@__pc
def i_NOP(i, fmap):
pass
def i_SLEEP(i, fmap):
fmap[pc] = ext("SLEEP", size=pc.size).call(fmap)
def i_BREAK(i, fmap):
fmap[pc] = ext("BREAK", size=pc.size).call(fmap)
def i_IN(i, fmap):
r, port = i.operands
fmap[pc] = ext("IN", size=pc.size).call(fmap)
def i_OUT(i, fmap):
port, r = i.operands
fmap[pc] = ext("OUT", size=pc.size).call(fmap)
# arithmetic & logic instructions:
##################################
@__pc
def i_ADD(i, fmap):
dst, src = i.operands
a = fmap(dst)
b = fmap(src)
x = a + b
__setflags__A(i, fmap, a, b, x)
fmap[dst] = x
@__pc
def i_ADIW(i, fmap):
dst, src = i.operands
if i.misc["W"]:
assert dst is R[24]
a = fmap(composer([dst, R[25]]))
else:
a = fmap(dst)
b = fmap(src)
x = a + b
__setflags__A(i, fmap, a, b, x)
fmap[dst] = x[0 : dst.size]
if i.misc["W"]:
assert x.size == 16
fmap[R[25]] = x[8:16]
@__pc
def i_ADC(i, fmap):
dst, src = i.operands
_c = fmap[cf]
__nopc(i_ADD)(i, fmap)
a = fmap(dst)
b = tst(_c, cst(1, a.size), cst(0, a.size))
x = a + b
__setflags__A(i, fmap, a, b, x)
fmap[dst] = x
@__pc
def i_INC(i, fmap):
dst = i.operands[0]
a = fmap(dst)
b = cst(1, dst.size)
x = a + b
fmap[zf] = x == 0
fmap[nf] = x.bit(7)
fmap[vf] = a == cst(0x7F, 8)
fmap[sf] = fmap[nf] ^ fmap[vf]
fmap[dst] = x
@__pc
def i_CP(i, fmap):
dst, src = i.operands
a = fmap(dst)
b = fmap(src)
x = a - b
__setflags__A(i, fmap, a, b, x, neg=True)
@__pc
def i_CPSE(i, fmap):
rd, rr = i.operands
    fmap.conds.append(fmap(rd == rr))
@__pc
def i_SBRC(i, fmap):
b = i.operands[0]
    fmap.conds.append(fmap(b == bit0))
@__pc
def i_SBRS(i, fmap):
b = i.operands[0]
    fmap.conds.append(fmap(b == bit1))
@__pc
def i_SUB(i, fmap):
dst, src = i.operands
a = fmap(dst)
b = fmap(src)
x = a - b
__setflags__A(i, fmap, a, b, x, neg=True)
fmap[dst] = x
i_SUBI = i_SUB
@__pc
def i_SBIW(i, fmap):
dst, src = i.operands
if i.misc["W"]:
assert dst is R[24]
a = fmap(composer([dst, R[25]]))
else:
a = fmap(dst)
b = fmap(src)
x = a - b
__setflags__A(i, fmap, a, b, x, neg=True)
fmap[dst] = x[0 : dst.size]
if i.misc["W"]:
assert x.size == 16
fmap[R[25]] = x[8:16]
@__pc
def i_COM(i, fmap):
dst, src = i.operands
a = cst(0xFF, 8)
b = fmap(dst)
x = a - b
__setflags__A(i, fmap, a, b, x, neg=True)
fmap[dst] = x
@__pc
def i_NEG(i, fmap):
dst = i.operands[0]
a = cst(0, dst.size)
b = fmap(dst)
x = a - b
__setflags__A(i, fmap, a, b, x, neg=True)
fmap[dst] = x
@__pc
def i_DEC(i, fmap):
dst = i.operands[0]
a = fmap(dst)
b = cst(1, dst.size)
x = a - b
fmap[zf] = x == 0
fmap[nf] = x.bit(7)
fmap[vf] = a == cst(0x80, 8)
fmap[sf] = fmap[nf] ^ fmap[vf]
fmap[dst] = x
@__pc
def i_CPC(i, fmap):
dst, src = i.operands
a = fmap(dst)
b = fmap(src)
_c = fmap[cf]
__nopc(i_CP)(i, fmap)
a = fmap(a - b)
b = tst(_c, cst(1, a.size), cst(0, a.size))
x = a - b
__setflags__A(i, fmap, a, b, x, neg=True)
@__pc
def i_SBC(i, fmap):
dst, src = i.operands
_c = fmap[cf]
__nopc(i_SUB)(i, fmap)
a = fmap(dst)
b = tst(_c, cst(1, a.size), cst(0, a.size))
x = a - b
__setflags__A(i, fmap, a, b, x, neg=True)
fmap[dst] = x
i_SBCI = i_SBC
@__pc
def i_AND(i, fmap):
dst, src = i.operands
a = fmap(dst)
b = fmap(src)
x = a & b
__setflags__L(i, fmap, a, b, x)
fmap[dst] = x
i_ANDI = i_AND
@__pc
def i_OR(i, fmap):
dst, src = i.operands
a = fmap(dst)
b = fmap(src)
x = a | b
__setflags__L(i, fmap, a, b, x)
fmap[dst] = x
i_ORI = i_OR
@__pc
def i_EOR(i, fmap):
dst, src = i.operands
a = fmap(dst)
b = fmap(src)
x = a ^ b
__setflags__L(i, fmap, a, b, x)
fmap[dst] = x
@__pc
def i_MUL(i, fmap):
dst, src = i.operands
a = fmap(dst)
b = fmap(src)
x = a ** b
fmap[cf] = x[15:16]
fmap[zf] = x == 0
fmap[R[0]] = x[0:8]
fmap[R[1]] = x[8:16]
# shift/rotate instructions:
############################
@__pc
def i_LSL(i, fmap):
dst = i.operands[0]
a = fmap(dst)
fmap[cf] = a.bit(7)
x = a << 1
__setflags__S(i, fmap, a, x)
fmap[dst] = x
@__pc
def i_LSR(i, fmap):
dst = i.operands[0]
a = fmap(dst)
fmap[cf] = a.bit(0)
x = a >> 1
__setflags__S(i, fmap, a, x)
fmap[dst] = x
@__pc
def i_ASR(i, fmap):
dst = i.operands[0]
a = fmap(dst)
fmap[cf] = a.bit(0)
x = a & 0x80
x |= a >> 1
__setflags__S(i, fmap, a, x)
fmap[dst] = x
@__pc
def i_ROL(i, fmap):
dst = i.operands[0]
a = fmap(dst)
c = fmap[cf].zeroextend(a.size)
fmap[cf] = a.bit(7)
x = a << 1
x |= c
__setflags__S(i, fmap, a, x)
fmap[dst] = x
@__pc
def i_ROR(i, fmap):
dst = i.operands[0]
a = fmap(dst)
c = fmap[cf]
fmap[cf] = a.bit(0)
x = composer([cst(0, 7), c])
x |= a >> 1
__setflags__S(i, fmap, a, x)
fmap[dst] = x
# bit instructions:
###################
@__pc
def i_SWAP(i, fmap):
b = i.operands[0]
x = fmap(b)
fmap[b] = composer([x[4:8], x[0:4]])
@__pc
def i_BCLR(i, fmap):
b = i.operands[0]
fmap[b] = bit0
@__pc
def i_BSET(i, fmap):
b = i.operands[0]
fmap[b] = bit1
@__pc
def i_BST(i, fmap):
b = i.operands[0]
fmap[tf] = fmap(b)
@__pc
def i_BLD(i, fmap):
b = i.operands[0]
fmap[b] = fmap(tf)
# stack instructions:
#####################
@__pc
def i_POP(i, fmap):
dst = i.operands[0]
_pop_(fmap, dst)
@__pc
def i_PUSH(i, fmap):
src = i.operands[0]
_push_(fmap, src)
# load-store instructions:
##########################
@__pc
def i_LD(i, fmap):
dst, src = i.operands
if i.misc["flg"] == -1:
fmap[src] = fmap(src - 1)
fmap[dst] = fmap(mem(src, dst.size))
if i.misc["flg"] == 1:
fmap[src] = fmap(src + 1)
i_LDS = i_LD
i_LDD = i_LD
@__pc
def i_ST(i, fmap):
dst, src = i.operands
if i.misc["flg"] == -1:
fmap[dst] = fmap(dst - 1)
adr = fmap(dst)
fmap[ptr(adr)] = fmap(src)
if i.misc["flg"] == 1:
fmap[dst] = fmap(dst + 1)
i_STS = i_ST
i_STD = i_ST
@__pc
def i_MOV(i, fmap):
dst, src = i.operands
fmap[dst] = fmap(src)
i_LDI = i_MOV
@__pc
def i_MOVW(i, fmap):
dst, src = i.operands
fmap[dst] = fmap(src)
nd = R[R.index(dst) + 1]
nr = R[R.index(src) + 1]
fmap[nd] = fmap(nr)
@__pc
def i_SPM(i, fmap):
fmap[mem(Z, 16)] = fmap(composer([R[0], R[1]]))
@__pc
def i_LPM(i, fmap):
try:
dst, src = i.operands
except ValueError:
dst, src = R[0], Z
fmap[dst] = fmap(Z)
if i.misc["flg"] == 1:
fmap[Z] = fmap(Z + 1)
# control-flow instructions:
############################
@__pc
def i_BRBC(i, fmap):
b, offset = i.operands
fmap[pc] = fmap(tst(b == bit0, pc + (2 * offset), pc))
@__pc
def i_BRBS(i, fmap):
b, offset = i.operands
fmap[pc] = fmap(tst(b == bit1, pc + (2 * offset), pc))
@__pc
def i_CALL(i, fmap):
adr = i.operands[0]
_push_(fmap, fmap(pc))
fmap[pc] = fmap(2 * adr)
@__pc
def i_JMP(i, fmap):
adr = i.operands[0]
fmap[pc] = fmap(2 * adr)
@__pc
def i_RET(i, fmap):
_pop_(fmap, pc)
@__pc
def i_RETI(i, fmap):
_pop_(fmap, pc)
fmap[i_] = bit1
@__pc
def i_RCALL(i, fmap):
offset = i.operands[0]
_push_(fmap, fmap(pc))
fmap[pc] = fmap(pc + (2 * offset))
@__pc
def i_RJMP(i, fmap):
offset = i.operands[0]
fmap[pc] = fmap(pc + (2 * offset))
@__pc
def i_EICALL(i, fmap):
raise NotImplementedError
@__pc
def i_EIJMP(i, fmap):
raise NotImplementedError
@__pc
def i_ICALL(i, fmap):
_push_(fmap, fmap(pc))
fmap[pc] = fmap(Z)
@__pc
def i_IJMP(i, fmap):
fmap[pc] = fmap(Z)
| LRGH/amoco | amoco/arch/avr/asm.py | Python | gpl-2.0 | 10,020 | 0.000499 |
"""T2W modality class."""
import warnings
import numpy as np
import SimpleITK as sitk
from .standalone_modality import StandaloneModality
from ..utils.validation import check_path_data
class T2WModality(StandaloneModality):
"""Class to handle T2W-MRI modality.
Parameters
----------
path_data : str, optional (default=None)
The folder in which the data are stored.
Attributes
----------
path_data_ : string
Location of the data.
data_ : ndarray, shape (Y, X, Z)
        The data of the T2W volume. The data are saved in
        (Y, X, Z) order.
metadata_ : dict
Dictionnary which contain the MRI sequence information. Note that the
information are given in the original ordering (X, Y, Z), which is
different from the organisation of `data_` which is (Y, X, Z).
pdf_ : list, length (n_serie)
        List of the PDFs, one for each series.
bin_ : list of ndarray, length (n_serie)
List of the bins used to plot the pdfs.
max_ : float
Maximum intensity of the T2W-MRI volume.
min_ : float
Minimum intensity of the T2W-MRI volume.
"""
def __init__(self, path_data=None):
super(T2WModality, self).__init__(path_data=path_data)
def get_pdf(self, roi_data=None, nb_bins='auto'):
""" Extract the a list of pdf related with the data.
Parameters
----------
roi_data : tuple
Indices of elements to consider while computing the histogram.
The ROI is a 3D volume which will be used for each time serie.
nb_bins : list of int or str, optional (default='auto')
The numbers of bins to use to compute the histogram.
The possibilities are:
- If 'auto', the number of bins is found at fitting time.
- If None, the number of bins used is the one at the last
call of update histogram.
- Otherwise, a list of integer needs to be given.
Returns
-------
        pdf_data : ndarray, shape (nb_bins,)
            The computed PDF (normalized histogram) of the data.
        bin_data : ndarray, shape (nb_bins + 1,)
            The bin edges associated with `pdf_data`.
"""
# Check that the data have been read
if self.data_ is None:
raise ValueError('You need to load the data first. Refer to the'
' function read_data_from_path().')
# Build the histogram corresponding to the current volume
# Find how many bins do we need
if isinstance(nb_bins, basestring):
if nb_bins == 'auto':
nb_bins = int(np.round(self.max_ - self.min_))
else:
raise ValueError('Unknown parameters for `nb_bins.`')
elif isinstance(nb_bins, int):
pass
elif nb_bins is None:
nb_bins = self.nb_bins_
else:
raise ValueError('Unknown type for the parameters `nb_bins`.')
if roi_data is None:
pdf_data, bin_data = np.histogram(self.data_,
bins=nb_bins,
density=True)
else:
pdf_data, bin_data = np.histogram(self.data_[roi_data],
bins=nb_bins,
density=True)
return pdf_data, bin_data
def update_histogram(self, nb_bins=None):
"""Update the PDF and the first-order statistics.
Parameters
----------
nb_bins : int or None, optional (default=None)
The numbers of bins to use to compute the histogram.
The possibilities are:
- If None, the number of bins found at reading will be used.
- If 'auto', the number of bins is found at fitting time.
- Otherwise, an integer needs to be given.
Returns
-------
self : object
Returns self.
Notes
-----
        There is the possibility to redefine the number of bins used for
        the histogram, since it can be tricky to work with normalized data.
"""
# Check if the data have been read
if self.data_ is None:
raise ValueError('You need to read the data first. Call the'
' function read_data_from_path()')
# Compute the min and max from the T2W volume
self.max_ = np.ndarray.max(self.data_)
self.min_ = np.ndarray.min(self.data_)
# Build the histogram corresponding to the current volume
# Find how many bins do we need
if isinstance(nb_bins, basestring):
if nb_bins == 'auto':
nb_bins = int(np.round(self.max_ - self.min_))
else:
raise ValueError('Unknown parameters for `nb_bins.`')
elif nb_bins is None:
nb_bins = self.nb_bins_
self.pdf_, self.bin_ = np.histogram(self.data_,
bins=nb_bins,
density=True)
return self
def read_data_from_path(self, path_data=None):
"""Read T2W images which represent a single 3D volume.
Parameters
----------
path_data : str or None, optional (default=None)
Path to the standalone modality data.
Returns
-------
self : object
Returns self.
"""
# Check the consistency of the path data
if self.path_data_ is not None and path_data is not None:
# We will overide the path and raise a warning
warnings.warn('The data path will be overriden using the path'
' given in the function.')
self.path_data_ = check_path_data(path_data)
elif self.path_data_ is None and path_data is not None:
self.path_data_ = check_path_data(path_data)
elif self.path_data_ is None and path_data is None:
raise ValueError('You need to give a path_data from where to read'
' the data.')
# Create a reader object
reader = sitk.ImageSeriesReader()
# Find the different series present inside the folder
series = np.array(reader.GetGDCMSeriesIDs(self.path_data_))
        # Check that there is not more than one series
if len(series) > 1:
raise ValueError('The number of series should not be larger than'
' 1 with standalone modality.')
# The data can be read
dicom_names_serie = reader.GetGDCMSeriesFileNames(self.path_data_)
# Set the list of files to read the volume
reader.SetFileNames(dicom_names_serie)
# Read the data for the current volume
vol = reader.Execute()
# Get a numpy volume
vol_numpy = sitk.GetArrayFromImage(vol)
# The Matlab convention is (Y, X, Z)
# The Numpy convention is (Z, Y, X)
# We have to swap these axis
# Swap Z and X
vol_numpy = np.swapaxes(vol_numpy, 0, 2)
vol_numpy = np.swapaxes(vol_numpy, 0, 1)
# Convert the volume to float
vol_numpy = vol_numpy.astype(np.float64)
# We can create a numpy array
self.data_ = vol_numpy
# Compute the information regarding the T2W images
# Set the number of bins that will be later used to compute
# the histogram
self.nb_bins_ = int(np.round(np.ndarray.max(self.data_) -
np.ndarray.min(self.data_)))
self.update_histogram()
# Store the DICOM metadata
self.metadata_ = {}
# Get the information that have been created by SimpleITK
# Information about data reconstruction
self.metadata_['size'] = vol.GetSize()
self.metadata_['origin'] = vol.GetOrigin()
self.metadata_['direction'] = vol.GetDirection()
self.metadata_['spacing'] = vol.GetSpacing()
# Information about the MRI sequence
# Read the first image for the sequence
im = sitk.ReadImage(dicom_names_serie[0])
self.metadata_['TR'] = float(im.GetMetaData('0018|0080'))
self.metadata_['TE'] = float(im.GetMetaData('0018|0081'))
self.metadata_['flip-angle'] = float(im.GetMetaData('0018|1314'))
return self
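# A minimal usage sketch; the DICOM folder path below is hypothetical:
# t2w = T2WModality(path_data='/path/to/t2w_dicom_series')
# t2w.read_data_from_path()
# pdf, bins = t2w.get_pdf(nb_bins=100)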
| glemaitre/protoclass | protoclass/data_management/t2w_modality.py | Python | gpl-2.0 | 8,496 | 0 |
import os
import errno
def delete_file(file_name, dry=False):
if dry:
print(' DRY DELETED: {}'.format(file_name))
else:
os.remove(file_name)
try:
dirname = os.path.dirname(file_name)
os.rmdir(dirname)
print(' DELETED DIR: {}'.format(dirname))
except OSError as ex:
if ex.errno != errno.ENOTEMPTY:
raise
print(' DELETED: {}'.format(file_name))
def run_dircmpdel(dircmp_file, prompt=True, dry=False):
"""
Parse dircmp file for groups of file names to be deleted.
"""
with open(dircmp_file) as fp:
lines = fp.read()
groups = lines.strip().split('\n\n')
print('Found {} duplicate groups'.format(len(groups)))
groups = (group.split('\n') for group in groups)
checked_proper_cwd = False
for group in groups:
for i, file_name in enumerate(group):
if not i:
if not checked_proper_cwd:
if not os.path.exists(file_name):
raise RuntimeError('File {} could not be found. '
'Please ensure you are in the '
'correct directory.'
''.format(file_name))
checked_proper_cwd = True
print('Deleting duplicates of {}'.format(file_name))
else:
if prompt:
while True:
resp = input(' Delete {}? '.format(file_name))
resp = resp.lower()
if resp not in ('yes', 'no'):
print('Please answer "yes" or "no".')
elif resp == 'yes':
delete_file(file_name, dry=dry)
break
elif resp == 'no':
print(' Not deleted: {}'.format(file_name))
break
else:
delete_file(file_name, dry=dry)
print()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Utility for deleting duplicate files found by dircmp'
)
parser.add_argument('file')
parser.add_argument('--no-prompt',
action='store_false', default=True, dest='prompt')
parser.add_argument('-d', '--dry',
action='store_true', default=False, dest='dry')
args = parser.parse_args()
run_dircmpdel(args.file, prompt=args.prompt, dry=args.dry)
| logston/python-dircmp | dircmppy/dircmpdel.py | Python | bsd-2-clause | 2,648 | 0.000755 |
'''
This component package and its subpackages contain wrapper and glue code for database operations.
''' | exratione/thywill-python | thywill_server/src/thywill_server/database/__init__.py | Python | mit | 105 | 0.028571 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/tabs/campaign.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import threading
import time
from king_phisher import errors
from king_phisher import find
from king_phisher import ipaddress
from king_phisher import utilities
from king_phisher.client import export
from king_phisher.client import graphs
from king_phisher.client import gui_utilities
from king_phisher.client.widget import extras
from king_phisher.client.widget import managers
import advancedhttpserver
from gi.repository import GdkPixbuf
from gi.repository import GLib
from gi.repository import Gtk
import rule_engine
from smoke_zephyr.utilities import parse_timespan
UNKNOWN_LOCATION_STRING = 'N/A (Unknown)'
def _dt_field(value):
return value if value is None else utilities.datetime_utc_to_local(value)
class CampaignViewGenericTab(gui_utilities.GladeGObject):
"""
This object is meant to be subclassed by all of the tabs which load and
display information about the current campaign.
"""
label_text = 'Unknown'
"""The label of the tab for display in the GUI."""
top_gobject = 'box'
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTab, self).__init__(*args, **kwargs)
self.label = Gtk.Label(label=self.label_text)
"""The :py:class:`Gtk.Label` representing this tab with text from :py:attr:`~.CampaignViewGenericTab.label_text`."""
self.is_destroyed = threading.Event()
getattr(self, self.top_gobject).connect('destroy', self.signal_destroy)
self.last_load_time = float('-inf')
"""The last time the data was loaded from the server."""
self.refresh_frequency = parse_timespan(str(self.config.get('gui.refresh_frequency', '5m')))
"""The lifetime in seconds to wait before refreshing the data from the server."""
self.loader_thread = None
"""The thread object which loads the data from the server."""
self.loader_thread_lock = threading.Lock()
"""The :py:class:`threading.Lock` object used for synchronization between the loader and main threads."""
self.loader_thread_stop = threading.Event()
"""The :py:class:`threading.Event` object used to request that the loader thread stop before completion."""
self.application.connect('campaign-set', self.signal_kpc_campaign_set)
def _sync_loader_thread(self):
"""
Synchronize the loader thread by ensuring that it is stopped. If it is
currently running, this will use :py:attr:`~.loader_thread_stop` to
request that the loader stops early.
"""
if not self.loader_thread_is_running:
return
# it's alive so tell it to stop, wait for it, then proceed
self.loader_thread_stop.set()
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.loader_thread.join(1)
@property
def rpc(self):
return self.application.rpc
@property
def loader_thread_is_running(self):
if self.loader_thread is None:
return False
return self.loader_thread.is_alive()
def load_campaign_information(self, force=True):
raise NotImplementedError()
def signal_button_clicked_refresh(self, button):
self.load_campaign_information()
def signal_destroy(self, gobject):
self.is_destroyed.set()
self.loader_thread_stop.set()
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
self.logger.debug("waiting on thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.logger.debug("joined thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
def signal_kpc_campaign_set(self, *_):
self.load_campaign_information()
class CampaignViewGenericTableTab(CampaignViewGenericTab):
"""
This object is meant to be subclassed by tabs which will display campaign
information of different types from specific database tables. The data in
this object is refreshed when multiple events occur and it uses an internal
timer to represent the last time the data was refreshed.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_refresh',
'entry_filter',
'label_filter_summary',
'revealer_filter',
'treeview_campaign'
)
)
node_query = None
"""
The GraphQL query used to load a particular node from the remote table. This
query is provided with a single parameter of the node's id.
"""
table_name = ''
"""The database table represented by this tab."""
table_query = None
"""
The GraphQL query used to load the desired information from the remote
table. This query is provided with the following three parameters: campaign,
count and cursor.
"""
secret_columns = ()
view_columns = ()
xlsx_worksheet_options = None
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTableTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
self.treeview_manager = managers.TreeViewManager(
treeview,
selection_mode=Gtk.SelectionMode.MULTIPLE,
cb_delete=self._prompt_to_delete_row,
cb_refresh=self.load_campaign_information
)
self.treeview_manager.set_column_titles(
self.view_column_titles,
column_offset=1,
renderers=tuple(column.cell_renderer() for column in self.view_columns)
)
for column in self.view_columns:
if isinstance(column, extras.ColumnDefinitionDatetime):
self.treeview_manager.column_views[column.title].set_fixed_width(150)
self.popup_menu = self.treeview_manager.get_popup_menu()
"""The :py:class:`Gtk.Menu` object which is displayed when right-clicking in the view area."""
treeview = self.gobjects['treeview_campaign']
self._rule = None
self._rule_context = rule_engine.Context(type_resolver=rule_engine.type_resolver_from_dict(
dict((column.name, rule_engine.DataType.from_type(column.python_type)) for column in self.view_columns)
))
view_column_types = tuple(column.g_type for column in self.view_columns)
self._tv_model = Gtk.ListStore(str, *view_column_types)
self._tv_model_filter = self._tv_model.filter_new()
self._tv_model_filter.set_visible_func(self._tv_filter)
tree_model_sort = Gtk.TreeModelSort(model=self._tv_model_filter)
for idx, column in enumerate(self.view_columns, 1):
if column.sort_function is not None:
tree_model_sort.set_sort_func(idx, column.sort_function, idx)
treeview.set_model(tree_model_sort)
self.application.connect('server-connected', self.signal_kp_server_connected)
tab_config = self._tab_config
filter_revealer = self.gobjects['revealer_filter']
filter_revealer.set_reveal_child(tab_config['filter.show'])
menu_item = Gtk.CheckMenuItem.new_with_label('Show Filter')
menu_item.set_active(filter_revealer.get_reveal_child())
menu_item.connect('toggled', self.signal_toggled_show_filter)
menu_item.show()
self.popup_menu.append(menu_item)
submenu = Gtk.Menu.new()
menu_item = Gtk.MenuItem.new_with_label('Show Columns')
menu_item.set_submenu(submenu)
menu_item.show()
self.popup_menu.append(menu_item)
visisble_columns = tab_config['columns.show']
for column in self.view_columns:
if column.title in self.secret_columns:
continue
visible = visisble_columns.get(column.title, True)
self.treeview_manager.column_views[column.title].set_visible(visible)
menu_item = Gtk.CheckMenuItem.new_with_label(column.title)
menu_item.set_active(visible)
menu_item.connect('toggled', self.signal_toggled_show_column, column)
menu_item.show()
submenu.append(menu_item)
visisble_columns[column.title] = visible
def __async_rpc_cb_server_event_db_inserted(self, results):
node = results['db']['node']
row_data = (str(node['id']),) + tuple(self.format_node_data(node))
self._tv_model.append(row_data)
def __async_rpc_cb_server_event_db_updated(self, results):
node = results['db']['node']
if node is None:
self.logger.warning('received server db event: updated but could not fetch the remote data')
return
node_id = str(node['id'])
ti = gui_utilities.gtk_list_store_search(self._tv_model, node_id)
if ti is None:
self.logger.warning("received server db event: updated for non-existent row {0}:{1}".format(self.table_name, node_id))
return
row_data = tuple(self.format_node_data(node))
for idx, cell_data in enumerate(row_data, 1):
self._tv_model[ti][idx] = cell_data
@property
def _tab_config(self):
name = 'campaign.tab.' + self.table_name
if name not in self.config:
self.config[name] = {
'columns.show': {column.title: True for column in self.view_columns},
'filter.show': False
}
return self.config[name]
def signal_entry_changed_filter(self, entry):
text = entry.get_text()
self._rule = None
label = self.gobjects['label_filter_summary']
if text:
try:
self._rule = rule_engine.Rule(text, context=self._rule_context)
except rule_engine.EngineError:
entry.set_property('secondary-icon-stock', 'gtk-dialog-warning')
return
entry.set_property('secondary-icon-stock', None)
self._tv_model_filter.refilter()
visible_records = len(self._tv_model_filter)
all_records = len(self._tv_model)
label.set_text("Showing {:,} of {:,} {} ({:.1f}%)".format(
visible_records,
all_records,
self.label_text.lower(),
((visible_records / all_records) if all_records > 0 else 1.0) * 100
))
def signal_toggled_show_column(self, widget, column):
visible = widget.get_active()
self.treeview_manager.column_views[column.title].set_visible(visible)
self._tab_config['columns.show'][column.title] = visible
def signal_toggled_show_filter(self, widget):
active = widget.get_active()
self.gobjects['revealer_filter'].set_reveal_child(active)
if active:
self.gobjects['entry_filter'].grab_focus()
self._tab_config['filter.show'] = active
def signal_kp_server_connected(self, _):
event_id = 'db-' + self.table_name.replace('_', '-')
server_events = self.application.server_events
if not server_events:
return
server_events.subscribe(event_id, ('deleted', 'inserted', 'updated'), ('id', 'campaign_id'))
server_events.connect(event_id, self.signal_server_event_db)
def signal_server_event_db(self, _, event_type, rows):
for row in rows:
if str(row.campaign_id) != self.config['campaign_id']:
continue
if event_type == 'deleted':
ti = gui_utilities.gtk_list_store_search(self._tv_model, str(row.id))
if ti is None:
self.logger.warning("received server db event: deleted for non-existent row {0}:{1}".format(self.table_name, str(row.id)))
else:
gui_utilities.glib_idle_add_wait(self._tv_model.remove, ti)
elif event_type == 'inserted':
self.rpc.async_graphql(
self.node_query,
query_vars={'id': str(row.id)},
on_success=self.__async_rpc_cb_server_event_db_inserted,
when_idle=True
)
elif event_type == 'updated':
self.rpc.async_graphql(
self.node_query,
query_vars={'id': str(row.id)},
on_success=self.__async_rpc_cb_server_event_db_updated,
when_idle=True
)
def _export_lock(self):
show_dialog_warning = lambda: gui_utilities.show_dialog_warning('Export Failed', self.parent, 'Can not export data while loading.')
if not self.loader_thread_lock.acquire(False):
show_dialog_warning()
return False
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
self.loader_thread_lock.release()
show_dialog_warning()
return False
return True
def _prompt_to_delete_row(self, treeview, _):
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
gui_utilities.show_dialog_warning('Can Not Delete Rows While Loading', self.parent)
return
model = treeview.get_model()
row_ids = [model.get_value(ti, 0) for ti in gui_utilities.gtk_treeview_selection_iterate(treeview)]
if len(row_ids) == 0:
return
elif len(row_ids) == 1:
message = 'Delete This Row?'
else:
message = "Delete These {0:,} Rows?".format(len(row_ids))
if not gui_utilities.show_dialog_yes_no(message, self.parent, 'This information will be lost.'):
return
self.application.emit(self.table_name[:-1].replace('_', '-') + '-delete', row_ids)
def _tv_filter(self, model, tree_iter, _):
if self._rule is None:
return True
model_row = model[tree_iter]
row = {}
for idx, column in enumerate(self.view_columns, 1):
row[column.name] = model_row[idx]
try:
return self._rule.matches(row)
except rule_engine.EngineError:
return True
def format_node_data(self, node):
"""
This method is overridden by subclasses to format the raw node
data returned from the server. The length of the list must equal
the number of columns in the table. This method is called for
each node in the remote table by the loader thread.
:param dict node: The node from a GraphQL query representing data for this table.
:return: The formatted row data.
:rtype: list
"""
raise NotImplementedError()
def load_campaign_information(self, force=True):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the
:py:attr:`~.CampaignViewGenericTab.last_load_time` is compared
with the :py:attr:`~.CampaignViewGenericTab.refresh_frequency` to
check if the information is stale. If the local data is not stale,
this function will return without updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
with self.loader_thread_lock:
self._sync_loader_thread()
self.loader_thread_stop.clear()
self._tv_model.clear()
self.loader_thread = utilities.Thread(target=self.loader_thread_routine, args=(self._tv_model,))
self.loader_thread.daemon = True
self.loader_thread.start()
return
def loader_thread_routine(self, store):
"""
The loading routine to be executed within a thread.
:param store: The store object to place the new data.
:type store: :py:class:`Gtk.ListStore`
"""
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', False))
campaign_id = self.config['campaign_id']
count = 500
page_info = {'endCursor': None, 'hasNextPage': True}
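        # walk the relay-style GraphQL connection page by page, passing the
        # endCursor from each response back as the cursor until hasNextPage
        # is False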
while page_info['hasNextPage']:
if self.rpc is None:
break
try:
results = self.rpc.graphql(self.table_query, {'campaign': campaign_id, 'count': count, 'cursor': page_info['endCursor']})
except errors.KingPhisherGraphQLQueryError as error:
self.logger.error('graphql error: ' + error.message)
raise
if self.loader_thread_stop.is_set():
break
if self.is_destroyed.is_set():
break
for edge in results['db']['campaign'][self.table_name]['edges']:
node = edge['node']
row_data = (str(node['id']),) + tuple(self.format_node_data(node))
gui_utilities.glib_idle_add_wait(store.append, row_data)
page_info = results['db']['campaign'][self.table_name]['pageInfo']
if self.is_destroyed.is_set():
return
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', True))
self.last_load_time = time.time()
def signal_button_clicked_export(self, button):
self.export_table_to_csv(filtered=True)
def export_table_to_csv(self, filtered=False):
"""Export the data represented by the view to a CSV file."""
if not self._export_lock():
return
dialog = extras.FileChooserDialog('Export Data', self.parent)
file_name = self.config['campaign_name'] + '.csv'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
self.loader_thread_lock.release()
return
destination_file = response['target_path']
if filtered:
store = self._tv_model_filter
else:
store = self._tv_model
columns = dict(enumerate(('UID',) + self.view_column_titles))
export.liststore_to_csv(store, destination_file, columns)
self.loader_thread_lock.release()
def export_table_to_xlsx_worksheet(self, worksheet, title_format):
"""
Export the data represented by the view to an XLSX worksheet.
:param worksheet: The destination sheet for the store's data.
:type worksheet: :py:class:`xlsxwriter.worksheet.Worksheet`
:param title_format: The formatting to use for the title row.
:type title_format: :py:class:`xlsxwriter.format.Format`
"""
if not self._export_lock():
return
store = self._tv_model
columns = dict(enumerate(('UID',) + self.view_column_titles))
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(20,) + tuple(column.width for column in self.view_columns),
title=self.label_text
)
export.liststore_to_xlsx_worksheet(store, worksheet, columns, title_format, xlsx_options=xlsx_worksheet_options)
self.loader_thread_lock.release()
@property
def view_column_titles(self):
return tuple(column.title for column in self.view_columns)
class CampaignViewDeaddropTab(CampaignViewGenericTableTab):
"""Display campaign information regarding dead drop connections."""
table_name = 'deaddrop_connections'
label_text = 'Deaddrop Connections'
node_query = """\
query getDeaddropConnection($id: String!) {
db {
node: deaddropConnection(id: $id) {
id
deaddropDeployment { destination }
count
ip
localUsername
localHostname
localIpAddresses
firstSeen
lastSeen
}
}
}
"""
table_query = """\
query getDeaddropConnections($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
deaddrop_connections: deaddropConnections(first: $count, after: $cursor) {
total
edges {
node {
id
deaddropDeployment {
id
destination
}
count
ip
localUsername
localHostname
localIpAddresses
firstSeen
lastSeen
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
extras.ColumnDefinitionString('Destination'),
extras.ColumnDefinitionInteger('Visit Count'),
extras.ColumnDefinitionString('IP Address', width=25),
extras.ColumnDefinitionString('Username'),
extras.ColumnDefinitionString('Hostname'),
extras.ColumnDefinitionString('Local IP Addresses'),
extras.ColumnDefinitionDatetime('First Hit'),
extras.ColumnDefinitionDatetime('Last Hit'),
)
def format_node_data(self, connection):
deaddrop_destination = connection['deaddropDeployment']['destination']
if not deaddrop_destination:
return None
row = (
deaddrop_destination,
connection['count'],
connection['ip'],
connection['localUsername'],
connection['localHostname'],
connection['localIpAddresses'],
_dt_field(connection['firstSeen']),
_dt_field(connection['lastSeen'])
)
return row
class CampaignViewCredentialsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding submitted credentials."""
table_name = 'credentials'
label_text = 'Credentials'
node_query = """\
query getCredential($id: String!) {
db {
node: credential(id: $id) {
id
submitted
message { targetEmail }
username
password
mfaToken
regexValidated
}
}
}
"""
table_query = """\
query getCredentials($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
credentials(first: $count, after: $cursor) {
total
edges {
node {
id
message { targetEmail }
submitted
username
password
mfaToken
regexValidated
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
secret_columns = ('Password', 'MFA Token')
view_columns = (
extras.ColumnDefinitionString('Email Address'),
extras.ColumnDefinitionDatetime('Submitted'),
extras.ColumnDefinitionString('Validation', width=20),
extras.ColumnDefinitionString('Username'),
extras.ColumnDefinitionString('Password'),
extras.ColumnDefinitionString('MFA Token', width=20),
)
def __init__(self, *args, **kwargs):
super(CampaignViewCredentialsTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
for column_name in self.secret_columns:
treeview.get_column(self.view_column_titles.index(column_name)).set_property('visible', False)
def format_node_data(self, node):
regex_validated = ''
if node['regexValidated'] is not None:
regex_validated = 'Pass' if node['regexValidated'] else 'Fail'
row = (
node['message']['targetEmail'],
_dt_field(node['submitted']),
regex_validated,
node['username'],
node['password'],
node['mfaToken']
)
return row
def signal_button_toggled_show_secrets(self, button):
treeview = self.gobjects['treeview_campaign']
visible = button.get_property('active')
for column_name in self.secret_columns:
treeview.get_column(self.view_column_titles.index(column_name)).set_property('visible', visible)
class CampaignViewDashboardTab(CampaignViewGenericTab):
"""Display campaign information on a graphical dash board."""
dependencies = gui_utilities.GladeDependencies(
children=(
'box_top_left',
'box_top_right',
'box_bottom',
'scrolledwindow_top_left',
'scrolledwindow_top_right',
'scrolledwindow_bottom'
)
)
label_text = 'Dashboard'
"""The tabs label for display in the GUI."""
def __init__(self, *args, **kwargs):
super(CampaignViewDashboardTab, self).__init__(*args, **kwargs)
self.graphs = []
"""The :py:class:`.CampaignGraph` classes represented on the dash board."""
dash_ports = {
# dashboard position, (width, height)
'top_left': (380, 200),
'top_right': (380, 200),
'bottom': (760, 200)
}
for dash_port, details in dash_ports.items():
graph_name = self.config['dashboard.' + dash_port]
cls = graphs.get_graph(graph_name)
if not cls:
self.logger.warning('could not get graph: ' + graph_name)
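				# fall back to showing the King Phisher logo in place of the unavailable graph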
logo_file_path = find.data_file('king-phisher-icon.svg')
if logo_file_path:
image = Gtk.Image.new_from_pixbuf(GdkPixbuf.Pixbuf.new_from_file_at_size(logo_file_path, 128, 128))
image.show()
self.gobjects['scrolledwindow_' + dash_port].add(image)
continue
graph_inst = cls(self.application, details, getattr(self, self.top_gobject).get_style_context())
self.gobjects['scrolledwindow_' + dash_port].add(graph_inst.canvas)
self.gobjects['box_' + dash_port].pack_end(graph_inst.navigation_toolbar, False, False, 0)
self.graphs.append(graph_inst)
self.logger.debug("dashboard refresh frequency set to {0} seconds".format(self.refresh_frequency))
GLib.timeout_add_seconds(self.refresh_frequency, self.loader_idle_routine)
def load_campaign_information(self, force=True):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the :py:attr:`~.last_load_time` is compared with
the :py:attr:`~.refresh_frequency` to check if the information is stale.
		If the local data is not stale, this function will return without
		updating the display.
		:param bool force: Ignore the last load time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
if not self.application.rpc:
self.logger.warning('skipping load_campaign_information because rpc is not initialized')
return
with self.loader_thread_lock:
self._sync_loader_thread()
self.loader_thread_stop.clear()
self.loader_thread = utilities.Thread(target=self.loader_thread_routine)
self.loader_thread.daemon = True
self.loader_thread.start()
def loader_idle_routine(self):
"""The routine which refreshes the campaign data at a regular interval."""
if self.rpc and not self.loader_thread_is_running:
self.logger.debug('idle loader routine called')
self.load_campaign_information()
return True
def loader_thread_routine(self):
"""The loading routine to be executed within a thread."""
		if 'campaign_id' not in self.config:
return
try:
campaign = self.application.get_graphql_campaign()
except (ConnectionError, advancedhttpserver.RPCConnectionError):
return
if campaign is None:
return
info_cache = {}
for graph in self.graphs:
if self.loader_thread_stop.is_set():
break
if self.is_destroyed.is_set():
break
info_cache.update(gui_utilities.glib_idle_add_wait(lambda g=graph: g.refresh(info_cache, self.loader_thread_stop)))
else:
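			# for/else: only record a successful load when the loop completed without a break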
self.last_load_time = time.time()
class CampaignViewVisitsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding incoming visitors."""
table_name = 'visits'
label_text = 'Visits'
node_query = """\
query getVisit($id: String!) {
db {
node: visit(id: $id) {
id
message { targetEmail }
ip
count
userAgent
ipGeoloc { city }
firstSeen
lastSeen
}
}
}
"""
table_query = """\
query getVisits($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
visits(first: $count, after: $cursor) {
total
edges {
node {
id
message { targetEmail }
ip
count
userAgent
ipGeoloc { city }
firstSeen
lastSeen
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
extras.ColumnDefinitionString('Email Address'),
extras.ColumnDefinitionString('IP Address', width=25),
extras.ColumnDefinitionInteger('Visit Count'),
extras.ColumnDefinitionString('Visitor User Agent', width=90),
extras.ColumnDefinitionString('Visitor Location'),
extras.ColumnDefinitionDatetime('First Visit'),
extras.ColumnDefinitionDatetime('Last Visit'),
)
def format_node_data(self, node):
geo_location = UNKNOWN_LOCATION_STRING
visitor_ip = node['ip']
if visitor_ip is None:
visitor_ip = ''
else:
visitor_ip = ipaddress.ip_address(visitor_ip)
if visitor_ip.is_loopback:
geo_location = 'N/A (Loopback)'
elif visitor_ip.is_private:
geo_location = 'N/A (Private)'
elif isinstance(visitor_ip, ipaddress.IPv6Address):
geo_location = 'N/A (IPv6 Address)'
elif node['ipGeoloc']:
geo_location = node['ipGeoloc']['city']
row = (
node['message']['targetEmail'],
str(visitor_ip),
node['count'],
node['userAgent'],
geo_location,
_dt_field(node['firstSeen']),
_dt_field(node['lastSeen'])
)
return row
class CampaignViewMessagesTab(CampaignViewGenericTableTab):
"""Display campaign information regarding sent messages."""
table_name = 'messages'
label_text = 'Messages'
node_query = """\
query getMessage($id: String!) {
db {
node: message(id: $id) {
id
targetEmail
sent
trained
companyDepartment { name }
opened
openerIp
openerUserAgent
deliveryStatus
deliveryDetails
}
}
}
"""
table_query = """\
query getMessages($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
messages(first: $count, after: $cursor) {
total
edges {
node {
id
targetEmail
sent
trained
companyDepartment { name }
opened
openerIp
openerUserAgent
deliveryStatus
deliveryDetails
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
extras.ColumnDefinitionString('Email Address'),
extras.ColumnDefinitionDatetime('Sent'),
extras.ColumnDefinitionString('Trained', width=15),
extras.ColumnDefinitionString('Department'),
extras.ColumnDefinitionDatetime('Opened'),
extras.ColumnDefinitionString('Opener IP Address', width=25),
extras.ColumnDefinitionString('Opener User Agent', width=90),
extras.ColumnDefinitionString('Delivery Status'),
extras.ColumnDefinitionString('Delivery Details')
)
def format_node_data(self, node):
department = node['companyDepartment']
if department:
department = department['name']
row = (
node['targetEmail'],
_dt_field(node['sent']),
('Yes' if node['trained'] else ''),
department,
_dt_field(node['opened']),
node['openerIp'],
node['openerUserAgent'],
node['deliveryStatus'],
node['deliveryDetails']
)
return row
class CampaignViewTab(object):
"""
The King Phisher client top-level 'View Campaign' tab. This object
manages the sub-tabs which display all the information regarding
the current campaign.
"""
def __init__(self, parent, application):
"""
:param parent: The parent window for this object.
:type parent: :py:class:`Gtk.Window`
:param application: The main client application instance.
:type application: :py:class:`Gtk.Application`
"""
self.parent = parent
self.application = application
self.config = application.config
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
self.box = Gtk.Box()
self.box.set_property('orientation', Gtk.Orientation.VERTICAL)
self.box.show()
self.label = Gtk.Label(label='View Campaign')
"""The :py:class:`Gtk.Label` representing this tabs name."""
self.notebook = Gtk.Notebook()
""" The :py:class:`Gtk.Notebook` for holding sub-tabs."""
self.notebook.connect('switch-page', self.signal_notebook_switch_page)
self.notebook.set_scrollable(True)
self.box.pack_start(self.notebook, True, True, 0)
self.tabs = utilities.FreezableDict()
"""A dict object holding the sub tabs managed by this object."""
current_page = self.notebook.get_current_page()
self.last_page_id = current_page
if graphs.has_matplotlib:
self.logger.info('matplotlib is installed, dashboard will be available')
dashboard_tab = CampaignViewDashboardTab(application)
self.tabs['dashboard'] = dashboard_tab
self.notebook.append_page(dashboard_tab.box, dashboard_tab.label)
else:
self.logger.warning('matplotlib is not installed, dashboard will not be available')
messages_tab = CampaignViewMessagesTab(application)
self.tabs['messages'] = messages_tab
self.notebook.append_page(messages_tab.box, messages_tab.label)
visits_tab = CampaignViewVisitsTab(application)
self.tabs['visits'] = visits_tab
self.notebook.append_page(visits_tab.box, visits_tab.label)
credentials_tab = CampaignViewCredentialsTab(application)
self.tabs['credentials'] = credentials_tab
self.notebook.append_page(credentials_tab.box, credentials_tab.label)
if self.config.get('gui.show_deaddrop', False):
deaddrop_connections_tab = CampaignViewDeaddropTab(application)
self.tabs['deaddrop_connections'] = deaddrop_connections_tab
self.notebook.append_page(deaddrop_connections_tab.box, deaddrop_connections_tab.label)
self.tabs.freeze()
for tab in self.tabs.values():
tab.box.show()
self.notebook.show()
def signal_notebook_switch_page(self, notebook, current_page, index):
if not hasattr(self.parent, 'rpc'):
return
#previous_page = notebook.get_nth_page(self.last_page_id)
self.last_page_id = index
for tab in self.tabs.values():
if current_page != tab.box:
continue
if hasattr(tab, 'load_campaign_information'):
tab.load_campaign_information(force=False)
| securestate/king-phisher | king_phisher/client/tabs/campaign.py | Python | bsd-3-clause | 32,510 | 0.027592 |
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wlauto.core.resource import Resource
class FileResource(Resource):
"""
    Base class for all resources that are regular files in the
    file system.
"""
def delete(self, instance):
os.remove(instance)
class File(FileResource):
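    """A regular file identified by a filesystem path and an optional source URL."""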
name = 'file'
def __init__(self, owner, path, url=None):
super(File, self).__init__(owner)
self.path = path
self.url = url
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
class ExtensionAsset(File):
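    """A file asset belonging to an extension; its path is prefixed with the owning extension's name."""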
name = 'extension_asset'
def __init__(self, owner, path):
super(ExtensionAsset, self).__init__(owner, os.path.join(owner.name, path))
class Executable(FileResource):
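    """A binary executable, identified by the target platform and the file name."""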
name = 'executable'
def __init__(self, owner, platform, filename):
super(Executable, self).__init__(owner)
self.platform = platform
self.filename = filename
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
| Sticklyman1936/workload-automation | wlauto/common/resources.py | Python | apache-2.0 | 1,635 | 0.001223 |
"""This module implements the Lovins stemming algorithm. Use the ``stem()``
function::
stemmed_word = stem(word)
"""
from whoosh.util.collections2 import defaultdict
# Conditions
def A(base):
# A No restrictions on stem
return True
def B(base):
# B Minimum stem length = 3
return len(base) > 2
def C(base):
# C Minimum stem length = 4
return len(base) > 3
def D(base):
# D Minimum stem length = 5
return len(base) > 4
def E(base):
# E Do not remove ending after e
return base[-1] != "e"
def F(base):
# F Minimum stem length = 3 and do not remove ending after e
return len(base) > 2 and base[-1] != "e"
def G(base):
# G Minimum stem length = 3 and remove ending only after f
return len(base) > 2 and base[-1] == "f"
def H(base):
# H Remove ending only after t or ll
c1, c2 = base[-2:]
return c2 == "t" or (c2 == "l" and c1 == "l")
def I(base):
# I Do not remove ending after o or e
c = base[-1]
return c != "o" and c != "e"
def J(base):
# J Do not remove ending after a or e
c = base[-1]
return c != "a" and c != "e"
def K(base):
# K Minimum stem length = 3 and remove ending only after l, i or u*e
    if len(base) <= 2:
        return False
    c = base[-1]
    return c == "l" or c == "i" or (c == "e" and base[-3] == "u")
def L(base):
# L Do not remove ending after u, x or s, unless s follows o
c1, c2 = base[-2:]
return c2 != "u" and c2 != "x" and (c2 != "s" or c1 == "o")
def M(base):
# M Do not remove ending after a, c, e or m
c = base[-1]
return c != "a" and c!= "c" and c != "e" and c != "m"
def N(base):
# N Minimum stem length = 4 after s**, elsewhere = 3
return len(base) > 3 or (len(base) == 3 and base[-1] != "s")
def O(base):
# O Remove ending only after l or i
c = base[-1]
return c == "l" or c == "i"
def P(base):
# P Do not remove ending after c
return base[-1] != "c"
def Q(base):
# Q Minimum stem length = 3 and do not remove ending after l or n
c = base[-1]
return len(base) > 2 and (c != "l" and c != "n")
def R(base):
# R Remove ending only after n or r
c = base[-1]
return c == "n" or c == "r"
def S(base):
# S Remove ending only after dr or t, unless t follows t
    l2 = base[-2:]
    return l2 == "dr" or (base[-1] == "t" and l2 != "tt")
def T(base):
# T Remove ending only after s or t, unless t follows o
c1, c2 = base[-2:]
return c2 == "s" or (c2 == "t" and c1 != "o")
def U(base):
# U Remove ending only after l, m, n or r
c = base[-1]
return c == "l" or c == "m" or c == "n" or c == "r"
def V(base):
# V Remove ending only after c
return base[-1] == "c"
def W(base):
# W Do not remove ending after s or u
c = base[-1]
return c != "s" and c != "u"
def X(base):
# X Remove ending only after l, i or u*e
    c = base[-1]
    return c == "l" or c == "i" or (c == "e" and len(base) > 2 and base[-3] == "u")
def Y(base):
# Y Remove ending only after in
return base[-2:] == "in"
def Z(base):
# Z Do not remove ending after f
return base[-1] != "f"
def a(base):
# a Remove ending only after d, f, ph, th, l, er, or, es or t
c = base[-1]
l2 = base[-2:]
return (c == "d" or c == "f" or l2 == "ph" or l2 == "th" or c == "l"
or l2 == "er" or l2 == "or" or l2 == "es" or c == "t")
def b(base):
# b Minimum stem length = 3 and do not remove ending after met or ryst
return len(base) > 2 and not (base.endswith("met")
or base.endswith("ryst"))
def c(base):
# c Remove ending only after l
return base[-1] == "l"
# Endings
m = [None] * 12
m[11] = dict((
("alistically", B),
("arizability", A),
("izationally", B)))
m[10] = dict((
("antialness", A),
("arisations", A),
("arizations", A),
("entialness", A)))
m[9] = dict((
("allically", C),
("antaneous", A),
("antiality", A),
("arisation", A),
("arization", A),
("ationally", B),
("ativeness", A),
("eableness", E),
("entations", A),
("entiality", A),
("entialize", A),
("entiation", A),
("ionalness", A),
("istically", A),
("itousness", A),
("izability", A),
("izational", A)))
m[8] = dict((
("ableness", A),
("arizable", A),
("entation", A),
("entially", A),
("eousness", A),
("ibleness", A),
("icalness", A),
("ionalism", A),
("ionality", A),
("ionalize", A),
("iousness", A),
("izations", A),
("lessness", A)))
m[7] = dict((
("ability", A),
("aically", A),
("alistic", B),
("alities", A),
("ariness", E),
("aristic", A),
("arizing", A),
("ateness", A),
("atingly", A),
("ational", B),
("atively", A),
("ativism", A),
("elihood", E),
("encible", A),
("entally", A),
("entials", A),
("entiate", A),
("entness", A),
("fulness", A),
("ibility", A),
("icalism", A),
("icalist", A),
("icality", A),
("icalize", A),
("ication", G),
("icianry", A),
("ination", A),
("ingness", A),
("ionally", A),
("isation", A),
("ishness", A),
("istical", A),
("iteness", A),
("iveness", A),
("ivistic", A),
("ivities", A),
("ization", F),
("izement", A),
("oidally", A),
("ousness", A)))
m[6] = dict((
("aceous", A),
("acious", B),
("action", G),
("alness", A),
("ancial", A),
("ancies", A),
("ancing", B),
("ariser", A),
("arized", A),
("arizer", A),
("atable", A),
("ations", B),
("atives", A),
("eature", Z),
("efully", A),
("encies", A),
("encing", A),
("ential", A),
("enting", C),
("entist", A),
("eously", A),
("ialist", A),
("iality", A),
("ialize", A),
("ically", A),
("icance", A),
("icians", A),
("icists", A),
("ifully", A),
("ionals", A),
("ionate", D),
("ioning", A),
("ionist", A),
("iously", A),
("istics", A),
("izable", E),
("lessly", A),
("nesses", A),
("oidism", A)))
m[5] = dict((
("acies", A),
("acity", A),
("aging", B),
("aical", A),
("alist", A),
("alism", B),
("ality", A),
("alize", A),
("allic", b),
("anced", B),
("ances", B),
("antic", C),
("arial", A),
("aries", A),
("arily", A),
("arity", B),
("arize", A),
("aroid", A),
("ately", A),
("ating", I),
("ation", B),
("ative", A),
("ators", A),
("atory", A),
("ature", E),
("early", Y),
("ehood", A),
("eless", A),
("elily", A),
("ement", A),
("enced", A),
("ences", A),
("eness", E),
("ening", E),
("ental", A),
("ented", C),
("ently", A),
("fully", A),
("ially", A),
("icant", A),
("ician", A),
("icide", A),
("icism", A),
("icist", A),
("icity", A),
("idine", I),
("iedly", A),
("ihood", A),
("inate", A),
("iness", A),
("ingly", B),
("inism", J),
("inity", c),
("ional", A),
("ioned", A),
("ished", A),
("istic", A),
("ities", A),
("itous", A),
("ively", A),
("ivity", A),
("izers", F),
("izing", F),
("oidal", A),
("oides", A),
("otide", A),
("ously", A)))
m[4] = dict((
("able", A),
("ably", A),
("ages", B),
("ally", B),
("ance", B),
("ancy", B),
("ants", B),
("aric", A),
("arly", K),
("ated", I),
("ates", A),
("atic", B),
("ator", A),
("ealy", Y),
("edly", E),
("eful", A),
("eity", A),
("ence", A),
("ency", A),
("ened", E),
("enly", E),
("eous", A),
("hood", A),
("ials", A),
("ians", A),
("ible", A),
("ibly", A),
("ical", A),
("ides", L),
("iers", A),
("iful", A),
("ines", M),
("ings", N),
("ions", B),
("ious", A),
("isms", B),
("ists", A),
("itic", H),
("ized", F),
("izer", F),
("less", A),
("lily", A),
("ness", A),
("ogen", A),
("ward", A),
("wise", A),
("ying", B),
("yish", A)))
m[3] = dict((
("acy", A),
("age", B),
("aic", A),
("als", b),
("ant", B),
("ars", O),
("ary", F),
("ata", A),
("ate", A),
("eal", Y),
("ear", Y),
("ely", E),
("ene", E),
("ent", C),
("ery", E),
("ese", A),
("ful", A),
("ial", A),
("ian", A),
("ics", A),
("ide", L),
("ied", A),
("ier", A),
("ies", P),
("ily", A),
("ine", M),
("ing", N),
("ion", Q),
("ish", C),
("ism", B),
("ist", A),
("ite", a),
("ity", A),
("ium", A),
("ive", A),
("ize", F),
("oid", A),
("one", R),
("ous", A)))
m[2] = dict((
("ae", A),
("al", b),
("ar", X),
("as", B),
("ed", E),
("en", F),
("es", E),
("ia", A),
("ic", A),
("is", A),
("ly", B),
("on", S),
("or", T),
("um", U),
("us", V),
("yl", R),
("s'", A),
("'s", A)))
m[1] = dict((
("a", A),
("e", A),
("i", A),
("o", A),
("s", W),
("y", B)))
def remove_ending(word):
length = len(word)
el = 11
while el > 0:
if length - el > 1:
ending = word[length-el:]
cond = m[el].get(ending)
if cond:
base = word[:length-el]
if cond(base):
return base
el -= 1
return word
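# Fix-up rules applied by fix_ending() after an ending has been removed. Each
# rule is a (target, replacement) pair, with an optional third element listing
# letters that, when they immediately precede the target, block the substitution.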
_endings = (("iev", "ief"),
("uct", "uc"),
("iev", "ief"),
("uct", "uc"),
("umpt", "um"),
("rpt", "rb"),
("urs", "ur"),
("istr", "ister"),
("metr", "meter"),
("olv", "olut"),
("ul", "l", "aoi"),
("bex", "bic"),
("dex", "dic"),
("pex", "pic"),
("tex", "tic"),
("ax", "ac"),
("ex", "ec"),
("ix", "ic"),
("lux", "luc"),
("uad", "uas"),
("vad", "vas"),
("cid", "cis"),
("lid", "lis"),
("erid", "eris"),
("pand", "pans"),
("end", "ens", "s"),
("ond", "ons"),
("lud", "lus"),
("rud", "rus"),
("her", "hes", "pt"),
("mit", "mis"),
("ent", "ens", "m"),
("ert", "ers"),
("et", "es", "n"),
("yt", "ys"),
("yz", "ys"))
# Hash the ending rules by the last letter of the target ending
_endingrules = defaultdict(list)
for rule in _endings:
_endingrules[rule[0][-1]].append(rule)
_doubles = frozenset(("dd", "gg", "ll", "mm", "nn", "pp", "rr", "ss", "tt"))
def fix_ending(word):
if word[-2:] in _doubles:
word = word[:-1]
for endingrule in _endingrules[word[-1]]:
target, newend = endingrule[:2]
if word.endswith(target):
if len(endingrule) > 2:
exceptafter = endingrule[2]
                c = word[-(len(target) + 1)]
if c in exceptafter: return word
return word[:0-len(target)] + newend
return word
def stem(word):
"""Returns the stemmed version of the argument string.
"""
return fix_ending(remove_ending(word))
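# Illustrative usage (added as documentation; not part of the original module).
# The results below follow from the ending and fix-up tables above:
#
#     >>> stem("believable")
#     'belief'
#     >>> stem("sitting")
#     'sit'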
| archatas/whoosh | whoosh/lang/lovins.py | Python | apache-2.0 | 12,657 | 0.003871 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Snapshots
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *
from marvin import remoteSSHClient
class Services:
"""Test Snapshots Services
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "fr3sca",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 200, # in MHz
"memory": 256, # In MBs
},
"disk_offering": {
"displaytext": "Small Disk",
"name": "Small Disk",
"disksize": 1
},
"server": {
"displayname": "TestVM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"mgmt_server": {
"ipaddress": '192.168.100.21',
"username": "root",
"password": "fr3sca",
"port": 22,
},
"recurring_snapshot": {
"intervaltype": 'HOURLY',
# Frequency of snapshots
"maxsnaps": 1, # Should be min 2
"schedule": 1,
"timezone": 'US/Arizona',
# Timezone Formats - http://cloud.mindtouch.us/CloudStack_Documentation/Developer's_Guide%3A_CloudStack
},
"templates": {
"displaytext": 'Template',
"name": 'Template',
"ostypeid": '144f66aa-7f74-4cfe-9799-80cc21439cb3',
"templatefilter": 'self',
},
"diskdevice": "/dev/xvda",
"diskname": "TestDiskServ",
"size": 1, # GBs
"mount_dir": "/mnt/tmp",
"sub_dir": "test",
"sub_lvl_dir1": "test1",
"sub_lvl_dir2": "test2",
"random_data": "random.data",
"ostypeid": '144f66aa-7f74-4cfe-9799-80cc21439cb3',
# Cent OS 5.3 (64 bit)
"sleep": 60,
"timeout": 10,
"mode" : 'advanced', # Networking mode: Advanced, Basic
}
class TestCreateVMsnapshotTemplate(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestCreateVMsnapshotTemplate, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["domainid"] = cls.domain.id
cls.services["server"]["zoneid"] = cls.zone.id
cls.services["template"] = cls.template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_01_createVM_snapshotTemplate(self):
"""Test create VM, Snapshot and Template
"""
# Validate the following
# 1. Deploy VM using default template, small service offering
# and small data disk offering.
# 2. Perform snapshot on the root disk of this VM.
# 3. Create a template from snapshot.
# 4. Create a instance from above created template.
# 5. listSnapshots should list the snapshot that was created.
# 6. verify that secondary storage NFS share contains the reqd
# volume under /secondary/snapshots/$accountid/$volumeid/$snapshot_uuid
# 7. verify backup_snap_id was non null in the `snapshots` table
# 8. listTemplates() should return the newly created Template,
# and check for template state as READY"
# 9. listVirtualMachines() command should return the deployed VM.
# State of this VM should be Running.
#Create Virtual Machine
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=self.template.id,
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering.id
)
self.debug("Created VM with ID: %s" % self.virtual_machine.id)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
volume = volumes[0]
# Create a snapshot from the ROOTDISK
snapshot = Snapshot.create(self.apiclient, volumes[0].id)
self.debug("Snapshot created: ID - %s" % snapshot.id)
self.cleanup.append(snapshot)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list snapshots call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check snapshot id in list resources call"
)
self.debug("select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% snapshot.id)
# Verify backup_snap_id is not NULL
qresultset = self.dbclient.execute(
"select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% snapshot.id
)
self.assertNotEqual(
len(qresultset),
0,
"Check DB Query result set"
)
qresult = qresultset[0]
snapshot_uuid = qresult[0] # backup_snap_id = snapshot UUID
account_id = qresult[1]
volume_id = qresult[2]
# Generate template from the snapshot
template = Template.create_from_snapshot(
self.apiclient,
snapshot,
self.services["templates"]
)
self.debug("Created template from snapshot: %s" % template.id)
self.cleanup.append(template)
templates = list_templates(
self.apiclient,
templatefilter=\
self.services["templates"]["templatefilter"],
id=template.id
)
self.assertNotEqual(
templates,
None,
"Check if result exists in list item call"
)
self.assertEqual(
templates[0].isready,
True,
"Check new template state in list templates call"
)
# Deploy new virtual machine using template
new_virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["server"],
templateid=template.id,
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering.id
)
self.debug("Created VM with ID: %s from template: %s" % (
new_virtual_machine.id,
template.id
))
self.cleanup.append(new_virtual_machine)
# Newly deployed VM should be 'Running'
virtual_machines = list_virtual_machines(
self.apiclient,
id=new_virtual_machine.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
self.assertEqual(
isinstance(virtual_machines, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(virtual_machines),
0,
"Check list virtual machines response"
)
for virtual_machine in virtual_machines:
self.assertEqual(
virtual_machine.state,
'Running',
"Check list VM response for Running state"
)
# Get the Secondary Storage details from list Hosts
hosts = list_hosts(
self.apiclient,
type='SecondaryStorage',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
uuids = []
for host in hosts:
# hosts[0].name = "nfs://192.168.100.21/export/test"
parse_url = (host.name).split('/')
# parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
# Split IP address and export path from name
sec_storage_ip = parse_url[2]
# Sec Storage IP: 192.168.100.21
export_path = '/'.join(parse_url[3:])
# Export path: export/test
# Sleep to ensure that snapshot is reflected in sec storage
time.sleep(self.services["sleep"])
try:
# Login to VM to check snapshot present on sec disk
ssh_client = remoteSSHClient.remoteSSHClient(
self.services["mgmt_server"]["ipaddress"],
self.services["mgmt_server"]["port"],
self.services["mgmt_server"]["username"],
self.services["mgmt_server"]["password"],
)
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s/%s %s" % (
sec_storage_ip,
export_path,
self.services["mount_dir"]
),
"ls %s/snapshots/%s/%s" % (
self.services["mount_dir"],
account_id,
volume_id
),
]
for c in cmds:
self.debug("command: %s" % c)
result = ssh_client.execute(c)
self.debug("Result: %s" % result)
except Exception as e:
self.fail("SSH failed for Management server: %s" %
self.services["mgmt_server"]["ipaddress"])
uuids.append(result)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
try:
for c in cmds:
self.debug("command: %s" % c)
result = ssh_client.execute(c)
self.debug("Result: %s" % result)
except Exception as e:
self.fail("SSH failed for Management server: %s" %
self.services["mgmt_server"]["ipaddress"])
res = str(uuids)
self.assertEqual(
res.count(snapshot_uuid),
1,
"Check snapshot UUID in secondary storage and database"
)
return
class TestAccountSnapshotClean(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestAccountSnapshotClean, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["server"]["zoneid"] = cls.zone.id
cls.services["template"] = template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id
)
# Get the Root disk of VM
volumes = list_volumes(
cls.api_client,
virtualmachineid=cls.virtual_machine.id,
type='ROOT',
listall=True
)
volume = volumes[0]
# Create a snapshot from the ROOTDISK
cls.snapshot = Snapshot.create(cls.api_client, volumes[0].id)
cls._cleanup = [
cls.service_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_02_accountSnapshotClean(self):
"""Test snapshot cleanup after account deletion
"""
# Validate the following
# 1. listAccounts API should list out the newly created account
# 2. listVirtualMachines() command should return the deployed VM.
# State of this VM should be "Running"
# 3. a)listSnapshots should list the snapshot that was created.
# b)verify that secondary storage NFS share contains the reqd volume
# under /secondary/snapshots/$accountid/$volumeid/$snapshot_uuid
# 4. a)listAccounts should not list account that is deleted
# b) snapshot image($snapshot_uuid) should be deleted from the
# /secondary/snapshots/$accountid/$volumeid/
accounts = list_accounts(
self.apiclient,
id=self.account.account.id
)
self.assertEqual(
isinstance(accounts, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(accounts),
0,
"Check list Accounts response"
)
# VM should be in 'Running' state
virtual_machines = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
isinstance(virtual_machines, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(virtual_machines),
0,
"Check list virtual machines response"
)
for virtual_machine in virtual_machines:
self.debug("VM ID: %s, VM state: %s" % (
virtual_machine.id,
virtual_machine.state
))
self.assertEqual(
virtual_machine.state,
'Running',
"Check list VM response for Running state"
)
# Verify the snapshot was created or not
snapshots = list_snapshots(
self.apiclient,
id=self.snapshot.id
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list snapshots call"
)
self.assertEqual(
snapshots[0].id,
self.snapshot.id,
"Check snapshot id in list resources call"
)
# Fetch values from database
qresultset = self.dbclient.execute(
"select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% self.snapshot.id
)
self.assertEqual(
isinstance(qresultset, list),
True,
"Check DB response returns a valid list"
)
self.assertNotEqual(
len(qresultset),
0,
"Check DB Query result set"
)
qresult = qresultset[0]
snapshot_uuid = qresult[0] # backup_snap_id = snapshot UUID
account_id = qresult[1]
volume_id = qresult[2]
# Get the Secondary Storage details from list Hosts
hosts = list_hosts(
self.apiclient,
type='SecondaryStorage',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
uuids = []
for host in hosts:
# hosts[0].name = "nfs://192.168.100.21/export/test"
parse_url = (host.name).split('/')
# parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
# Split IP address and export path from name
sec_storage_ip = parse_url[2]
# Sec Storage IP: 192.168.100.21
export_path = '/'.join(parse_url[3:])
# Export path: export/test
# Sleep to ensure that snapshot is reflected in sec storage
time.sleep(self.services["sleep"])
try:
# Login to Secondary storage VM to check snapshot present on sec disk
ssh_client = remoteSSHClient.remoteSSHClient(
self.services["mgmt_server"]["ipaddress"],
self.services["mgmt_server"]["port"],
self.services["mgmt_server"]["username"],
self.services["mgmt_server"]["password"],
)
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s/%s %s" % (
sec_storage_ip,
export_path,
self.services["mount_dir"]
),
"ls %s/snapshots/%s/%s" % (
self.services["mount_dir"],
account_id,
volume_id
),
]
for c in cmds:
self.debug("command: %s" % c)
result = ssh_client.execute(c)
self.debug("Result: %s" % result)
uuids.append(result)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
for c in cmds:
result = ssh_client.execute(c)
except Exception:
self.fail("SSH failed for management server: %s" %
self.services["mgmt_server"]["ipaddress"])
res = str(uuids)
self.assertEqual(
res.count(snapshot_uuid),
1,
"Check snapshot UUID in secondary storage and database"
)
self.debug("Deleting account: %s" % self.account.account.name)
# Delete account
self.account.delete(self.apiclient)
interval = list_configurations(
self.apiclient,
name='account.cleanup.interval'
)
self.assertEqual(
isinstance(interval, list),
True,
"Check list response returns a valid list"
)
self.debug("account.cleanup.interval: %s" % interval[0].value)
# Wait for account cleanup interval
time.sleep(int(interval[0].value) * 2)
accounts = list_accounts(
self.apiclient,
id=self.account.account.id
)
self.assertEqual(
accounts,
None,
"List accounts should return empty list after account deletion"
)
uuids = []
for host in hosts:
# hosts[0].name = "nfs://192.168.100.21/export/test"
parse_url = (host.name).split('/')
# parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
# Split IP address and export path from name
sec_storage_ip = parse_url[2]
# Sec Storage IP: 192.168.100.21
export_path = '/'.join(parse_url[3:])
# Export path: export/test
try:
cmds = [
"mount %s/%s %s" % (
sec_storage_ip,
export_path,
self.services["mount_dir"]
),
"ls %s/snapshots/%s/%s" % (
self.services["mount_dir"],
account_id,
volume_id
),
]
for c in cmds:
self.debug("command: %s" % c)
result = ssh_client.execute(c)
self.debug("Result: %s" % result)
uuids.append(result)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
for c in cmds:
self.debug("command: %s" % c)
result = ssh_client.execute(c)
self.debug("Result: %s" % result)
except Exception:
self.fail("SSH failed for management server: %s" %
self.services["mgmt_server"]["ipaddress"])
res = str(uuids)
self.assertNotEqual(
res.count(snapshot_uuid),
1,
"Check snapshot UUID in secondary storage and database"
)
return
class TestSnapshotDetachedDisk(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestSnapshotDetachedDisk, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["server"]["zoneid"] = cls.zone.id
cls.services["server"]["diskoffering"] = cls.disk_offering.id
cls.services["template"] = template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.services["mode"]
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
cls.account,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_03_snapshot_detachedDisk(self):
"""Test snapshot from detached disk
"""
# Validate the following
        # 1. login in VM and write some data on the data disk (use fdisk to
        #    partition the data disk, e.g. fdisk /dev/sdb, and make a
        #    filesystem using mkfs.ext3)
# 2. Detach the data disk and write some data on data disk
# 3. perform the snapshot on the detached volume
# 4. listvolumes with VM id shouldn't show the detached volume
# 5. listSnapshots should list the snapshot that was created
# 6. verify that secondary storage NFS share contains the reqd volume
# under /secondary/snapshots/$accountid/$volumeid/$snapshot_uuid
# 7. verify backup_snap_id was non null in the `snapshots` table
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
volume = volumes[0]
random_data_0 = random_gen(100)
random_data_1 = random_gen(100)
try:
ssh_client = self.virtual_machine.get_ssh_client()
#Format partition using ext3
format_volume_to_ext3(
ssh_client,
self.services["diskdevice"]
)
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s1 %s" % (
self.services["diskdevice"],
self.services["mount_dir"]
),
"pushd %s" % self.services["mount_dir"],
"mkdir -p %s/{%s,%s} " % (
self.services["sub_dir"],
self.services["sub_lvl_dir1"],
self.services["sub_lvl_dir2"]
),
"echo %s > %s/%s/%s" % (
random_data_0,
self.services["sub_dir"],
self.services["sub_lvl_dir1"],
self.services["random_data"]
),
"echo %s > %s/%s/%s" % (
random_data_1,
self.services["sub_dir"],
self.services["sub_lvl_dir2"],
self.services["random_data"]
),
"sync",
]
for c in cmds:
self.debug(ssh_client.execute(c))
#detach volume from VM
cmd = detachVolume.detachVolumeCmd()
cmd.id = volume.id
self.apiclient.detachVolume(cmd)
#Create snapshot from detached volume
snapshot = Snapshot.create(self.apiclient, volume.id)
self.cleanup.append(snapshot)
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
listall=True
)
self.assertEqual(
volumes,
None,
"Check Volume is detached"
)
# Verify the snapshot was created or not
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list snapshots call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check snapshot id in list resources call"
)
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
self.virtual_machine.ipaddress)
# Fetch values from database
qresultset = self.dbclient.execute(
"select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% snapshot.id
)
self.assertNotEqual(
len(qresultset),
0,
"Check DB Query result set"
)
qresult = qresultset[0]
snapshot_uuid = qresult[0] # backup_snap_id = snapshot UUID
account_id = qresult[1]
volume_id = qresult[2]
self.assertNotEqual(
str(qresult[0]),
'NULL',
"Check if backup_snap_id is not null"
)
# Get the Secondary Storage details from list Hosts
hosts = list_hosts(
self.apiclient,
type='SecondaryStorage',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
uuids = []
for host in hosts:
# hosts[0].name = "nfs://192.168.100.21/export/test"
parse_url = (host.name).split('/')
# parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
# Split IP address and export path from name
sec_storage_ip = parse_url[2]
# Sec Storage IP: 192.168.100.21
export_path = '/'.join(parse_url[3:])
# Export path: export/test
# Sleep to ensure that snapshot is reflected in sec storage
time.sleep(self.services["sleep"])
try:
# Login to Management server to check snapshot present on
# sec disk
ssh_client = remoteSSHClient.remoteSSHClient(
self.services["mgmt_server"]["ipaddress"],
self.services["mgmt_server"]["port"],
self.services["mgmt_server"]["username"],
self.services["mgmt_server"]["password"],
)
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s/%s %s" % (
sec_storage_ip,
export_path,
self.services["mount_dir"]
),
"ls %s/snapshots/%s/%s" % (
self.services["mount_dir"],
account_id,
volume_id
),
]
for c in cmds:
result = ssh_client.execute(c)
uuids.append(result)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
for c in cmds:
result = ssh_client.execute(c)
except Exception as e:
self.fail("SSH failed for management server: %s" %
self.services["mgmt_server"]["ipaddress"])
res = str(uuids)
self.assertEqual(
res.count(snapshot_uuid),
1,
"Check snapshot UUID in secondary storage and database"
)
return
class TestSnapshotLimit(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestSnapshotLimit, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["server"]["zoneid"] = cls.zone.id
cls.services["template"] = template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls._cleanup = [
cls.service_offering,
cls.account,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_04_snapshot_limit(self):
"""Test snapshot limit in snapshot policies
"""
# Validate the following
# 1. Perform hourly recurring snapshot on the root disk of VM and keep
# the maxsnapshots as 1
# 2. listSnapshots should list the snapshot that was created
# snapshot folder in secondary storage should contain only one
# snapshot image(/secondary/snapshots/$accountid/$volumeid/)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
volume = volumes[0]
# Create a snapshot policy
recurring_snapshot = SnapshotPolicy.create(
self.apiclient,
volume.id,
self.services["recurring_snapshot"]
)
self.cleanup.append(recurring_snapshot)
snapshot_policy = list_snapshot_policy(
self.apiclient,
id=recurring_snapshot.id,
volumeid=volume.id
)
self.assertEqual(
isinstance(snapshot_policy, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshot_policy,
None,
"Check if result exists in list item call"
)
self.assertEqual(
snapshot_policy[0].id,
recurring_snapshot.id,
"Check recurring snapshot id in list resources call"
)
self.assertEqual(
snapshot_policy[0].maxsnaps,
self.services["recurring_snapshot"]["maxsnaps"],
"Check interval type in list resources call"
)
# Sleep for (maxsnaps+1) hours to verify
# only maxsnaps snapshots are retained
time.sleep(
(self.services["recurring_snapshot"]["maxsnaps"]) * 3600
)
# Verify the snapshot was created or not
snapshots = list_snapshots(
self.apiclient,
volumeid=volume.id,
intervaltype=\
self.services["recurring_snapshot"]["intervaltype"],
snapshottype='RECURRING',
listall=True
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertEqual(
len(snapshots),
self.services["recurring_snapshot"]["maxsnaps"],
"Check maximum number of recurring snapshots retained"
)
snapshot = snapshots[0]
# Sleep to ensure that snapshot is reflected in sec storage
time.sleep(self.services["sleep"])
# Fetch values from database
qresultset = self.dbclient.execute(
"select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% snapshot.id
)
self.assertEqual(
isinstance(qresultset, list),
True,
"Check DBQuery returns a valid list"
)
self.assertNotEqual(
len(qresultset),
0,
"Check DB Query result set"
)
qresult = qresultset[0]
snapshot_uuid = qresult[0] # backup_snap_id = snapshot UUID
account_id = qresult[1]
volume_id = qresult[2]
# Get the Secondary Storage details from list Hosts
hosts = list_hosts(
self.apiclient,
type='SecondaryStorage',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
uuids = []
for host in hosts:
# hosts[0].name = "nfs://192.168.100.21/export/test"
parse_url = (host.name).split('/')
# parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
# Split IP address and export path from name
sec_storage_ip = parse_url[2]
# Sec Storage IP: 192.168.100.21
export_path = '/'.join(parse_url[3:])
# Export path: export/test
try:
# Login to VM to check snapshot present on sec disk
ssh_client = remoteSSHClient.remoteSSHClient(
self.services["mgmt_server"]["ipaddress"],
self.services["mgmt_server"]["port"],
self.services["mgmt_server"]["username"],
self.services["mgmt_server"]["password"],
)
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s/%s %s" % (
sec_storage_ip,
export_path,
self.services["mount_dir"]
),
"ls %s/snapshots/%s/%s" % (
self.services["mount_dir"],
account_id,
volume_id
),
]
for c in cmds:
result = ssh_client.execute(c)
uuids.append(result)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
for c in cmds:
result = ssh_client.execute(c)
except Exception as e:
raise Exception(
"SSH access failed for management server: %s" %
self.services["mgmt_server"]["ipaddress"])
res = str(uuids)
self.assertEqual(
res.count(snapshot_uuid),
1,
"Check snapshot UUID in secondary storage and database"
)
return
class TestSnapshotEvents(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestSnapshotEvents, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["server"]["zoneid"] = cls.zone.id
cls.services["template"] = template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls._cleanup = [
cls.service_offering,
cls.account,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_05_snapshot_events(self):
"""Test snapshot events
"""
# Validate the following
# 1. Perform snapshot on the root disk of this VM and check the events/alerts.
# 2. delete the snapshots and check the events/alerts
# 3. listEvents() shows created/deleted snapshot events
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
volume = volumes[0]
# Create a snapshot from the ROOTDISK
snapshot = Snapshot.create(self.apiclient, volumes[0].id)
self.debug("Snapshot created with ID: %s" % snapshot.id)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list snapshots call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check snapshot id in list resources call"
)
snapshot.delete(self.apiclient)
# Sleep to ensure that snapshot is deleted properly
time.sleep(self.services["sleep"])
events = list_events(
self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
type='SNAPSHOT.DELETE'
)
self.assertEqual(
isinstance(events, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
events,
None,
"Check if event exists in list events call"
)
self.assertIn(
events[0].state,
['Completed', 'Scheduled'],
"Check events state in list events call"
)
return
| cinderella/incubator-cloudstack | test/integration/component/test_snapshots.py | Python | apache-2.0 | 57,945 | 0.001778 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
class CustomHelpAction(argparse.Action):
'''Allows defining custom help actions.
Help actions can run even when the parser would otherwise fail on missing
arguments. The first help or custom help command mentioned on the command
line will have its help text displayed.
Usage:
parser = argparse.ArgumentParser(...)
CustomHelpAction.EnableFor(parser)
parser.add_argument('--foo-help',
action='custom_help',
custom_help_text='this is the help message',
help='What this helps with')
'''
# Derived from argparse._HelpAction from
# https://github.com/python/cpython/blob/master/Lib/argparse.py
# pylint: disable=redefined-builtin
# (complains about 'help' being redefined)
def __init__(self,
option_strings,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
custom_help_text=None,
help=None):
super().__init__(option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
if not custom_help_text:
raise ValueError('custom_help_text is required')
self._help_text = custom_help_text
def __call__(self, parser, namespace, values, option_string=None):
print(self._help_text)
parser.exit()
@staticmethod
def EnableFor(parser):
parser.register('action', 'custom_help', CustomHelpAction)
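if __name__ == '__main__':
  # Illustrative sketch (not part of the original module): shows that a
  # 'custom_help' action prints its text and exits during parsing, even though
  # the otherwise-required --required flag is missing from the command line.
  demo_parser = argparse.ArgumentParser()
  CustomHelpAction.EnableFor(demo_parser)
  demo_parser.add_argument('--required', required=True)
  demo_parser.add_argument('--foo-help',
                           action='custom_help',
                           custom_help_text='this is the help message',
                           help='What this helps with')
  demo_parser.parse_args(['--foo-help'])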
| ric2b/Vivaldi-browser | chromium/build/android/pylib/utils/argparse_utils.py | Python | bsd-3-clause | 1,695 | 0.00649 |
import pytest
from marshmallow import ValidationError
from skylines.schemas import ClubSchema
def test_deserialization_fails_for_empty_name():
with pytest.raises(ValidationError) as e:
ClubSchema(only=("name",)).load(dict(name=""))
errors = e.value.messages
assert "name" in errors
assert "Must not be empty." in errors.get("name")
def test_deserialization_fails_for_spaced_name():
with pytest.raises(ValidationError) as e:
ClubSchema(only=("name",)).load(dict(name=" "))
errors = e.value.messages
assert "name" in errors
assert "Must not be empty." in errors.get("name")
def test_deserialization_passes_for_valid_name():
data = ClubSchema(only=("name",)).load(dict(name=" foo ")).data
assert data["name"] == "foo"
def test_deserialization_passes_for_valid_website():
data = ClubSchema(partial=True).load(dict(website="https://skylines.aero")).data
assert data["website"] == "https://skylines.aero"
def test_deserialization_passes_for_empty_website():
data = ClubSchema(partial=True).load(dict(website="")).data
assert data["website"] == ""
def test_deserialization_passes_for_null_website():
data = ClubSchema(partial=True).load(dict(website=None)).data
assert data["website"] is None
def test_deserialization_fails_for_invalid_website():
with pytest.raises(ValidationError) as e:
ClubSchema(partial=True).load(dict(website="foo"))
errors = e.value.messages
assert "website" in errors
assert "Not a valid URL." in errors.get("website")
def test_serialization_passes_for_invalid_website():
data = ClubSchema().dump(dict(website="foobar")).data
assert data["website"] == "foobar"
| skylines-project/skylines | tests/schemas/schemas/test_club.py | Python | agpl-3.0 | 1,722 | 0.000581 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
lasinfoPro.py
---------------------
Date : October 2014 and May 2016
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'October 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
class lasinfoPro(LAStoolsAlgorithm):
COMPUTE_DENSITY = "COMPUTE_DENSITY"
REPAIR_BB = "REPAIR_BB"
REPAIR_COUNTERS = "REPAIR_COUNTERS"
HISTO1 = "HISTO1"
HISTO2 = "HISTO2"
HISTO3 = "HISTO3"
HISTOGRAM = ["---", "x", "y", "z", "intensity", "classification", "scan_angle", "user_data", "point_source", "gps_time", "X", "Y", "Z"]
HISTO1_BIN = "HISTO1_BIN"
HISTO2_BIN = "HISTO2_BIN"
HISTO3_BIN = "HISTO3_BIN"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('lasinfoPro')
self.group, self.i18n_group = self.trAlgorithm('LAStools Production')
self.addParametersPointInputFolderGUI()
self.addParameter(ParameterBoolean(lasinfoPro.COMPUTE_DENSITY,
self.tr("compute density"), False))
self.addParameter(ParameterBoolean(lasinfoPro.REPAIR_BB,
self.tr("repair bounding box"), False))
self.addParameter(ParameterBoolean(lasinfoPro.REPAIR_COUNTERS,
self.tr("repair counters"), False))
self.addParameter(ParameterSelection(lasinfoPro.HISTO1,
self.tr("histogram"), lasinfoPro.HISTOGRAM, 0))
self.addParameter(ParameterNumber(lasinfoPro.HISTO1_BIN,
self.tr("bin size"), 0, None, 1.0))
self.addParameter(ParameterSelection(lasinfoPro.HISTO2,
self.tr("histogram"), lasinfoPro.HISTOGRAM, 0))
self.addParameter(ParameterNumber(lasinfoPro.HISTO2_BIN,
self.tr("bin size"), 0, None, 1.0))
self.addParameter(ParameterSelection(lasinfoPro.HISTO3,
self.tr("histogram"), lasinfoPro.HISTOGRAM, 0))
self.addParameter(ParameterNumber(lasinfoPro.HISTO3_BIN,
self.tr("bin size"), 0, None, 1.0))
self.addParametersOutputDirectoryGUI()
self.addParametersOutputAppendixGUI()
self.addParametersAdditionalGUI()
self.addParametersCoresGUI()
self.addParametersVerboseGUI()
def processAlgorithm(self, progress):
if (LAStoolsUtils.hasWine()):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasinfo.exe")]
else:
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasinfo")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputFolderCommands(commands)
if self.getParameterValue(lasinfoPro.COMPUTE_DENSITY):
commands.append("-cd")
if self.getParameterValue(lasinfoPro.REPAIR_BB):
commands.append("-repair_bb")
if self.getParameterValue(lasinfoPro.REPAIR_COUNTERS):
commands.append("-repair_counters")
histo = self.getParameterValue(lasinfoPro.HISTO1)
if histo != 0:
commands.append("-histo")
commands.append(lasinfoPro.HISTOGRAM[histo])
commands.append(unicode(self.getParameterValue(lasinfoPro.HISTO1_BIN)))
histo = self.getParameterValue(lasinfoPro.HISTO2)
if histo != 0:
commands.append("-histo")
commands.append(lasinfoPro.HISTOGRAM[histo])
commands.append(unicode(self.getParameterValue(lasinfoPro.HISTO2_BIN)))
histo = self.getParameterValue(lasinfoPro.HISTO3)
if histo != 0:
commands.append("-histo")
commands.append(lasinfoPro.HISTOGRAM[histo])
commands.append(unicode(self.getParameterValue(lasinfoPro.HISTO3_BIN)))
self.addParametersOutputDirectoryCommands(commands)
self.addParametersOutputAppendixCommands(commands)
commands.append("-otxt")
self.addParametersAdditionalCommands(commands)
self.addParametersCoresCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
| AsgerPetersen/QGIS | python/plugins/processing/algs/lidar/lastools/lasinfoPro.py | Python | gpl-2.0 | 5,416 | 0.001846 |
import asyncio
import datetime
import logging
import signal
import yaml
import passlib.context
from merc import config
from merc import config_format
from merc import channel
from merc import feature
from merc import server
from merc import user
from merc import util
logger = logging.getLogger(__name__)
class Application(object):
def __init__(self, config_filename, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self.loop = loop
self.creation_time = datetime.datetime.now()
self.features = feature.FeatureLoader(self)
self.users = user.UserStore(self)
self.channels = channel.ChannelStore(self)
self.network = server.Network(self)
self.crypt_context = None
self.config = None
self.config_filename = config_filename
self.reload_config()
self.register_signal_handlers()
@property
def server(self):
return self.network.local
def check_config(self, cfg):
config.validate(cfg, config_format.Config)
def reload_config(self):
self.features.unload_all()
with open(self.config_filename, "r") as f:
config = yaml.safe_load(f)
try:
self.check_config(config)
for feature_name in config["features"]:
self.features.load(feature_name)
self.features.check_config(config)
except Exception:
logger.critical("Configuration invalid.")
self.features.unload_all()
if self.config:
logger.critical("Reloading old configuration.")
for feature_name in self.config["features"]:
self.features.load(feature_name)
raise
else:
self.config = config
finally:
if self.config:
self.update_from_config()
def update_from_config(self):
self.network.update_local(
self.loop,
self.config["server"]["name"],
self.config["server"]["description"],
self.config["server"]["sid"])
self.crypt_context = passlib.context.CryptContext(
schemes=self.config["crypto"]["hash_schemes"])
def rehash(self):
@asyncio.coroutine
def coro():
yield from self.unbind()
self.reload_config()
yield from self.bind()
return asyncio.async(coro(), loop=self.loop)
@asyncio.coroutine
def bind(self):
yield from self.network.local.bind(self, self.config["bind"])
@asyncio.coroutine
def unbind(self):
yield from self.network.local.unbind()
@property
def version(self):
return util.get_version()
@property
def network_name(self):
return self.config["server"]["network_name"]
@property
def admin_location(self):
return self.config["admin"]["location"]
@property
def admin_location_fine(self):
return self.config["admin"]["location_fine"]
@property
def admin_name(self):
return self.config["admin"]["name"]
@property
def admin_email(self):
return self.config["admin"]["email"]
def register_signal_handlers(self):
signal.signal(signal.SIGHUP, lambda signum, frame: self.rehash())
def run_hooks(self, hook_name, *args, **kwargs):
for hook in self.features.get_hooks(hook_name):
hook(self, *args, **kwargs)
def get_feature_locals(self, feature):
return self.features[feature.NAME].server_locals
def start(self):
logger.info("Welcome to merc-{}, running for {} ({}) on network {}.".format(
util.get_version(), self.config["server"]["name"],
self.config["server"]["sid"], self.config["server"]["network_name"]))
self.loop.run_until_complete(self.bind())
self._autoconnect_links()
try:
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.loop.run_until_complete(self.unbind())
self.loop.close()
def _autoconnect_links(self):
for server_name, link_spec in self.config["links"].items():
if link_spec["autoconnect"]:
self.network.connect(server_name)
def main():
import argparse
import coloredlogs
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--config", "-c", help="file to load configuration from",
default="merc.conf")
parser.add_argument("--verbose", "-v", help="enable verbose (debug) logging",
action="store_true", default=False)
args = parser.parse_args()
coloredlogs.install(level=logging.DEBUG if args.verbose else logging.INFO)
logging.getLogger("asyncio").setLevel(logging.WARN)
try:
app = Application(args.config)
app.start()
except config.ParseError as e:
logger.fatal('Could not load configuration file, aborting.')
logger.fatal(e)
except Exception as e:
logger.fatal('Could not initialize merc, aborting.')
logger.fatal(e)
| merc-devel/merc | merc/application.py | Python | mit | 4,731 | 0.013316 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_deathstar_debris_cultist_hum_m_02.iff"
result.attribute_template_id = 9
result.stfName("obj_n","unknown_creature")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/mobile/shared_dressed_deathstar_debris_cultist_hum_m_02.py | Python | mit | 464 | 0.047414 |
#!/usr/bin/python
import sys
print "divsum_analysis.py DivsumFile NumberOfNucleotides"
try:
file = sys.argv[1]
except:
file = raw_input("Introduce RepeatMasker's Divsum file: ")
try:
nucs = sys.argv[2]
except:
nucs = raw_input("Introduce number of analysed nucleotides: ")
nucs = int(nucs)
data = open(file).readlines()
s_matrix = data.index("Coverage for each repeat class and divergence (Kimura)\n")
matrix = []
elements = data[s_matrix+1]
elements = elements.split()
for element in elements[1:]:
matrix.append([element,[]])
n_el = len(matrix)
for line in data[s_matrix+2:]:
# print line
info = line.split()
info = info[1:]
for n in range(0,n_el):
matrix[n][1].append(int(info[n]))
abs = open(file+".abs", "w")
rel = open(file+".rel", "w")
for n in range(0,n_el):
abs.write("%s\t%s\n" % (matrix[n][0], sum(matrix[n][1])))
rel.write("%s\t%s\n" % (matrix[n][0], round(1.0*sum(matrix[n][1])/nucs,100)))
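# Illustrative output (values made up): each line of FILE.abs holds a repeat class and its
# total aligned bases, e.g. "DNA/hAT	152340"; the matching FILE.rel line divides that sum
# by the number of analysed nucleotides, e.g. "DNA/hAT	0.0015".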
| fjruizruano/ngs-protocols | divsum_analysis.py | Python | gpl-3.0 | 974 | 0.011294 |
"""
This script is responsible for generating recommendations for the users. The general flow is as follows:
The best_model saved in HDFS is loaded with the help of model_id which is fetched from model_metadata_df.
`spark_user_id` and `recording_id` are fetched from top_artist_candidate_set_df and are given as input to the
recommender. The recommender returns an RDD of `user`, `product` and `rating`, which is converted to a dataframe
by keeping the top X (an int supplied as an argument to the script) recommendations per user, sorted on rating,
with the fields renamed to `spark_user_id`, `recording_id` and `rating`. The ratings are scaled so that they lie between 0 and 1.
This dataframe is joined with recordings_df on recording_id to get the recording mbids which are then sent over the queue.
The same process is done for similar artist candidate set.
"""
import logging
import time
from py4j.protocol import Py4JJavaError
import listenbrainz_spark
from listenbrainz_spark import utils, path
from listenbrainz_spark.exceptions import (PathNotFoundException,
FileNotFetchedException,
SparkSessionNotInitializedException,
RecommendationsNotGeneratedException,
EmptyDataframeExcpetion)
from listenbrainz_spark.recommendations.recording.train_models import get_model_path
from listenbrainz_spark.recommendations.recording.candidate_sets import _is_empty_dataframe
from pyspark.sql import Row
import pyspark.sql.functions as func
from pyspark.sql.window import Window
from pyspark.sql.functions import col, udf, row_number
from pyspark.sql.types import DoubleType
from pyspark.mllib.recommendation import MatrixFactorizationModel
logger = logging.getLogger(__name__)
class RecommendationParams:
def __init__(self, recordings_df, model, top_artist_candidate_set_df, similar_artist_candidate_set_df,
recommendation_top_artist_limit, recommendation_similar_artist_limit):
self.recordings_df = recordings_df
self.model = model
self.top_artist_candidate_set_df = top_artist_candidate_set_df
self.similar_artist_candidate_set_df = similar_artist_candidate_set_df
self.recommendation_top_artist_limit = recommendation_top_artist_limit
self.recommendation_similar_artist_limit = recommendation_similar_artist_limit
def get_most_recent_model_id():
""" Get model id of recently created model.
Returns:
model_id (str): Model identification string.
"""
try:
model_metadata = utils.read_files_from_HDFS(path.RECOMMENDATION_RECORDING_MODEL_METADATA)
except PathNotFoundException as err:
logger.error(str(err), exc_info=True)
raise
except FileNotFetchedException as err:
logger.error(str(err), exc_info=True)
raise
latest_ts = model_metadata.select(func.max('model_created').alias('model_created')).take(1)[0].model_created
model_id = model_metadata.select('model_id') \
.where(col('model_created') == latest_ts).take(1)[0].model_id
return model_id
def load_model():
""" Load model from given path in HDFS.
"""
model_id = get_most_recent_model_id()
dest_path = get_model_path(model_id)
try:
model = MatrixFactorizationModel.load(listenbrainz_spark.context, dest_path)
return model
except Py4JJavaError as err:
logger.error('Unable to load model "{}"\n{}\nAborting...'.format(model_id, str(err.java_exception)),
exc_info=True)
raise
def get_recording_mbids(params: RecommendationParams, recommendation_df, users_df):
""" Get recording mbids corresponding to recommended recording ids sorted on rating.
Args:
params: RecommendationParams class object.
recommendation_df: Dataframe of spark_user_id, recording id and rating.
users_df : user_id and spark_user_id of active users.
Returns:
dataframe of recommended recording mbids and related info.
"""
df = params.recordings_df.join(recommendation_df, 'recording_id', 'inner') \
.select('rating',
'recording_mbid',
'spark_user_id')
recording_mbids_df = df.join(users_df, 'spark_user_id', 'inner')
window = Window.partitionBy('user_id').orderBy(col('rating').desc())
df = recording_mbids_df.withColumn('rank', row_number().over(window)) \
.select('recording_mbid',
'rank',
'rating',
'spark_user_id',
'user_id')
return df
def filter_recommendations_on_rating(df, limit):
""" Filter top X recommendations for each user on rating where X = limit.
Args:
df: Dataframe of user, product and rating.
limit (int): Number of recommendations to be filtered for each user.
Returns:
recommendation_df: Dataframe of spark_user_id, recording_id and rating.
"""
window = Window.partitionBy('user').orderBy(col('rating').desc())
recommendation_df = df.withColumn('rank', row_number().over(window)) \
.where(col('rank') <= limit) \
.select(col('rating'),
col('product').alias('recording_id'),
col('user').alias('spark_user_id'))
return recommendation_df
def generate_recommendations(candidate_set, params: RecommendationParams, limit):
""" Generate recommendations from the candidate set.
Args:
candidate_set (rdd): RDD of spark_user_id and recording_id.
params: RecommendationParams class object.
limit (int): Number of recommendations to be filtered for each user.
Returns:
recommendation_df: Dataframe of spark_user_id, recording_id and rating.
"""
recommendations = params.model.predictAll(candidate_set)
if recommendations.isEmpty():
raise RecommendationsNotGeneratedException('Recommendations not generated!')
df = listenbrainz_spark.session.createDataFrame(recommendations, schema=None)
recommendation_df = filter_recommendations_on_rating(df, limit)
return recommendation_df
def get_scale_rating_udf(rating):
""" Get user defined function (udf) to scale ratings so that they fall in the
range: 0.0 -> 1.0.
Args:
rating (float): score given to recordings by CF.
Returns:
rating udf.
"""
scaled_rating = (rating / 2.0) + 0.5
return round(min(max(scaled_rating, -1.0), 1.0), 3)
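# A few worked values for the scaling above (illustrative only):
#   get_scale_rating_udf(1.0)  -> 1.0
#   get_scale_rating_udf(0.0)  -> 0.5
#   get_scale_rating_udf(-1.0) -> 0.0
#   get_scale_rating_udf(3.0)  -> 1.0   (out-of-range scores are clipped)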
def scale_rating(df):
""" Scale the ratings column of dataframe so that they fall in the
range: 0.0 -> 1.0.
Args:
df: Dataframe to scale.
Returns:
df: Dataframe with scaled rating.
"""
scaling_udf = udf(get_scale_rating_udf, DoubleType())
df = df.withColumn("scaled_rating", scaling_udf(df.rating)) \
.select(col('recording_id'),
col('spark_user_id'),
col('scaled_rating').alias('rating'))
return df
def get_candidate_set_rdd_for_user(candidate_set_df, users):
""" Get candidate set RDD for a given user.
Args:
candidate_set_df: A dataframe of spark_user_id and recording_id for all users.
users: list of user names to generate recommendations for.
Returns:
        candidate_set_rdd: An RDD of spark_user_id and recording_id for the given users.
"""
if users:
candidate_set_user_df = candidate_set_df.select('spark_user_id', 'recording_id') \
.where(col('user_id').isin(users))
else:
candidate_set_user_df = candidate_set_df.select('spark_user_id', 'recording_id')
if _is_empty_dataframe(candidate_set_user_df):
raise EmptyDataframeExcpetion('Empty Candidate sets!')
candidate_set_rdd = candidate_set_user_df.rdd.map(lambda r: (r['spark_user_id'], r['recording_id']))
return candidate_set_rdd
def get_user_name_and_user_id(params: RecommendationParams, users):
""" Get users from top artist candidate set.
Args:
params: RecommendationParams class object.
        users: list of user names to generate recommendations for.
Returns:
users_df: dataframe of user id and user names.
"""
if len(users) == 0:
users_df = params.top_artist_candidate_set_df.select('spark_user_id', 'user_id').distinct()
else:
users_df = params.top_artist_candidate_set_df.select('spark_user_id', 'user_id') \
.where(params.top_artist_candidate_set_df.user_id.isin(users)) \
.distinct()
if _is_empty_dataframe(users_df):
raise EmptyDataframeExcpetion('No active users found!')
return users_df
def check_for_ratings_beyond_range(top_artist_rec_df, similar_artist_rec_df):
""" Check if rating in top_artist_rec_df and similar_artist_rec_df does not belong to [-1, 1].
Args:
top_artist_rec_df (dataframe): Top artist recommendations for all users.
similar_artist_rec_df (dataframe): Similar artist recommendations for all users.
Returns:
a tuple of booleans (max out of range, min out of range)
"""
max_rating = top_artist_rec_df.select(func.max('rating').alias('rating')).take(1)[0].rating
max_rating = max(similar_artist_rec_df.select(func.max('rating').alias('rating')).take(1)[0].rating, max_rating)
min_rating = top_artist_rec_df.select(func.min('rating').alias('rating')).take(1)[0].rating
min_rating = min(similar_artist_rec_df.select(func.min('rating').alias('rating')).take(1)[0].rating, min_rating)
if max_rating > 1.0:
logger.info('Some ratings are greater than 1 \nMax rating: {}'.format(max_rating))
if min_rating < -1.0:
logger.info('Some ratings are less than -1 \nMin rating: {}'.format(min_rating))
return max_rating > 1.0, min_rating < -1.0
def create_messages(top_artist_rec_mbid_df, similar_artist_rec_mbid_df, active_user_count, total_time,
top_artist_rec_user_count, similar_artist_rec_user_count):
""" Create messages to send the data to the webserver via RabbitMQ.
Args:
top_artist_rec_mbid_df (dataframe): Top artist recommendations.
similar_artist_rec_mbid_df (dataframe): Similar artist recommendations.
active_user_count (int): Number of users active in the last week.
        total_time (float): Time taken (in seconds) to execute the whole script.
top_artist_rec_user_count (int): Number of users for whom top artist recommendations were generated.
similar_artist_rec_user_count (int): Number of users for whom similar artist recommendations were generated.
Returns:
messages: A list of messages to be sent via RabbitMQ
"""
top_artist_rec_itr = top_artist_rec_mbid_df.toLocalIterator()
user_rec = {}
for row in top_artist_rec_itr:
if user_rec.get(row.user_id) is None:
user_rec[row.user_id] = {}
user_rec[row.user_id]['top_artist'] = [
{
"recording_mbid": row.recording_mbid,
"score": row.rating
}
]
user_rec[row.user_id]['similar_artist'] = []
else:
user_rec[row.user_id]['top_artist'].append(
{
"recording_mbid": row.recording_mbid,
"score": row.rating
}
)
similar_artist_rec_itr = similar_artist_rec_mbid_df.toLocalIterator()
for row in similar_artist_rec_itr:
if user_rec.get(row.user_id) is None:
user_rec[row.user_id] = {}
user_rec[row.user_id]['similar_artist'] = [
{
"recording_mbid": row.recording_mbid,
"score": row.rating
}
]
else:
user_rec[row.user_id]['similar_artist'].append(
{
"recording_mbid": row.recording_mbid,
"score": row.rating
}
)
for user_id, data in user_rec.items():
messages = {
'user_id': user_id,
'type': 'cf_recommendations_recording_recommendations',
'recommendations': {
'top_artist': data.get('top_artist', []),
'similar_artist': data.get('similar_artist', [])
}
}
yield messages
yield {
'type': 'cf_recommendations_recording_mail',
'active_user_count': active_user_count,
'top_artist_user_count': top_artist_rec_user_count,
'similar_artist_user_count': similar_artist_rec_user_count,
'total_time': '{:.2f}'.format(total_time / 3600)
}
def get_recommendations_for_all(params: RecommendationParams, users):
""" Get recommendations for all active users.
Args:
params: RecommendationParams class object.
        users: list of user names to generate recommendations for.
Returns:
top_artist_rec_df: Top artist recommendations.
similar_artist_rec_df: Similar artist recommendations.
"""
try:
top_artist_candidate_set_rdd = get_candidate_set_rdd_for_user(params.top_artist_candidate_set_df, users)
except EmptyDataframeExcpetion:
logger.error('Top artist candidate set not found for any user.', exc_info=True)
raise
try:
similar_artist_candidate_set_rdd = get_candidate_set_rdd_for_user(params.similar_artist_candidate_set_df, users)
except EmptyDataframeExcpetion:
logger.error('Similar artist candidate set not found for any user.', exc_info=True)
raise
try:
top_artist_rec_df = generate_recommendations(top_artist_candidate_set_rdd, params,
params.recommendation_top_artist_limit)
except RecommendationsNotGeneratedException:
logger.error('Top artist recommendations not generated for any user', exc_info=True)
raise
try:
similar_artist_rec_df = generate_recommendations(similar_artist_candidate_set_rdd, params,
params.recommendation_similar_artist_limit)
except RecommendationsNotGeneratedException:
logger.error('Similar artist recommendations not generated for any user', exc_info=True)
raise
return top_artist_rec_df, similar_artist_rec_df
def get_user_count(df):
""" Get distinct user count from the given dataframe.
"""
users_df = df.select('spark_user_id').distinct()
return users_df.count()
def main(recommendation_top_artist_limit=None, recommendation_similar_artist_limit=None, users=None):
try:
listenbrainz_spark.init_spark_session('Recommendations')
except SparkSessionNotInitializedException as err:
logger.error(str(err), exc_info=True)
raise
try:
recordings_df = utils.read_files_from_HDFS(path.RECOMMENDATION_RECORDINGS_DATAFRAME)
top_artist_candidate_set_df = utils.read_files_from_HDFS(path.RECOMMENDATION_RECORDING_TOP_ARTIST_CANDIDATE_SET)
similar_artist_candidate_set_df = utils.read_files_from_HDFS(path.RECOMMENDATION_RECORDING_SIMILAR_ARTIST_CANDIDATE_SET)
except PathNotFoundException as err:
logger.error(str(err), exc_info=True)
raise
except FileNotFetchedException as err:
logger.error(str(err), exc_info=True)
raise
logger.info('Loading model...')
model = load_model()
# an action must be called to persist data in memory
recordings_df.count()
recordings_df.persist()
params = RecommendationParams(recordings_df, model, top_artist_candidate_set_df,
similar_artist_candidate_set_df,
recommendation_top_artist_limit,
recommendation_similar_artist_limit)
try:
# timestamp when the script was invoked
ts_initial = time.monotonic()
users_df = get_user_name_and_user_id(params, users)
# Some users are excluded from the top_artist_candidate_set because of the limited data
# in the mapping. Therefore, active_user_count may or may not be equal to number of users
# active in the last week. Ideally, top_artist_candidate_set should give the active user count.
active_user_count = users_df.count()
users_df.persist()
logger.info('Took {:.2f}sec to get active user count'.format(time.monotonic() - ts_initial))
except EmptyDataframeExcpetion as err:
logger.error(str(err), exc_info=True)
raise
logger.info('Generating recommendations...')
ts = time.monotonic()
top_artist_rec_df, similar_artist_rec_df = get_recommendations_for_all(params, users)
logger.info('Recommendations generated!')
logger.info('Took {:.2f}sec to generate recommendations for all active users'.format(time.monotonic() - ts))
ts = time.monotonic()
top_artist_rec_user_count = get_user_count(top_artist_rec_df)
similar_artist_rec_user_count = get_user_count(similar_artist_rec_df)
logger.info('Took {:.2f}sec to get top artist and similar artist user count'.format(time.monotonic() - ts))
ts = time.monotonic()
check_for_ratings_beyond_range(top_artist_rec_df, similar_artist_rec_df)
top_artist_rec_scaled_df = scale_rating(top_artist_rec_df)
similar_artist_rec_scaled_df = scale_rating(similar_artist_rec_df)
logger.info('Took {:.2f}sec to scale the ratings'.format(time.monotonic() - ts))
ts = time.monotonic()
top_artist_rec_mbid_df = get_recording_mbids(params, top_artist_rec_scaled_df, users_df)
similar_artist_rec_mbid_df = get_recording_mbids(params, similar_artist_rec_scaled_df, users_df)
logger.info('Took {:.2f}sec to get mbids corresponding to recording ids'.format(time.monotonic() - ts))
# persisted data must be cleared from memory after usage to avoid OOM
recordings_df.unpersist()
total_time = time.monotonic() - ts_initial
logger.info('Total time: {:.2f}sec'.format(total_time))
result = create_messages(top_artist_rec_mbid_df, similar_artist_rec_mbid_df, active_user_count, total_time,
top_artist_rec_user_count, similar_artist_rec_user_count)
users_df.unpersist()
return result
| metabrainz/listenbrainz-server | listenbrainz_spark/recommendations/recording/recommend.py | Python | gpl-2.0 | 19,039 | 0.004254 |
#!/usr/bin/env python
import os, re, sys
if len(sys.argv) < 2:
print "usage: %s <recordfile>" % sys.argv[0]
sys.exit(1)
# Read record
filename = sys.argv[1]
fd = file(filename)
record = fd.read()
fd.close()
# Update revision
newrecord = []
lbreak = "\r\n"
for line in record.splitlines():
if line.startswith('# Revision:'):
rev = int(line.split(':')[1]) + 1
line = '# Revision: %u' % rev
newrecord.append(line)
newrecord = lbreak.join(newrecord)
# Setup mail values
address = 'freedb-submit@freedb.org'
ident = os.path.splitext(filename)[0]
if not re.search('^[a-z]+ [a-z0-9]{8}$', ident):
    sys.exit(ident + " is not a valid freedb `genre discid' pair")
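# e.g. a record saved as "rock 7c0a8b0c" (genre plus 8-character discid, values made up)
# passes the check above and produces the subject "cddb rock 7c0a8b0c".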
subject = "cddb %s" % ident
# Save updated record
fd = file(filename, "w")
fd.write(newrecord)
fd.close()
# Send mail
print "Subject:", subject
cmd = 'cat "%s" | mutt -s "%s" %s' % (filename, subject, address)
print "%", cmd
os.system(cmd)
| BackupTheBerlios/namingmuse | tools/freedb-submit.py | Python | gpl-2.0 | 936 | 0.001068 |
from graphql_relay.node.node import from_global_id
def get_errors(e):
# transform django errors to redux errors
# django: {"key1": [value1], {"key2": [value2]}}
# redux: ["key1", "value1", "key2", "value2"]
fields = e.message_dict.keys()
messages = ['; '.join(m) for m in e.message_dict.values()]
errors = [i for pair in zip(fields, messages) for i in pair]
return errors
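# Illustrative transformation (hypothetical validation error; order follows the dict):
#   e.message_dict == {"name": ["Required."], "email": ["Invalid address."]}
#   get_errors(e)  == ["name", "Required.", "email", "Invalid address."]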
def get_object(object_name, relayId, otherwise=None):
try:
return object_name.objects.get(pk=from_global_id(relayId)[1])
except:
return otherwise
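# Note: from_global_id decodes a Relay global ID such as "Q2x1YjoxMjM=" (base64 of
# "Club:123") into ("Club", "123"); the [1] above picks out the database primary key.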
def load_object(instance, args, exception=['id']):
if instance:
[setattr(instance, key, value) for key, value in args.items() if key not in exception]
return instance
| teamtaverna/core | app/api/cruds/utils.py | Python | mit | 760 | 0.002632 |
########################################################################
# $HeadURL$
# File : SSHComputingElement.py
# Author : Dumitru Laurentiu, A.T.
########################################################################
""" SSH (Virtual) Computing Element: For a given IP/host it will send jobs directly through ssh
"""
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Resources.Computing.PilotBundle import bundleProxy, writeScript
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC import S_OK, S_ERROR
from DIRAC import rootPath
from DIRAC import gLogger
import os, urllib
import shutil, tempfile
from types import StringTypes
__RCSID__ = "$Id$"
CE_NAME = 'SSH'
MANDATORY_PARAMETERS = [ 'Queue' ]
class SSH:
def __init__( self, user = None, host = None, password = None, key = None, parameters = {}, options = "" ):
self.user = user
if not user:
self.user = parameters.get( 'SSHUser', '' )
self.host = host
if not host:
self.host = parameters.get( 'SSHHost', '' )
self.password = password
if not password:
self.password = parameters.get( 'SSHPassword', '' )
self.key = key
if not key:
self.key = parameters.get( 'SSHKey', '' )
self.options = options
if not len(options):
self.options = parameters.get( 'SSHOptions', '' )
self.log = gLogger.getSubLogger( 'SSH' )
def __ssh_call( self, command, timeout ):
try:
import pexpect
expectFlag = True
except:
from DIRAC import shellCall
expectFlag = False
if not timeout:
timeout = 999
if expectFlag:
ssh_newkey = 'Are you sure you want to continue connecting'
try:
child = pexpect.spawn( command, timeout = timeout )
i = child.expect( [pexpect.TIMEOUT, ssh_newkey, pexpect.EOF, 'assword: '] )
if i == 0: # Timeout
return S_OK( ( -1, child.before, 'SSH login failed' ) )
elif i == 1: # SSH does not have the public key. Just accept it.
child.sendline ( 'yes' )
child.expect ( 'assword: ' )
i = child.expect( [pexpect.TIMEOUT, 'assword: '] )
if i == 0: # Timeout
return S_OK( ( -1, str( child.before ) + str( child.after ), 'SSH login failed' ) )
elif i == 1:
child.sendline( self.password )
child.expect( pexpect.EOF )
return S_OK( ( 0, child.before, '' ) )
elif i == 2:
# Passwordless login, get the output
return S_OK( ( 0, child.before, '' ) )
if self.password:
child.sendline( self.password )
child.expect( pexpect.EOF )
return S_OK( ( 0, child.before, '' ) )
else:
return S_ERROR( ( -2, child.before, '' ) )
except Exception, x:
res = ( -1 , 'Encountered exception %s: %s' % ( Exception, str( x ) ) )
return S_ERROR( res )
else:
# Try passwordless login
result = shellCall( timeout, command )
# print ( "!!! SSH command: %s returned %s\n" % (command, result) )
if result['Value'][0] == 255:
return S_ERROR ( ( -1, 'Cannot connect to host %s' % self.host, '' ) )
return result
def sshCall( self, timeout, cmdSeq ):
""" Execute remote command via a ssh remote call
"""
command = cmdSeq
if type( cmdSeq ) == type( [] ):
command = ' '.join( cmdSeq )
key = ''
if self.key:
key = ' -i %s ' % self.key
pattern = "'===><==='"
command = 'ssh -q %s -l %s %s %s "echo %s;%s"' % ( key, self.user, self.host, self.options, pattern, command )
self.log.debug( "SSH command %s" % command )
result = self.__ssh_call( command, timeout )
self.log.debug( "SSH command result %s" % str( result ) )
if not result['OK']:
return result
# Take the output only after the predefined pattern
ind = result['Value'][1].find('===><===')
if ind == -1:
return result
status,output,error = result['Value']
output = output[ind+8:]
if output.startswith('\r'):
output = output[1:]
if output.startswith('\n'):
output = output[1:]
result['Value'] = ( status,output,error )
return result
def scpCall( self, timeout, localFile, destinationPath, upload = True ):
""" Execute scp copy
"""
key = ''
if self.key:
key = ' -i %s ' % self.key
if upload:
command = "scp %s %s %s %s@%s:%s" % ( key, self.options, localFile, self.user, self.host, destinationPath )
else:
command = "scp %s %s %s@%s:%s %s" % ( key, self.options, self.user, self.host, destinationPath, localFile )
self.log.debug( "SCP command %s" % command )
return self.__ssh_call( command, timeout )
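# Example usage of the SSH helper above (host, user and paths are made up):
#   ssh = SSH( user = 'dirac', host = 'ce.example.org', key = '/home/dirac/.ssh/id_rsa' )
#   result = ssh.sshCall( 30, 'ls -l /opt/dirac' )   # S_OK( ( status, stdout, stderr ) ) on success
#   result = ssh.scpCall( 30, '/tmp/job.sh', '/home/dirac/data/job.sh' )   # upload localFile to the host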
class SSHComputingElement( ComputingElement ):
#############################################################################
def __init__( self, ceUniqueID ):
""" Standard constructor.
"""
ComputingElement.__init__( self, ceUniqueID )
self.ceType = CE_NAME
self.controlScript = 'sshce'
self.submittedJobs = 0
self.mandatoryParameters = MANDATORY_PARAMETERS
#############################################################################
def _addCEConfigDefaults( self ):
"""Method to make sure all necessary Configuration Parameters are defined
"""
# First assure that any global parameters are loaded
ComputingElement._addCEConfigDefaults( self )
# Now batch system specific ones
if 'ExecQueue' not in self.ceParameters:
self.ceParameters['ExecQueue'] = self.ceParameters.get( 'Queue', '' )
if 'SharedArea' not in self.ceParameters:
      # '.' isn't a good location, move to $HOME
self.ceParameters['SharedArea'] = '$HOME'
if 'BatchOutput' not in self.ceParameters:
self.ceParameters['BatchOutput'] = 'data'
if 'BatchError' not in self.ceParameters:
self.ceParameters['BatchError'] = 'data'
if 'ExecutableArea' not in self.ceParameters:
self.ceParameters['ExecutableArea'] = 'data'
if 'InfoArea' not in self.ceParameters:
self.ceParameters['InfoArea'] = 'info'
if 'WorkArea' not in self.ceParameters:
self.ceParameters['WorkArea'] = 'work'
if 'SubmitOptions' not in self.ceParameters:
self.ceParameters['SubmitOptions'] = '-'
def _reset( self ):
""" Process CE parameters and make necessary adjustments
"""
self.queue = self.ceParameters['Queue']
if 'ExecQueue' not in self.ceParameters or not self.ceParameters['ExecQueue']:
self.ceParameters['ExecQueue'] = self.ceParameters.get( 'Queue', '' )
self.execQueue = self.ceParameters['ExecQueue']
self.log.info( "Using queue: ", self.queue )
self.sharedArea = self.ceParameters['SharedArea']
self.batchOutput = self.ceParameters['BatchOutput']
if not self.batchOutput.startswith( '/' ):
self.batchOutput = os.path.join( self.sharedArea, self.batchOutput )
self.batchError = self.ceParameters['BatchError']
if not self.batchError.startswith( '/' ):
self.batchError = os.path.join( self.sharedArea, self.batchError )
self.infoArea = self.ceParameters['InfoArea']
if not self.infoArea.startswith( '/' ):
self.infoArea = os.path.join( self.sharedArea, self.infoArea )
self.executableArea = self.ceParameters['ExecutableArea']
if not self.executableArea.startswith( '/' ):
self.executableArea = os.path.join( self.sharedArea, self.executableArea )
self.workArea = self.ceParameters['WorkArea']
if not self.workArea.startswith( '/' ):
self.workArea = os.path.join( self.sharedArea, self.workArea )
result = self._prepareRemoteHost()
self.submitOptions = ''
if 'SubmitOptions' in self.ceParameters:
self.submitOptions = self.ceParameters['SubmitOptions']
self.removeOutput = True
if 'RemoveOutput' in self.ceParameters:
if self.ceParameters['RemoveOutput'].lower() in ['no', 'false', '0']:
self.removeOutput = False
def _prepareRemoteHost(self, host=None ):
""" Prepare remote directories and upload control script
"""
ssh = SSH( host = host, parameters = self.ceParameters )
# Make remote directories
dirTuple = tuple ( uniqueElements( [self.sharedArea,
self.executableArea,
self.infoArea,
self.batchOutput,
self.batchError,
self.workArea] ) )
nDirs = len( dirTuple )
cmd = 'mkdir -p %s; '*nDirs % dirTuple
self.log.verbose( 'Creating working directories on %s' % self.ceParameters['SSHHost'] )
result = ssh.sshCall( 30, cmd )
if not result['OK']:
self.log.warn( 'Failed creating working directories: %s' % result['Message'][1] )
return result
status,output,error = result['Value']
if status == -1:
      self.log.warn( 'Timeout while creating directories' )
      return S_ERROR( 'Timeout while creating directories' )
if "cannot" in output:
self.log.warn( 'Failed to create directories: %s' % output )
return S_ERROR( 'Failed to create directories: %s' % output )
# Upload the control script now
sshScript = os.path.join( rootPath, "DIRAC", "Resources", "Computing", "remote_scripts", self.controlScript )
self.log.verbose( 'Uploading %s script to %s' % ( self.controlScript, self.ceParameters['SSHHost'] ) )
result = ssh.scpCall( 30, sshScript, self.sharedArea )
if not result['OK']:
self.log.warn( 'Failed uploading control script: %s' % result['Message'][1] )
return result
status,output,error = result['Value']
if status != 0:
if status == -1:
self.log.warn( 'Timeout while uploading control script' )
return S_ERROR( 'Timeout while uploading control script' )
else:
self.log.warn( 'Failed uploading control script: %s' % output )
return S_ERROR( 'Failed uploading control script' )
# Chmod the control scripts
self.log.verbose( 'Chmod +x control script' )
result = ssh.sshCall( 10, "chmod +x %s/%s" % ( self.sharedArea, self.controlScript ) )
if not result['OK']:
self.log.warn( 'Failed chmod control script: %s' % result['Message'][1] )
return result
status,output,error = result['Value']
if status != 0:
if status == -1:
self.log.warn( 'Timeout while chmod control script' )
return S_ERROR( 'Timeout while chmod control script' )
else:
        self.log.warn( 'Failed to chmod control script: %s' % output )
        return S_ERROR( 'Failed to chmod control script' )
return S_OK()
def submitJob( self, executableFile, proxy, numberOfJobs = 1 ):
# self.log.verbose( "Executable file path: %s" % executableFile )
if not os.access( executableFile, 5 ):
os.chmod( executableFile, 0755 )
# if no proxy is supplied, the executable can be submitted directly
# otherwise a wrapper script is needed to get the proxy to the execution node
# The wrapper script makes debugging more complicated and thus it is
# recommended to transfer a proxy inside the executable if possible.
if proxy:
self.log.verbose( 'Setting up proxy for payload' )
wrapperContent = bundleProxy( executableFile, proxy )
name = writeScript( wrapperContent, os.getcwd() )
submitFile = name
else: # no proxy
submitFile = executableFile
result = self._submitJobToHost( submitFile, numberOfJobs )
if proxy:
os.remove( submitFile )
return result
def _submitJobToHost( self, executableFile, numberOfJobs, host = None ):
""" Submit prepared executable to the given host
"""
ssh = SSH( host = host, parameters = self.ceParameters )
# Copy the executable
sFile = os.path.basename( executableFile )
result = ssh.scpCall( 10, executableFile, '%s/%s' % ( self.executableArea, os.path.basename( executableFile ) ) )
if not result['OK']:
return result
jobStamps = []
for i in range( numberOfJobs ):
jobStamps.append( makeGuid()[:8] )
jobStamp = '#'.join( jobStamps )
subOptions = urllib.quote( self.submitOptions )
cmd = "bash --login -c '%s/%s submit_job %s/%s %s %s %s %d %s %s %s %s'" % ( self.sharedArea,
self.controlScript,
self.executableArea,
os.path.basename( executableFile ),
self.batchOutput,
self.batchError,
self.workArea,
numberOfJobs,
self.infoArea,
jobStamp,
self.execQueue,
subOptions )
self.log.verbose( 'CE submission command: %s' % cmd )
result = ssh.sshCall( 120, cmd )
if not result['OK']:
self.log.error( '%s CE job submission failed' % self.ceType, result['Message'] )
return result
sshStatus = result['Value'][0]
sshStdout = result['Value'][1]
sshStderr = result['Value'][2]
# Examine results of the job submission
submitHost = host
if host is None:
submitHost = self.ceParameters['SSHHost'].split('/')[0]
if sshStatus == 0:
outputLines = sshStdout.strip().replace('\r','').split('\n')
try:
index = outputLines.index('============= Start output ===============')
outputLines = outputLines[index+1:]
except:
return S_ERROR( "Invalid output from job submission: %s" % outputLines[0] )
try:
status = int( outputLines[0] )
except:
return S_ERROR( "Failed local batch job submission: %s" % outputLines[0] )
if status != 0:
message = "Unknown reason"
if len( outputLines ) > 1:
message = outputLines[1]
return S_ERROR( 'Failed job submission, reason: %s' % message )
else:
batchIDs = outputLines[1:]
jobIDs = [ self.ceType.lower()+'://'+self.ceName+'/'+id for id in batchIDs ]
else:
return S_ERROR( '\n'.join( [sshStdout,sshStderr] ) )
result = S_OK ( jobIDs )
self.submittedJobs += len( batchIDs )
return result
def killJob( self, jobIDList ):
""" Kill a bunch of jobs
"""
if type( jobIDList ) in StringTypes:
jobIDList = [jobIDList]
return self._killJobOnHost( jobIDList )
def _killJobOnHost( self, jobIDList, host = None ):
""" Kill the jobs for the given list of job IDs
"""
resultDict = {}
ssh = SSH( host = host, parameters = self.ceParameters )
jobDict = {}
for job in jobIDList:
result = pfnparse( job )
if result['OK']:
stamp = result['Value']['FileName']
else:
self.log.error( 'Invalid job id', job )
continue
jobDict[stamp] = job
stampList = jobDict.keys()
cmd = "bash --login -c '%s/%s kill_job %s %s'" % ( self.sharedArea, self.controlScript, '#'.join( stampList ), self.infoArea )
result = ssh.sshCall( 10, cmd )
if not result['OK']:
return result
sshStatus = result['Value'][0]
sshStdout = result['Value'][1]
sshStderr = result['Value'][2]
    # Examine results of the job kill
if sshStatus == 0:
outputLines = sshStdout.strip().replace('\r','').split('\n')
try:
index = outputLines.index('============= Start output ===============')
outputLines = outputLines[index+1:]
except:
return S_ERROR( "Invalid output from job kill: %s" % outputLines[0] )
try:
status = int( outputLines[0] )
except:
return S_ERROR( "Failed local batch job kill: %s" % outputLines[0] )
if status != 0:
message = "Unknown reason"
if len( outputLines ) > 1:
message = outputLines[1]
return S_ERROR( 'Failed job kill, reason: %s' % message )
else:
return S_ERROR( '\n'.join( [sshStdout,sshStderr] ) )
return S_OK()
def _getHostStatus( self, host = None ):
""" Get jobs running at a given host
"""
ssh = SSH( host = host, parameters = self.ceParameters )
cmd = "bash --login -c '%s/%s status_info %s %s %s %s'" % ( self.sharedArea,
self.controlScript,
self.infoArea,
self.workArea,
self.ceParameters['SSHUser'],
self.execQueue )
result = ssh.sshCall( 10, cmd )
if not result['OK']:
return result
sshStatus = result['Value'][0]
sshStdout = result['Value'][1]
sshStderr = result['Value'][2]
    # Examine results of the status query
resultDict = {}
if sshStatus == 0:
outputLines = sshStdout.strip().replace('\r','').split('\n')
try:
index = outputLines.index('============= Start output ===============')
outputLines = outputLines[index+1:]
except:
return S_ERROR( "Invalid output from CE get status: %s" % outputLines[0] )
try:
status = int( outputLines[0] )
except:
return S_ERROR( "Failed to get CE status: %s" % outputLines[0] )
if status != 0:
message = "Unknown reason"
if len( outputLines ) > 1:
message = outputLines[1]
return S_ERROR( 'Failed to get CE status, reason: %s' % message )
else:
for line in outputLines[1:]:
if ':::' in line:
jobStatus, nJobs = line.split( ':::' )
resultDict[jobStatus] = int( nJobs )
else:
return S_ERROR( '\n'.join( [sshStdout,sshStderr] ) )
return S_OK( resultDict )
def getCEStatus( self, jobIDList = None ):
""" Method to return information on running and pending jobs.
"""
result = S_OK()
result['SubmittedJobs'] = self.submittedJobs
result['RunningJobs'] = 0
result['WaitingJobs'] = 0
resultHost = self._getHostStatus()
if not resultHost['OK']:
return resultHost
result['RunningJobs'] = resultHost['Value'].get( 'Running',0 )
result['WaitingJobs'] = resultHost['Value'].get( 'Waiting',0 )
self.log.verbose( 'Waiting Jobs: ', result['WaitingJobs'] )
self.log.verbose( 'Running Jobs: ', result['RunningJobs'] )
return result
def getJobStatus( self, jobIDList ):
""" Get the status information for the given list of jobs
"""
return self._getJobStatusOnHost( jobIDList )
def _getJobStatusOnHost( self, jobIDList, host = None ):
""" Get the status information for the given list of jobs
"""
# self.log.verbose( '*** getUnitJobStatus %s - %s\n' % ( jobIDList, host) )
resultDict = {}
ssh = SSH( host = host, parameters = self.ceParameters )
jobDict = {}
for job in jobIDList:
result = pfnparse( job )
if result['OK']:
stamp = result['Value']['FileName']
else:
self.log.error( 'Invalid job id', job )
continue
jobDict[stamp] = job
stampList = jobDict.keys()
cmd = "bash --login -c '%s/%s job_status %s %s %s'" % ( self.sharedArea,
self.controlScript,
'#'.join( stampList ),
self.infoArea,
self.ceParameters['SSHUser'] )
result = ssh.sshCall( 30, cmd )
if not result['OK']:
return result
sshStatus = result['Value'][0]
sshStdout = result['Value'][1]
sshStderr = result['Value'][2]
if sshStatus == 0:
outputLines = sshStdout.strip().replace('\r','').split('\n')
try:
index = outputLines.index('============= Start output ===============')
outputLines = outputLines[index+1:]
except:
return S_ERROR( "Invalid output from job get status: %s" % outputLines[0] )
try:
status = int( outputLines[0] )
except:
return S_ERROR( "Failed local batch job status: %s" % outputLines[0] )
if status != 0:
message = "Unknown reason"
if len( outputLines ) > 1:
message = outputLines[1]
        return S_ERROR( 'Failed to get job status, reason: %s' % message )
else:
for line in outputLines[1:]:
jbundle = line.split( ':::' )
if ( len( jbundle ) == 2 ):
resultDict[jobDict[jbundle[0]]] = jbundle[1]
else:
return S_ERROR( '\n'.join( [sshStdout,sshStderr] ) )
# self.log.verbose( ' !!! getUnitJobStatus will return : %s\n' % resultDict )
return S_OK( resultDict )
def _getJobOutputFiles( self, jobID ):
""" Get output file names for the specific CE
"""
result = pfnparse( jobID )
if not result['OK']:
return result
jobStamp = result['Value']['FileName']
host = result['Value']['Host']
output = '%s/%s.out' % ( self.batchOutput, jobStamp )
error = '%s/%s.err' % ( self.batchError, jobStamp )
return S_OK( (jobStamp,host,output,error) )
def getJobOutput( self, jobID, localDir = None ):
""" Get the specified job standard output and error files. If the localDir is provided,
the output is returned as file in this directory. Otherwise, the output is returned
as strings.
"""
result = self._getJobOutputFiles(jobID)
if not result['OK']:
return result
jobStamp,host,outputFile,errorFile = result['Value']
self.log.verbose( 'Getting output for jobID %s' % jobID )
if not localDir:
tempDir = tempfile.mkdtemp()
else:
tempDir = localDir
ssh = SSH( parameters = self.ceParameters )
result = ssh.scpCall( 20, '%s/%s.out' % ( tempDir, jobStamp ), '%s' % outputFile, upload = False )
if not result['OK']:
return result
if not os.path.exists( '%s/%s.out' % ( tempDir, jobStamp ) ):
os.system( 'touch %s/%s.out' % ( tempDir, jobStamp ) )
result = ssh.scpCall( 20, '%s/%s.err' % ( tempDir, jobStamp ), '%s' % errorFile, upload = False )
if not result['OK']:
return result
if not os.path.exists( '%s/%s.err' % ( tempDir, jobStamp ) ):
os.system( 'touch %s/%s.err' % ( tempDir, jobStamp ) )
# The result is OK, we can remove the output
if self.removeOutput:
result = ssh.sshCall( 10, 'rm -f %s/%s.out %s/%s.err' % ( self.batchOutput, jobStamp, self.batchError, jobStamp ) )
if localDir:
return S_OK( ( '%s/%s.out' % ( tempDir, jobStamp ), '%s/%s.err' % ( tempDir, jobStamp ) ) )
else:
# Return the output as a string
outputFile = open( '%s/%s.out' % ( tempDir, jobStamp ), 'r' )
output = outputFile.read()
outputFile.close()
outputFile = open( '%s/%s.err' % ( tempDir, jobStamp ), 'r' )
error = outputFile.read()
outputFile.close()
shutil.rmtree( tempDir )
return S_OK( ( output, error ) )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| calancha/DIRAC | Resources/Computing/SSHComputingElement.py | Python | gpl-3.0 | 24,359 | 0.036332 |
"""
Small POP3 server. Heavily based on
pypopper: a file-based pop3 server (http://code.activestate.com/recipes/534131-pypopper-python-pop3-server/)
Usage:
python server.py
Will return all mail*.txt in the current folder as mail. Output is also printed.
"""
import logging
import socket
import glob
logging.basicConfig(format="%(message)s")
log = logging.getLogger("pypopper")
log.setLevel(logging.INFO)
class ChatterboxConnection(object):
END = "\r\n"
def __init__(self, conn):
self.conn = conn
def __getattr__(self, name):
return getattr(self.conn, name)
def sendall(self, data, END=END):
if len(data) < 50:
log.debug("send: %r", data)
else:
log.debug("send: %r...", data[:50])
data += END
self.conn.sendall(data)
def recvall(self, END=END):
data = []
while True:
chunk = self.conn.recv(4096)
if END in chunk:
data.append(chunk[:chunk.index(END)])
break
data.append(chunk)
if len(data) > 1:
pair = data[-2] + data[-1]
if END in pair:
data[-2] = pair[:pair.index(END)]
data.pop()
break
log.debug("recv: %r", "".join(data))
return "".join(data)
class Message(object):
def __init__(self, filename):
msg = open(filename, "r")
try:
self.data = data = msg.read()
self.size = len(data)
self.top, bot = data.split("\r\n\r\n", 1)
self.bot = bot.split("\r\n")
self.index = int(filename.split('mail')[1].split('.txt')[0])
finally:
msg.close()
def handleUser(data, msgs):
log.info("USER:%s", data.split()[1])
return "+OK user accepted"
def handlePass(data, msgs):
log.info("PASS:%s", data.split()[1])
return "+OK pass accepted"
def handleStat(data, msgs):
return "+OK %i %i" % (len(msgs), sum([msg.size for msg in msgs]))
def handleList(data, msgs):
return "+OK %i messages (%i octets)\r\n%s\r\n." % (len(msgs), sum([msg.size for msg in msgs]), '\r\n'.join(["%i %i" % (msg.index, msg.size,) for msg in msgs]))
def handleTop(data, msgs):
cmd, num, lines = data.split()
lines = int(lines)
msg = msgs[int(num) - 1]
text = msg.top + "\r\n\r\n" + "\r\n".join(msg.bot[:lines])
return "+OK top of message follows\r\n%s\r\n." % text
def handleRetr(data, msgs):
log.info("RETRIVE:%s", data.split()[1])
msg = msgs[int(data.split()[1]) - 1]
return "+OK %i octets\r\n%s\r\n." % (msg.size, msg.data)
def handleDele(data, msgs):
log.info("DELETE:%s", data.split()[1])
return "+OK message 1 deleted"
def handleNoop(data, msgs):
return "+OK"
def handleQuit(data, msgs):
return "+OK pypopper POP3 server signing off"
dispatch = dict(
USER=handleUser,
PASS=handlePass,
STAT=handleStat,
LIST=handleList,
TOP=handleTop,
RETR=handleRetr,
DELE=handleDele,
NOOP=handleNoop,
QUIT=handleQuit,
)
def serve(host, port, filenames):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
try:
if host:
hostname = host
else:
hostname = "localhost"
log.debug("pypopper POP3 serving '%s' on %s:%s", filenames, hostname, port)
while True:
sock.listen(1)
conn, addr = sock.accept()
log.debug('Connected by %s', addr)
try:
msgs = range(0, len(filenames))
for f in filenames:
msg = Message(f)
msgs[msg.index-1] = msg
conn = ChatterboxConnection(conn)
conn.sendall("+OK pypopper file-based pop3 server ready")
while True:
data = conn.recvall()
command = data.split(None, 1)[0]
try:
cmd = dispatch[command]
except KeyError:
conn.sendall("-ERR unknown command")
else:
conn.sendall(cmd(data, msgs))
if cmd is handleQuit:
return
finally:
conn.close()
msgs = None
except (SystemExit, KeyboardInterrupt):
log.info("pypopper stopped")
except Exception as ex:
log.critical("fatal error", exc_info=ex)
finally:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
if __name__ == "__main__":
filenames = glob.glob("./mail[0-9]*.txt")
serve("127.0.0.1", 22110, filenames)
| ebu/PlugIt | tests/helpers/pop_server/server.py | Python | bsd-3-clause | 4,827 | 0.001243 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import admin_password
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
def fake_get(self, context, id):
return {'uuid': id}
def fake_get_non_existed(self, context, id):
raise exception.InstanceNotFound(instance_id=id)
def fake_set_admin_password(self, context, instance, password=None):
pass
def fake_set_admin_password_failed(self, context, instance, password=None):
raise exception.InstancePasswordSetFailed(instance=instance, reason='')
def fake_set_admin_password_non_implement(self, context, instance,
password=None):
raise NotImplementedError()
class AdminPasswordTest(test.NoDBTestCase):
def setUp(self):
super(AdminPasswordTest, self).setUp()
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password)
self.stubs.Set(compute_api.API, 'get', fake_get)
self.app = fakes.wsgi_app_v3(init_only=('servers',
admin_password.ALIAS))
def _make_request(self, url, body):
req = webob.Request.blank(url)
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.content_type = 'application/json'
res = req.get_response(self.app)
return res
def test_change_password(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 204)
def test_change_password_empty_string(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': ''}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 204)
def test_change_password_with_non_implement(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password_non_implement)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 501)
def test_change_password_with_non_existed_instance(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'get', fake_get_non_existed)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 404)
def test_change_password_with_non_string_password(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 1234}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 400)
def test_change_password_failed(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password_failed)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 409)
def test_change_password_without_admin_password(self):
url = '/v3/servers/1/action'
body = {'change_password': {}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 400)
def test_change_password_none(self):
url = '/v3/servers/1/action'
body = {'change_password': None}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 400)
class AdminPasswordXMLTest(test.NoDBTestCase):
def setUp(self):
super(AdminPasswordXMLTest, self).setUp()
self.deserializer = admin_password.ChangePasswordDeserializer()
def test_change_password_deserializer(self):
request = '<change_password admin_password="1"></change_password>'
expected = {'body': {'change_password': {'admin_password': '1'}}}
res = self.deserializer.default(request)
self.assertEqual(res, expected)
def test_change_password_deserializer_without_admin_password(self):
request = '<change_password></change_password>'
expected = {'body': {'change_password': None}}
res = self.deserializer.default(request)
self.assertEqual(res, expected)
def test_change_pass_no_pass(self):
request = """<?xml version="1.0" encoding="UTF-8"?>
<change_password
xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
request = self.deserializer.default(request)
expected = {
"change_password": None
}
self.assertEqual(request['body'], expected)
def test_change_pass_empty_pass(self):
request = """<?xml version="1.0" encoding="UTF-8"?>
<change_password
xmlns="http://docs.openstack.org/compute/api/v1.1"
admin_password=""/> """
request = self.deserializer.default(request)
expected = {
"change_password": {
"admin_password": "",
},
}
self.assertEqual(request['body'], expected)
| sacharya/nova | nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py | Python | apache-2.0 | 5,987 | 0 |
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_i6300_esb_watchdog import V1I6300ESBWatchdog
class TestV1I6300ESBWatchdog(unittest.TestCase):
""" V1I6300ESBWatchdog unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1I6300ESBWatchdog(self):
"""
Test V1I6300ESBWatchdog
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1_i6300_esb_watchdog.V1I6300ESBWatchdog()
pass
if __name__ == '__main__':
unittest.main()
| kubevirt/client-python | test/test_v1_i6300_esb_watchdog.py | Python | apache-2.0 | 927 | 0.001079 |
# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
from unittest import mock
import sqlalchemy as sa
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.services import security
from mistral.tests.unit.api import base
from mistral.tests.unit import base as unit_base
WF = models.WorkflowDefinition(
spec={
'version': '2.0',
'name': 'my_wf',
'tasks': {
'task1': {
'action': 'std.noop'
}
}
}
)
WF.update({'id': '123e4567-e89b-12d3-a456-426655440000', 'name': 'my_wf'})
TRIGGER = {
'id': '02abb422-55ef-4bb2-8cb9-217a583a6a3f',
'name': 'my_cron_trigger',
'pattern': '* * * * *',
'workflow_name': WF.name,
'workflow_id': '123e4567-e89b-12d3-a456-426655440000',
'workflow_input': '{}',
'workflow_params': '{}',
'scope': 'private',
'remaining_executions': 42
}
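# Note: the REST layer exposes workflow_input/workflow_params as JSON strings
# (as in TRIGGER above), while the DB model stores them as dicts, so the
# strings are decoded before building the CronTrigger fixture below.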
trigger_values = copy.deepcopy(TRIGGER)
trigger_values['workflow_input'] = json.loads(
trigger_values['workflow_input'])
trigger_values['workflow_params'] = json.loads(
trigger_values['workflow_params'])
TRIGGER_DB = models.CronTrigger()
TRIGGER_DB.update(trigger_values)
TRIGGER_DB_WITH_PROJECT_ID = TRIGGER_DB.get_clone()
TRIGGER_DB_WITH_PROJECT_ID.project_id = '<default-project>'
MOCK_WF = mock.MagicMock(return_value=WF)
MOCK_TRIGGER = mock.MagicMock(return_value=TRIGGER_DB)
MOCK_TRIGGERS = mock.MagicMock(return_value=[TRIGGER_DB])
MOCK_DELETE = mock.MagicMock(return_value=1)
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_DUPLICATE = mock.MagicMock(side_effect=exc.DBDuplicateEntryError())
class TestCronTriggerController(base.APITest):
@mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
def test_get(self):
resp = self.app.get('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TRIGGER, resp.json)
@mock.patch.object(db_api, 'get_cron_trigger')
def test_get_operational_error(self, mocked_get):
mocked_get.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
TRIGGER_DB # Successful run
]
resp = self.app.get('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TRIGGER, resp.json)
@mock.patch.object(db_api, "get_cron_trigger",
return_value=TRIGGER_DB_WITH_PROJECT_ID)
def test_get_within_project_id(self, mock_get):
resp = self.app.get('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(200, resp.status_int)
self.assertTrue('project_id' in resp.json)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_NOT_FOUND)
def test_get_not_found(self):
resp = self.app.get(
'/v2/cron_triggers/my_cron_trigger',
expect_errors=True
)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
def test_get_by_id(self):
resp = self.app.get(
"/v2/cron_triggers/02abb422-55ef-4bb2-8cb9-217a583a6a3f")
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TRIGGER, resp.json)
@mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
@mock.patch.object(db_api, "create_cron_trigger")
def test_post(self, mock_mtd):
mock_mtd.return_value = TRIGGER_DB
resp = self.app.post_json('/v2/cron_triggers', TRIGGER)
self.assertEqual(201, resp.status_int)
self.assertDictEqual(TRIGGER, resp.json)
self.assertEqual(1, mock_mtd.call_count)
values = mock_mtd.call_args[0][0]
self.assertEqual('* * * * *', values['pattern'])
self.assertEqual(42, values['remaining_executions'])
@mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
@mock.patch.object(db_api, "create_cron_trigger", MOCK_DUPLICATE)
@mock.patch.object(security, "delete_trust")
def test_post_dup(self, delete_trust):
resp = self.app.post_json(
'/v2/cron_triggers', TRIGGER, expect_errors=True
)
self.assertEqual(1, delete_trust.call_count)
self.assertEqual(409, resp.status_int)
@mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
@mock.patch.object(db_api, "create_cron_trigger", MOCK_DUPLICATE)
def test_post_same_wf_and_input(self):
trig = TRIGGER.copy()
trig['name'] = 'some_trigger_name'
resp = self.app.post_json(
'/v2/cron_triggers', trig, expect_errors=True
)
self.assertEqual(409, resp.status_int)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
@mock.patch.object(db_api, "delete_cron_trigger", MOCK_DELETE)
@mock.patch.object(security, "delete_trust")
def test_delete(self, delete_trust):
resp = self.app.delete('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(1, delete_trust.call_count)
self.assertEqual(204, resp.status_int)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
@mock.patch.object(db_api, "delete_cron_trigger", MOCK_DELETE)
@mock.patch.object(security, "delete_trust")
def test_delete_by_id(self, delete_trust):
resp = self.app.delete(
'/v2/cron_triggers/02abb422-55ef-4bb2-8cb9-217a583a6a3f')
self.assertEqual(1, delete_trust.call_count)
self.assertEqual(204, resp.status_int)
@mock.patch.object(db_api, "delete_cron_trigger", MOCK_NOT_FOUND)
def test_delete_not_found(self):
resp = self.app.delete(
'/v2/cron_triggers/my_cron_trigger',
expect_errors=True
)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, "get_cron_triggers", MOCK_TRIGGERS)
def test_get_all(self):
resp = self.app.get('/v2/cron_triggers')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['cron_triggers']))
self.assertDictEqual(TRIGGER, resp.json['cron_triggers'][0])
@mock.patch.object(db_api, 'get_cron_triggers')
def test_get_all_operational_error(self, mocked_get_all):
mocked_get_all.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
[TRIGGER_DB] # Successful run
]
resp = self.app.get('/v2/cron_triggers')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['cron_triggers']))
self.assertDictEqual(TRIGGER, resp.json['cron_triggers'][0])
@mock.patch.object(db_api, 'get_cron_triggers')
@mock.patch('mistral.context.MistralContext.from_environ')
def test_get_all_projects_admin(self, mock_context, mock_get_triggers):
admin_ctx = unit_base.get_context(admin=True)
mock_context.return_value = admin_ctx
resp = self.app.get('/v2/cron_triggers?all_projects=true')
self.assertEqual(200, resp.status_int)
self.assertTrue(mock_get_triggers.call_args[1].get('insecure', False))
@mock.patch.object(db_api, 'get_cron_triggers')
@mock.patch('mistral.context.MistralContext.from_environ')
def test_get_all_filter_project(self, mock_context, mock_get_triggers):
admin_ctx = unit_base.get_context(admin=True)
mock_context.return_value = admin_ctx
resp = self.app.get(
'/v2/cron_triggers?all_projects=true&'
'project_id=192796e61c174f718d6147b129f3f2ff'
)
self.assertEqual(200, resp.status_int)
self.assertTrue(mock_get_triggers.call_args[1].get('insecure', False))
self.assertEqual(
{'eq': '192796e61c174f718d6147b129f3f2ff'},
mock_get_triggers.call_args[1].get('project_id')
)
@mock.patch.object(db_api, "get_cron_triggers", MOCK_EMPTY)
def test_get_all_empty(self):
resp = self.app.get('/v2/cron_triggers')
self.assertEqual(200, resp.status_int)
self.assertEqual(0, len(resp.json['cron_triggers']))
| openstack/mistral | mistral/tests/unit/api/v2/test_cron_triggers.py | Python | apache-2.0 | 8,830 | 0 |
"""
MobileRobots Advanced Robotics Interface for Applications (ARIA)
Copyright (C) 2004, 2005 ActivMedia Robotics LLC
Copyright (C) 2006, 2007, 2008, 2009 MobileRobots Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
If you wish to redistribute ARIA under different terms, contact
MobileRobots for information about a commercial version of ARIA at
robots@mobilerobots.com or
MobileRobots Inc, 10 Columbia Drive, Amherst, NH 03031; 800-639-9481
"""
from AriaPy import *
from ArNetworkingPy import *
import sys
from math import sin
# This is an example server that shows how to draw arbitrary figures in a
# client (e.g. MobileEyes).
# These are callbacks that respond to client requests for the drawings'
# geometry data.
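# Each callback below builds an ArNetPacket reply: a 4-byte count of
# points/vertices followed by 4-byte X and Y coordinates (in mm) for each one,
# and sends it back to the requesting client over UDP.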
def exampleHomeDrawingNetCallback(client, requestPkt):
print "exampleHomeDrawingNetCallback"
reply = ArNetPacket()
# 7 Vertices
reply.byte4ToBuf(7)
# Centered on 0,0.
# X: Y:
reply.byte4ToBuf(-500); reply.byte4ToBuf(500); # Vertex 1
reply.byte4ToBuf(-500); reply.byte4ToBuf(-500); # Vertex 2
reply.byte4ToBuf(500); reply.byte4ToBuf(-500); # Vertex 3
reply.byte4ToBuf(500); reply.byte4ToBuf(500); # Vertex 4
reply.byte4ToBuf(0); reply.byte4ToBuf(1000); # Vertex 5
reply.byte4ToBuf(-500); reply.byte4ToBuf(500); # Vertex 6
reply.byte4ToBuf(500); reply.byte4ToBuf(500); # Vertex 7
client.sendPacketUdp(reply)
print "exampleHomeDrawingNetCallback Done."
def exampleDotsDrawingNetCallback(client, requestPkt):
reply = ArNetPacket()
tik = ArUtil.getTime() % 200
t = tik / 5.0
# Three dots
reply.byte4ToBuf(3)
# Dot 1:
reply.byte4ToBuf(3000); # X coordinate (mm)
reply.byte4ToBuf((int) (sin(t) * 1000));# Y
# Dot 2:
reply.byte4ToBuf(3500); # X
reply.byte4ToBuf((int) (sin(t+500) * 1000));# Y
# Dot 3:
reply.byte4ToBuf(4000); # X
reply.byte4ToBuf((int) (sin(t+1000) * 1000));# Y
client.sendPacketUdp(reply)
def exampleXDrawingNetCallback(client, requestPkt):
reply = ArNetPacket()
# X marks the spot. 2 line segments, so 4 vertices:
reply.byte4ToBuf(4)
# Segment 1:
reply.byte4ToBuf(-4250); # X1
reply.byte4ToBuf(250); # Y1
reply.byte4ToBuf(-3750); # X2
reply.byte4ToBuf(-250); # Y2
# Segment 2:
reply.byte4ToBuf(-4250); # X1
reply.byte4ToBuf(-250); # Y1
reply.byte4ToBuf(-3750); # X2
reply.byte4ToBuf(250); # Y2
client.sendPacketUdp(reply)
def exampleArrowsDrawingNetCallback(client, requestPkt):
# 1 arrow that points at the robot
reply = ArNetPacket()
reply.byte4ToBuf(1) # 1 arrow
reply.byte4ToBuf(0); # Pos. X
reply.byte4ToBuf(700); # Pos. Y
client.sendPacketUdp(reply)
# Main program:
Aria.init()
robot = ArRobot()
server = ArServerBase()
parser = ArArgumentParser(sys.argv)
simpleConnector = ArSimpleConnector(parser)
simpleOpener = ArServerSimpleOpener(parser)
parser.loadDefaultArguments()
if not Aria.parseArgs() or not parser.checkHelpAndWarnUnparsed():
Aria.logOptions()
Aria.exit(1)
if not simpleOpener.open(server):
if simpleOpener.wasUserFileBad():
print "Error: Bad user/password/permissions file."
else:
print "Error: Could not open server port. Use -help to see options."
Aria.exit(1)
# Devices
sonarDev = ArSonarDevice()
robot.addRangeDevice(sonarDev)
irs = ArIRs()
robot.addRangeDevice(irs)
bumpers = ArBumpers()
robot.addRangeDevice(bumpers)
sick = ArSick()
robot.addRangeDevice(sick);
# attach services to the server
serverInfoRobot = ArServerInfoRobot(server, robot)
serverInfoSensor = ArServerInfoSensor(server, robot)
# This is the service that provides drawing data to the client.
drawings = ArServerInfoDrawings(server)
# Convenience function that sets up drawings for all the robot's current
# range devices (using default shape and color info)
drawings.addRobotsRangeDevices(robot)
# Add our custom drawings
linedd = ArDrawingData("polyLine", ArColor(255, 0, 0), 2, 49) # shape name, color, size, layer
drawings.addDrawing( linedd, "exampleDrawing_Home", exampleHomeDrawingNetCallback)
dotsdd = ArDrawingData("polyDots", ArColor(0, 255, 0), 250, 48)
drawings.addDrawing(dotsdd, "exampleDrawing_Dots", exampleDotsDrawingNetCallback)
segdd = ArDrawingData("polySegments", ArColor(0, 0, 0), 4, 52)
drawings.addDrawing( segdd, "exampleDrawing_XMarksTheSpot", exampleXDrawingNetCallback)
ardd = ArDrawingData("polyArrows", ArColor(255, 0, 255), 500, 100)
drawings.addDrawing( ardd, "exampleDrawing_Arrows", exampleArrowsDrawingNetCallback)
# modes for moving the robot
modeStop = ArServerModeStop(server, robot)
modeDrive = ArServerModeDrive(server, robot)
modeRatioDrive = ArServerModeRatioDrive(server, robot)
modeWander = ArServerModeWander(server, robot)
modeStop.addAsDefaultMode()
modeStop.activate()
# set up some simple commands ("custom commands")
commands = ArServerHandlerCommands(server)
uCCommands = ArServerSimpleComUC(commands, robot)
loggingCommands = ArServerSimpleComMovementLogging(commands, robot)
configCommands = ArServerSimpleComLogRobotConfig(commands, robot)
# add the commands to enable and disable safe driving to the simple commands
modeDrive.addControlCommands(commands)
# Connect to the robot.
if not simpleConnector.connectRobot(robot):
print "Error: Could not connect to robot... exiting"
Aria.shutdown()
sys.exit(1)
# set up the laser before handing it to the laser mode
simpleConnector.setupLaser(sick)
robot.enableMotors()
# start the robot cycle running in a background thread
robot.runAsync(True)
# start the laser processing cycle in a background thread
sick.runAsync()
# connect the laser if it was requested
if not simpleConnector.connectLaser(sick):
print "Error: Could not connect to laser... exiting"
Aria.shutdown()
sys.exit(1)
# log whatever we wanted to before the runAsync
simpleOpener.checkAndLog()
# run the server thread in the background
server.runAsync()
print "Server is now running on port %d..." % (simpleOpener.getPort())
robot.waitForRunExit()
Aria.shutdown()
| charismaticchiu/Robotics | ArNetworking/pythonExamples/drawingsExample.py | Python | gpl-2.0 | 6,718 | 0.018904 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asuna.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| rogeliorv/asuna | manage.py | Python | apache-2.0 | 250 | 0.004 |
# -*- coding: latin-1 -*-
import re
import math
import urllib
from string import join
import traceback, sys
class JsUnwiser:
def unwiseAll(self, data):
try:
in_data=data
sPattern = 'eval\\(function\\(w,i,s,e\\).*?}\\((.*?)\\)'
wise_data=re.compile(sPattern).findall(in_data)
for wise_val in wise_data:
unpack_val=self.unwise(wise_val)
#print '\nunpack_val',unpack_val
in_data=in_data.replace(wise_val,unpack_val)
return in_data
except:
traceback.print_exc(file=sys.stdout)
return data
def containsWise(self, data):
return 'w,i,s,e' in data
def unwise(self, sJavascript):
#print 'sJavascript',sJavascript
page_value=""
try:
ss="w,i,s,e=("+sJavascript+')'
exec (ss)
page_value=self.__unpack(w,i,s,e)
except: traceback.print_exc(file=sys.stdout)
return page_value
def __unpack( self,w, i, s, e):
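        # De-obfuscation scheme: characters of w, i and s are interleaved; the
        # first five from each string form a key (l1lI) and the rest form the
        # payload (ll1l). Each payload pair is read as a base-36 number,
        # shifted by +/-1 according to the parity of the matching key
        # character, and converted back to a character. If the result is
        # itself another packed block, it is unpacked recursively.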
lIll = 0;
ll1I = 0;
Il1l = 0;
ll1l = [];
l1lI = [];
while True:
if (lIll < 5):
l1lI.append(w[lIll])
elif (lIll < len(w)):
ll1l.append(w[lIll]);
lIll+=1;
if (ll1I < 5):
l1lI.append(i[ll1I])
elif (ll1I < len(i)):
ll1l.append(i[ll1I])
ll1I+=1;
if (Il1l < 5):
l1lI.append(s[Il1l])
elif (Il1l < len(s)):
ll1l.append(s[Il1l]);
Il1l+=1;
if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
break;
lI1l = ''.join(ll1l)#.join('');
I1lI = ''.join(l1lI)#.join('');
ll1I = 0;
l1ll = [];
for lIll in range(0,len(ll1l),2):
#print 'array i',lIll,len(ll1l)
ll11 = -1;
if ( ord(I1lI[ll1I]) % 2):
ll11 = 1;
#print 'val is ', lI1l[lIll: lIll+2]
l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
ll1I+=1;
if (ll1I >= len(l1lI)):
ll1I = 0;
ret=''.join(l1ll)
if 'eval(function(w,i,s,e)' in ret:
ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
return self.unwise(ret)
else:
return ret
| siouka/dmind | plugin.video.tvpor/resources/lib/unwise.py | Python | gpl-2.0 | 2,506 | 0.027933 |
from django.conf.urls import patterns, url
from .views import info, useradmin, user_info, user_remove
urlpatterns = patterns(
'',
url('^info/$', info, name="info"),
url('^useradmin/$', useradmin, name="useradmin"),
url(r'^useradmin/info/(?P<email>[^/]+)/$', user_info, name='user_info'),
url(r'^useradmin/remove/(?P<email>[^/]+)/$', user_remove, name='user_remove'),
)
| saukrIppl/seahub | seahub/institutions/urls.py | Python | apache-2.0 | 391 | 0.002558 |
'''
urlresolver XBMC Addon
Copyright (C) 2013 Bstrdsmkr
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re, urllib2, os, xbmcgui
from urlresolver import common
#SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class PromptfileResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "promptfile"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
try:
html = self.net.http_GET(web_url).content
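            # The landing page contains hidden form fields; collect them and
            # POST them back to the same URL to obtain the page whose player
            # config holds the actual stream URL.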
data = {}
r = re.findall(r'type="hidden"\s*name="(.+?)"\s*value="(.*?)"', html)
for name, value in r:
data[name] = value
html = self.net.http_POST(web_url, data).content
html = re.compile(r'clip\s*:\s*\{.*?url\s*:\s*[\"\'](.+?)[\"\']', re.DOTALL).search(html)
if not html:
raise Exception ('File Not Found or removed')
stream_url = html.group(1)
return stream_url
except urllib2.URLError, e:
common.addon.log_error(self.name + ': got http error %d fetching %s' %
(e.code, web_url))
common.addon.show_small_popup('Error','Http error: '+str(e), 5000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log_error('**** Promptfile Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]PROMPTFILE[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://www.promptfile.com/%s' % (media_id)
def get_host_and_id(self, url):
r = re.search('//(.+?)/(.+)', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?promptfile.com/l/' +
'[0-9A-Za-z\-]+', url) or
'promptfile' in host)
| SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/script.module.urlresolver/lib/urlresolver/plugins/promptfile.py | Python | gpl-2.0 | 3,129 | 0.004155 |
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.tests.unit.linuxbridge import test_linuxbridge_plugin
from neutron.tests.unit.openvswitch import test_agent_scheduler
class LbAgentSchedulerTestCase(
test_agent_scheduler.OvsAgentSchedulerTestCase):
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
l3_plugin = None
class LbL3AgentNotifierTestCase(
test_agent_scheduler.OvsL3AgentNotifierTestCase):
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
l3_plugin = None
class LbDhcpAgentNotifierTestCase(
test_agent_scheduler.OvsDhcpAgentNotifierTestCase):
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
| shakamunyi/neutron-vrrp | neutron/tests/unit/linuxbridge/test_agent_scheduler.py | Python | apache-2.0 | 1,192 | 0.002517 |
{
'name' : 'Custom pos session report (2)',
'version' : '1.0.0',
'author' : 'IT-Projects LLC, Ivan Yelizariev',
'license': 'GPL-3',
'category' : 'Custom',
'website' : 'https://yelizariev.github.io',
'description': """
Tested on Odoo 8.0 258a4cac82ef3b7e6a086f691f3bf8140d37b51c
""",
'data':[
'views/session_view.xml',
'views/pos_session_custom_report1.xml',
'views/report1.xml',
'views/layouts.xml',
],
'depends': ['base','point_of_sale'],
'init_xml': [],
'update_xml': [],
'installable': True,
}
| bmya/pos-addons | pos_session_custom2/__openerp__.py | Python | lgpl-3.0 | 583 | 0.012007 |
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import struct
import six
from .errors import ReadError
skip = '_'
def none():
"""A ReadWriter that consumes nothing and returns None."""
return NoneReadWriter()
def constant(rw, value):
"""A ReadWriter that runs the given ReadWriter and ignores the value.
Always writes and returns ``value`` instead.
:param rw:
ReadWriter to run
:param value:
Value to serialize and return
"""
return ConstantReadWriter(rw, value)
def number(width_bytes):
"""Build a ReadWriter for integers of the given width.
:param width_bytes:
Width of the integer. One of 1, 2, 4 and 8.
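    For example (using the same notation as :py:func:`chain`):
    .. code-block:: python
        number(2)  # == n:2, a big-endian unsigned 16-bit integer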
"""
return NumberReadWriter(width_bytes)
def args(length_rw):
"""Build a ReadWriter for args=[arg1, arg2, arg3]
:param length_rw:
ReadWriter for the length of each arg
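    For example, with a 2-byte length prefix for each of the three args:
    .. code-block:: python
        args(number(2))  # == arg1~2 arg2~2 arg3~2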
"""
return ArgsReaderWriter(length_rw)
def len_prefixed_string(length_rw, is_binary=False):
"""Build a ReadWriter for strings prefixed with their length.
.. code-block:: python
len_prefixed_string(number(2)) # == str~2
:param length_rw:
ReadWriter for the length of the string
:param is_binary:
Whether the string is a binary blob. If this is False (the default),
the string will be encoded/decoded to UTF-8 before writing/reading.
"""
return LengthPrefixedBlobReadWriter(length_rw, is_binary)
def chain(*rws):
"""Build a ReadWriter from the given list of ReadWriters.
.. code-block:: python
chain(
number(1),
number(8),
len_prefixed_string(number(2)),
) # == n1:1 n2:8 s~2
Reads/writes from the given ReadWriters in-order. Returns lists of values
in the same order as the ReadWriters.
:param rws:
One or more ReadWriters
"""
assert rws is not None
if len(rws) == 1 and isinstance(rws[0], list):
# In case someone does chain([l0, l1, ...])
rws = rws[0]
return ChainReadWriter(rws)
def dictionary(*pairs):
"""Build a ReadWriter that reads/writes dictionaries.
``pairs`` are tuples containing field names and their corresponding
ReadWriters. The fields will be read and written in the same order
provided here.
For example the following ReadWriter will read and write dictionaries in
the form ``{"flags": <byte>, "id": <int32>}``.
.. code-block:: python
dictionary(
("flags", number(1)),
("id", number(4)),
)
For pairs where the key name is `rw.skip`, the value will not be saved and
the serializer will receive None.
:param pairs:
One or more tuples in the from ``(<field name>, <ReadWriter>)``.
"""
return NamedChainReadWriter(pairs)
def instance(cls, *pairs):
    """Build a ReadWriter that reads/writes instances of the given class.
``pairs`` are key-value pairs that specify constructor argument names and
their corresponding ReadWriters. These same names are used to access
attributes on instances when writing.
.. code-block:: python
instance(
Person,
("name", len_prefixed_string(number(2))),
("age", number(1))
)
For pairs where the attribute name is `rw.skip`, the value will not be
passed to the constructor. Further, while serializing, None will be passed
to the serializer.
:param cls:
A class with an ``__init__`` method accepting keyword arguments for
all items specified in ``pairs``
:param pairs:
Key-value pairs mapping argument name to ReadWriter.
"""
return InstanceReadWriter(cls, pairs)
def headers(length_rw, key_rw, value_rw=None):
"""Build a ReadWriter for header lists.
A header is represented as::
count:L (key:K value:V){count}
The value produced is a list of key-value pairs. For example,
.. code-block:: python
headers(
number(L),
len_prefixed_string(number(K)),
len_prefixed_string(number(V)),
)
:param length_rw:
ReadWriter for the number of pairs in the header
:param key_rw:
ReadWriter for a key in a pair
:param value_rw:
ReadWriter for a value in a pair. Defaults to ``key_rw``.
"""
return HeadersReadWriter(length_rw, key_rw, value_rw)
def switch(switch_rw, cases):
"""A ReadWriter that picks behavior based on the value of ``switch_rw``.
.. code-block:: python
switch(
number(1), {
0: option_1_rw(),
1: option_2_rw()
}
)
    Produces a tuple in the form ``(switch_value, case_value)``. If a given
switch value did not have a corresponding case, nothing will be written to
the stream and None will be returned as the value when reading.
:param switch_rw:
A ReadWriter that produces a value to dispatch on
:param cases:
Pairs where the key is the expected value from ``switch_rw``. If the
value matches, the corresponding ReadWriter will be executed.
"""
return SwitchReadWriter(switch_rw, cases)
class ReadWriter(object):
"""Provides the ability to read/write types from/to file-like objects.
    ReadWriters SHOULD not maintain any state between calls to
``read``/``write`` and MUST be re-usable and thread-safe. The
    ``read``/``write`` methods MAY be called on the same ReadWriter instance
multiple times for different requests at the same time.
The file-like stream object MUST provide ``read(int)`` and ``write(str)``
methods with behaviors as follows:
``read(int)``
MUST return the specified number of bytes from the stream. MAY return
fewer bytes if the end of the stream was reached.
``write(str)``
MUST write the given string or buffer to the stream.
"""
def read(self, stream):
"""Read and return the object from the stream.
:param stream:
file-like object providing a `read(int)` method
:returns: the deserialized object
:raises ReadError:
for parse errors or if the input is too short
"""
raise NotImplementedError()
def write(self, obj, stream):
"""Write the object to the stream.
:param stream:
            file-like object providing a `write(str)` method
:returns:
the stream
"""
raise NotImplementedError()
    def length(self, obj):
        """Return the number of bytes that will actually be written to the stream.
        For cases where the width depends on the input, this should return the
        length of the data that will be written to the stream."""
raise NotImplementedError()
def width(self):
"""Return the number of bytes this ReadWriter is expected to take.
For cases where the width depends on the input, this should return the
minimum width the ReadWriter is expected to take."""
raise NotImplementedError()
def take(self, stream, num):
"""Read the given number of bytes from the stream.
:param stream:
stream to read from
:param num:
number of bytes to read
:raises ReadError:
if the stream did not yield the exact number of bytes expected
"""
s = stream.read(num)
slen = len(s)
if slen != num:
raise ReadError(
"Expected %d bytes but got %d bytes." % (num, slen)
)
return s
class DelegatingReadWriterMeta(type):
def __new__(mcs, name, bases, dct):
if bases != (ReadWriter,):
# Children of this class MUST provide __rw__
assert dct.get('__rw__'), (
"%s.__rw__ must be set" % name
)
return type.__new__(mcs, name, bases, dct)
class DelegatingReadWriter(six.with_metaclass(DelegatingReadWriterMeta, ReadWriter)): # noqa
"""Allows mapping ReadWriters onto different types.
A common pattern is to define a base ReadWriter using the primitives from
this module and then map those onto custom types.
For example, consider a Person class.
.. code-block:: python
Person = namedtuple('Person', 'name age')
Given a ReadWriter that produces a ``(name, age)`` tuple, we want to map
    it to/from a Person object.
.. code-block:: python
class PersonReadWriter(DelegatingReadWriter):
__rw__ = # a ReadWriter that produces (name, age) tuples
def read(self, stream):
(name, age) = super(PersonReadWriter, self).read(stream)
return Person(name, age)
def write(self, person, stream):
super(PersonReadWriter, self).write(
(person.name, person.age),
stream,
)
"""
# The underlying ReadWriter. All calls will be delegated to this.
__rw__ = None
def read(self, stream):
return self.__rw__.read(stream)
def write(self, obj, stream):
self.__rw__.write(obj, stream)
return stream
def width(self):
return self.__rw__.width()
def length(self, obj):
return self.__rw__.length(obj)
class NumberReadWriter(ReadWriter):
"""See :py:func:`number` for documentation."""
_FORMATS = {
1: '>B',
2: '>H',
4: '>I',
8: '>Q',
}
__slots__ = ('_width', '_format')
def __init__(self, width_bytes):
assert width_bytes in self._FORMATS, (
"Unsupported integer width '%d'" % width_bytes
)
self._width = width_bytes
self._format = self._FORMATS[width_bytes]
def read(self, stream):
return struct.unpack(self._format, self.take(stream, self._width))[0]
def write(self, num, stream):
# Cast to int just in case the value is still a float
stream.write(struct.pack(self._format, int(num)))
return stream
def width(self):
return self._width
def length(self, obj):
return self._width
class ArgsReaderWriter(ReadWriter):
def __init__(self, length_rw, num=3):
assert length_rw is not None
self._length_rw = length_rw
self._rw = len_prefixed_string(self._length_rw,
is_binary=True)
self.num = num
def read(self, stream):
args = []
try:
for _ in range(self.num):
args.append(self._rw.read(stream))
except ReadError:
pass
return args
def write(self, args, stream):
for arg in args:
if arg is None:
arg = ""
self._rw.write(arg, stream)
def width(self):
return self.num * self._length_rw.width()
def length(self, args):
size = 0
for arg in args:
if arg is None:
arg = ""
size += self._rw.length(arg)
return size
class LengthPrefixedBlobReadWriter(ReadWriter):
"""See :py:func:`len_prefixed_string` for documentation."""
__slots__ = ('_length', '_is_binary')
def __init__(self, length_rw, is_binary=False):
assert length_rw is not None
self._length = length_rw
self._is_binary = is_binary
def read(self, stream):
length = self._length.read(stream)
if length == 0:
return b""
else:
blob = self.take(stream, length)
if not self._is_binary:
blob = blob.decode('utf-8')
return blob
def write(self, s, stream):
if six.PY2 and not self._is_binary:
s = s.encode('utf-8')
if six.PY3 and isinstance(s, str):
s = s.encode('utf-8')
length = len(s)
self._length.write(length, stream)
stream.write(s)
return stream
def width(self):
return self._length.width()
def length(self, s):
if not self._is_binary:
if six.PY2:
s = s.encode('utf-8')
if six.PY3 and isinstance(s, str) or s is None:
s = s.encode('utf-8')
return len(s) + self._length.width()
class ChainReadWriter(ReadWriter):
"""See :py:func:`chain` for documentation."""
__slots__ = ('_links',)
def __init__(self, links):
assert links is not None
self._links = tuple(links)
def read(self, stream):
return [link.read(stream) for link in self._links]
def write(self, items, stream):
assert len(items) == len(self._links)
for item, link in zip(items, self._links):
link.write(item, stream)
return stream
def width(self):
return sum(link.width() for link in self._links)
def length(self, items):
assert len(items) == len(self._links)
size = 0
for item, link in zip(items, self._links):
size += link.length(item)
return size
class NamedChainReadWriter(ReadWriter):
"""See :py:func:`dictionary` for documentation."""
__slots__ = ('_pairs',)
def __init__(self, pairs):
assert pairs is not None
self._pairs = pairs
def read(self, stream):
result = {}
for name, rw in self._pairs:
try:
value = rw.read(stream)
if name != skip:
result[name] = value
except ReadError as e:
raise ReadError("Failed to read %s: %s" % (name, e))
return result
def write(self, obj, stream):
for name, rw in self._pairs:
if name != skip:
rw.write(obj[name], stream)
else:
rw.write(None, stream)
return stream
def width(self):
return sum(rw.width() for _, rw in self._pairs)
def length(self, obj):
size = 0
for name, rw in self._pairs:
if name != skip:
size += rw.length(obj[name])
else:
size += rw.length(None)
return size
class InstanceReadWriter(ReadWriter):
__slots__ = ('_cls', '_pairs',)
def __init__(self, cls, pairs):
self._pairs = pairs
self._cls = cls
def read(self, stream):
kwargs = {}
try:
for attr, rw in self._pairs:
value = rw.read(stream)
if attr != skip:
kwargs[attr] = value
except ReadError as e:
raise ReadError("Failed to read %s: %s" % (self._cls, e))
return self._cls(**kwargs)
def write(self, obj, stream):
for attr, rw in self._pairs:
if attr != skip:
value = getattr(obj, attr)
rw.write(value, stream)
else:
rw.write(None, stream)
return stream
def width(self):
return sum(rw.width() for _, rw in self._pairs)
def length(self, obj):
size = 0
for attr, rw in self._pairs:
if attr != skip:
value = getattr(obj, attr)
size += rw.length(value)
else:
size += rw.length(None)
return size
def length_no_args(self, obj):
size = 0
for attr, rw in self._pairs:
if attr == "args":
continue
if attr != skip:
value = getattr(obj, attr)
size += rw.length(value)
else:
size += rw.length(None)
return size
class HeadersReadWriter(ReadWriter):
"""See :py:func:`headers` for documentation."""
__slots__ = ('_length', '_key', '_value')
def __init__(self, length_rw, key_rw, value_rw=None):
self._length = length_rw
self._pair = chain(key_rw, value_rw or key_rw)
def read(self, stream):
count = self._length.read(stream)
headers = []
for i in range(count):
headers.append(self._pair.read(stream))
return headers
def write(self, headers, stream):
# In case someone does write({..}, stream)
if isinstance(headers, dict):
headers = list(headers.items())
self._length.write(len(headers), stream)
for pair in headers:
self._pair.write(pair, stream)
return stream
def width(self):
return self._length.width()
def length(self, headers):
size = 0
if isinstance(headers, dict):
headers = list(headers.items())
size += self._length.length(len(headers))
for pair in headers:
size += self._pair.length(pair)
return size
class NoneReadWriter(ReadWriter):
def read(self, stream):
return None
def write(self, _, stream):
return stream
def width(self):
return 0
def length(self, obj):
return 0
class ConstantReadWriter(ReadWriter):
__slots__ = ('_rw', '_value')
def __init__(self, rw, value):
self._rw = rw
self._value = value
def read(self, stream):
self._rw.read(stream)
return self._value
def write(self, out, stream):
self._rw.write(self._value, stream)
return stream
def width(self):
return self._rw.width()
def length(self, obj):
return self._rw.width()
class SwitchReadWriter(ReadWriter):
__slots__ = ('_switch', '_cases')
def __init__(self, switch_rw, cases_rw):
self._switch = switch_rw
self._cases = cases_rw
def read(self, stream):
k = self._switch.read(stream)
if k in self._cases:
v = self._cases[k].read(stream)
return (k, v)
else:
return (k, None)
def write(self, item, stream):
k, v = item
self._switch.write(k, stream)
if v is not None and k in self._cases:
self._cases[k].write(v, stream)
return stream
def width(self):
return self._switch.width()
def length(self, item):
k, v = item
size = 0
size += self._switch.length(k)
if v is not None and k in self._cases:
size += self._cases[k].length(v)
return size
| uber/tchannel-python | tchannel/rw.py | Python | mit | 19,427 | 0 |
from django.conf.urls import url
from redisca.frontend import views
app_name = 'frontend'
urlpatterns = [
url(r'^$', views.template_list, name='template_list'),
url(r'^([a-zA-Z0-9_\./\-]+)$', views.static_template, name='template'),
]
| redisca/django-redisca | redisca/frontend/urls.py | Python | mit | 246 | 0 |
# Open a gsd file and write out a subsampled version, keeping only every Nth timestep.
# Useful if you want to analyze a shorter trajectory.
import gsd.hoomd
import argparse
import time
start = time.time()
parser = argparse.ArgumentParser(description='Subsample GSD trajectory')
parser.add_argument('fname',metavar='input',type=str,help='trajectory file to be subsampled')
parser.add_argument('ofname',metavar='output',type=str,help='where to write subsampled trajectory file')
parser.add_argument('N',metavar='N',type=int,help='keep frame each N timesteps')
args = parser.parse_args()
traj = gsd.hoomd.open(args.fname)
frame0 = traj[0]
newtraj = gsd.hoomd.open(args.ofname,'wb')
newtraj.append(frame0)
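# Note: only particle positions and the particle count are copied into the
# subsampled frames below; box, types and other per-frame data are not carried
# over (the first frame is kept in full).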
for i in range(args.N,len(traj),args.N):
s = gsd.hoomd.Snapshot()
pos = traj[i].particles.position
s.particles.position = pos
s.particles.N = len(pos)
newtraj.append(s)
end = time.time()
print('Subsampling took {0} s.'.format(end-start))
| ramansbach/cluster_analysis | clustering/scripts/gsdSubsample.py | Python | mit | 959 | 0.023983 |
import unittest2 as unittest
from ..models import TestStorage
from ..strategy import TestStrategy
from ...backends.utils import load_backends, get_backend
from ...backends.github import GithubOAuth2
from ...exceptions import MissingBackend
class BaseBackendUtilsTest(unittest.TestCase):
def setUp(self):
self.strategy = TestStrategy(storage=TestStorage)
def tearDown(self):
self.strategy = None
class LoadBackendsTest(BaseBackendUtilsTest):
def test_load_backends(self):
loaded_backends = load_backends((
'social_core.backends.github.GithubOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.flickr.FlickrOAuth'
), force_load=True)
keys = list(loaded_backends.keys())
self.assertEqual(keys, ['github', 'facebook', 'flickr'])
backends = ()
loaded_backends = load_backends(backends, force_load=True)
self.assertEqual(len(list(loaded_backends.keys())), 0)
class GetBackendTest(BaseBackendUtilsTest):
def test_get_backend(self):
backend = get_backend((
'social_core.backends.github.GithubOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.flickr.FlickrOAuth'
), 'github')
self.assertEqual(backend, GithubOAuth2)
def test_get_missing_backend(self):
with self.assertRaisesRegexp(MissingBackend,
'Missing backend "foobar" entry'):
get_backend(('social_core.backends.github.GithubOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.flickr.FlickrOAuth'),
'foobar')
| tobias47n9e/social-core | social_core/tests/backends/test_utils.py | Python | bsd-3-clause | 1,767 | 0 |
"""
Methods for importing Helios data.
"""
from datetime import date, time, datetime, timedelta
import os
import pathlib
import urllib.error
from urllib.error import URLError
from collections import OrderedDict
import warnings
import astropy.constants as constants
import astropy.units as u
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
from heliopy import config
from heliopy.data import util
from heliopy.data import cdasrest
data_dir = config['download_dir']
use_hdf = config['use_hdf']
helios_dir = os.path.join(data_dir, 'helios')
# Remote base URL for the Helios data archive
remote_base_url = 'https://helios-data.ssl.berkeley.edu/data/'
def _check_probe(probe):
probe = str(probe)
assert probe == '1' or probe == '2', 'Probe number must be 1 or 2'
return probe
def _dist_file_dir(probe, year, doy):
return os.path.join(helios_dir,
'helios{}'.format(probe),
'dist',
'{}'.format(year),
'{}'.format(int(doy)))
def _loaddistfile(probe, year, doy, hour, minute, second):
"""
Method to load a Helios distribution file.
Returns opened file and location of file if file exists. If file doesn't
exist raises an OSError.
Parameters
----------
probe : int, str
Helios probe to import data from. Must be 1 or 2.
year : int
Year
doy : int
Day of year
hour : int
Hour.
minute : int
Minute
second : int
Second
Returns
-------
f : file
Opened distribution function file
filename : str
Filename of opened file
"""
probe = _check_probe(probe)
# Work out location of file
yearstring = str(year)[-2:]
filedir = _dist_file_dir(probe, year, doy)
filename = os.path.join(filedir,
'h' + probe + 'y' + yearstring +
'd' + str(doy).zfill(3) +
'h' + str(hour).zfill(2) +
'm' + str(minute).zfill(2) +
's' + str(second).zfill(2) + '_')
# Try to open distribution file
for extension in ['hdm.0', 'hdm.1', 'ndm.0', 'ndm.1']:
try:
f = open(filename + extension)
filename += extension
except OSError:
continue
if 'f' not in locals():
raise OSError('Could not find file with name ' +
filename[:-1])
else:
return f, filename
def _dist_filename_to_hms(path):
"""Given distribution filename, extract hour, minute, second"""
# year = int(path[-21:-19]) + 1900
# doy = int(path[-18:-15])
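    # Filenames look like 'h<probe>y<yy>d<doy>h<hh>m<mm>s<ss>_<ext>' (see
    # _loaddistfile), so hour, minute and second sit at fixed offsets from
    # the end of the path.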
hour = int(path[-14:-12])
minute = int(path[-11:-9])
second = int(path[-8:-6])
return hour, minute, second
def integrated_dists(probe, starttime, endtime, verbose=False):
"""
Returns the integrated distributions from experiments i1a and i1b in Helios
distribution function files.
The distributions are integrated over all angles and given as a function
of proton velocity.
Parameters
----------
probe : int
Helios probe to import data from. Must be 1 or 2.
starttime : datetime.datetime
Start of interval
endtime : datetime.datetime
End of interval
verbose : bool
If ``True``, print information whilst loading. Default is ``False``.
Returns
-------
    distributions : dict
        Dictionary with keys ``'a'`` and ``'b'`` containing the i1a and i1b
        integrated distribution functions, indexed by time and proton velocity.
"""
extensions = ['hdm.0', 'hdm.1', 'ndm.0', 'ndm.1']
distlist = {'a': [], 'b': []}
starttime_orig = starttime
# Loop through each day
while starttime < endtime:
year = starttime.year
doy = starttime.strftime('%j')
# Directory for today's distribution files
dist_dir = _dist_file_dir(probe, year, doy)
        # Location of hdf file to save to/load from
hdffile = 'h' + probe + str(year) + str(doy).zfill(3) +\
'integrated_dists.hdf'
hdffile = os.path.join(dist_dir, hdffile)
todays_dists = {'a': [], 'b': []}
# Check if data is already saved
if os.path.isfile(hdffile):
for key in todays_dists:
todays_dists[key] = pd.read_hdf(hdffile, key=key)
distlist[key].append(todays_dists[key])
starttime += timedelta(days=1)
continue
# If not saved, generate a derived file
else:
# Get every distribution function file present for this day
for f in os.listdir(dist_dir):
path = os.path.join(dist_dir, f)
# Check for distribution function
if path[-5:] in extensions:
hour, minute, second = _dist_filename_to_hms(path)
try:
a, b = integrated_dists_single(probe, year, doy,
hour, minute, second)
except RuntimeError as err:
strerr = 'No ion distribution function data in file'
if str(err) == strerr:
continue
raise err
t = datetime.combine(starttime.date(),
time(hour, minute, second))
if verbose:
print(t)
dists = {'a': a, 'b': b}
for key in dists:
dist = dists[key]
dist['Time'] = t
dist = dist.set_index(['Time', 'v'], drop=True)
todays_dists[key].append(dist)
# Go through a and b and concat all the data
for key in todays_dists:
todays_dists[key] = pd.concat(todays_dists[key])
if use_hdf:
todays_dists[key].to_hdf(hdffile, key=key, mode='a')
distlist[key].append(todays_dists[key])
starttime += timedelta(days=1)
for key in distlist:
distlist[key] = util.timefilter(distlist[key], starttime_orig, endtime)
return distlist
def integrated_dists_single(probe, year, doy, hour, minute, second):
"""
Returns the integrated distributions from experiments i1a and i1b in Helios
distribution function files.
The distributions are integrated over all angles and given as a function
of proton velocity.
Parameters
----------
probe : int, str
Helios probe to import data from. Must be 1 or 2.
year : int
Year
doy : int
Day of year
hour : int
Hour
minute : int
Minute.
second : int
Second
Returns
-------
i1a : pandas.DataFrame
i1a integrated distribution function.
i1b : pandas.DataFrame
i1b integrated distribution function.
"""
probe = _check_probe(probe)
f, _ = _loaddistfile(probe, year, doy, hour, minute, second)
for line in f:
if line[0:19] == ' 1-D i1a integrated':
break
# i1a distribution function
i1adf = f.readline().split()
f.readline()
i1avs = f.readline().split()
f.readline()
# i1b distribution file
i1bdf = f.readline().split()
f.readline()
i1bvs = f.readline().split()
i1a = pd.DataFrame({'v': i1avs, 'df': i1adf}, dtype=float)
i1b = pd.DataFrame({'v': i1bvs, 'df': i1bdf}, dtype=float)
f.close()
return i1a, i1b
def electron_dist_single(probe, year, doy, hour, minute, second,
remove_advect=False):
"""
Read in 2D electron distribution function.
Parameters
----------
probe : int, str
Helios probe to import data from. Must be 1 or 2.
year : int
Year
doy : int
Day of year
hour : int
Hour.
minute : int
Minute
second : int
Second
remove_advect : bool
If ``False``, the distribution is returned in
the spacecraft frame.
If ``True``, the distribution is
returned in the solar wind frame, by subtracting the spacecraft
        velocity from the velocity of each bin. Note this significantly
slows down reading in the distribution.
Returns
-------
dist : pandas.DataFrame
2D electron distribution function
"""
probe = _check_probe(probe)
f, filename = _loaddistfile(probe, year, doy, hour, minute, second)
startline = None
for i, line in enumerate(f):
# Find start of electron distribution function
if line[0:4] == ' 2-D':
startline = i + 2
# Throw away next line (just has max of distribution)
f.readline()
# Throw away next line (just has table headings)
if f.readline()[0:27] == ' no electron data available':
return None
break
nlines = None
for i, line in enumerate(f):
if 'Degree, Pizzo correction' in line:
break
nlines = i + 1
if startline is None:
return None
##########################################
# Read and process electron distribution #
##########################################
# Arguments for reading in data
readargs = {'usecols': [0, 1, 2, 3, 4, 5],
'names': ['Az', 'E_bin', 'pdf', 'counts', 'vx', 'vy'],
'delim_whitespace': True,
'skiprows': startline,
'nrows': nlines}
# Read in data
dist = pd.read_csv(filename, **readargs)
if dist.empty:
return None
    # Remove spacecraft aberration
# Assumes that spacecraft motion is always in the ecliptic (x-y)
# plane
if remove_advect:
params = distparams_single(probe, year, doy, hour, minute, second)
dist['vx'] += params['helios_vr']
dist['vy'] += params['helios_v']
# Convert to SI units
dist[['vx', 'vy']] *= 1e3
dist['pdf'] *= 1e12
# Calculate spherical coordinates of energy bins
dist['|v|'], _, dist['phi'] =\
util._cart2sph(dist['vx'], dist['vy'], 0)
# Calculate bin energy assuming particles are electrons
dist['E_electron'] = 0.5 * constants.m_e.value *\
((dist['|v|']) ** 2)
# Convert to multi-index using Azimuth and energy bin
dist = dist.set_index(['E_bin', 'Az'])
f.close()
return dist
def distparams(probe, starttime, endtime, verbose=False):
"""
Read in distribution parameters found in the header of distribution files.
Parameters
----------
probe : int
Helios probe to import data from. Must be 1 or 2.
starttime : datetime.datetime
Start of interval
endtime : datetime.datetime
End of interval
verbose : bool
If ``True``, print information whilst loading. Default is ``False``.
Returns
-------
    distinfo : pandas.DataFrame
        Information stored at the top of the distribution function files
"""
extensions = ['hdm.0', 'hdm.1', 'ndm.0', 'ndm.1']
paramlist = []
starttime_orig = starttime
# Loop through each day
while starttime < endtime:
year = starttime.year
doy = starttime.strftime('%j')
# Directory for today's distribution files
dist_dir = _dist_file_dir(probe, year, doy)
        # Location of hdf file to save to/load from
hdffile = 'h' + probe + str(year) + str(doy).zfill(3) +\
'distparams.hdf'
hdffile = os.path.join(dist_dir, hdffile)
if os.path.isfile(hdffile):
todays_params = pd.read_hdf(hdffile)
elif not os.path.isdir(dist_dir):
starttime += timedelta(days=1)
continue
else:
todays_params = []
# Get every distribution function file present for this day
for f in os.listdir(dist_dir):
path = os.path.join(dist_dir, f)
# Check for distribution function
if path[-5:] in extensions:
hour, minute, second = _dist_filename_to_hms(path)
if verbose:
print(starttime.date(), hour, minute, second)
p = distparams_single(probe, year, doy,
hour, minute, second)
todays_params.append(p)
todays_params = pd.concat(todays_params,
ignore_index=True, axis=1).T
todays_params = todays_params.set_index('Time', drop=False)
# Convert columns to numeric types
todays_params = todays_params.apply(pd.to_numeric, errors='ignore')
todays_params['Time'] = pd.to_datetime(todays_params['Time'])
if use_hdf:
todays_params.to_hdf(hdffile, key='distparams', mode='w')
paramlist.append(todays_params)
starttime += timedelta(days=1)
return util.timefilter(paramlist, starttime_orig, endtime)
def distparams_single(probe, year, doy, hour, minute, second):
"""
Read in parameters from a single distribution function measurement.
Parameters
----------
probe : int, str
Helios probe to import data from. Must be 1 or 2.
year : int
Year
doy : int
Day of year
hour : int
Hour
minute : int
Minute
second : int
Second
Returns
-------
distparams : pandas.Series
Distribution parameters from top of distribution function file.
"""
probe = _check_probe(probe)
f, _ = _loaddistfile(probe, year, doy, hour, minute, second)
_, month, day = util.doy2ymd(year, doy)
dtime = datetime(year, month, day, hour, minute, second)
distparams = pd.Series(dtime, index=['Time'])
    # Ignore the Pizzo et al. correction at top of file
for _ in range(0, 3):
f.readline()
# Line of flags
flags = f.readline().split()
distparams['imode'] = int(flags[0])
# Alternating energy/azimuth shift on?
distparams['ishift'] = bool(flags[1])
    # Possibly H2 aberration shift?
distparams['iperihelion_shift'] = bool(flags[2])
# Indicates a HDM file which contained bad data (frames), but could be
# handled as NDM file
distparams['minus'] = int(flags[3])
# 0 = no instrument, 1 = i1a, 2 = I3
distparams['ion_instrument'] = int(flags[4])
distparams['data_rate'] = 1 if ('hdm' in f.name) else 0
# 2 lines of Helios location information
location = f.readline().split()
distparams['r_sun'] = float(location[0]) # Heliospheric distance (AU)
distparams['clong'] = float(location[1]) # Carrington longitude (deg)
    distparams['clat'] = float(location[2])  # Carrington latitude (deg)
distparams['carrot'] = int(f.readline().split()[0]) # Carrington cycle
# 2 lines of Earth location information
earth_loc = f.readline().split()
# Heliospheric distance (AU)
distparams['earth_rsun'] = float(earth_loc[0])
# Carrington longitude (deg)
distparams['earth_clong'] = float(earth_loc[1])
# Carrington lattitude (deg)
distparams['earth_clat'] = float(earth_loc[2])
earth_loc = f.readline().split()
# Angle between Earth and Helios (deg)
distparams['earth_he_angle'] = float(earth_loc[0])
# Carrington rotation
distparams['earth_carrot'] = int(earth_loc[1])
# Helios velocity information
helios_v = f.readline().split()
# Helios radial velocity (km/s)
distparams['helios_vr'] = float(helios_v[0]) * 1731
# Helios tangential velocity (km/s)
distparams['helios_v'] = float(helios_v[1]) * 1731
# i1a integrated ion parameters
i1a_proton_params = f.readline().split()
# Proton number density (cm^-3)
distparams['np_i1a'] = float(i1a_proton_params[0])
# Proton velocity (km/s)
distparams['vp_i1a'] = float(i1a_proton_params[1])
# Proton temperature (K)
distparams['Tp_i1a'] = float(i1a_proton_params[2])
i1a_proton_params = f.readline().split()
# Proton azimuth flow angle (deg)
distparams['v_az_i1a'] = float(i1a_proton_params[0])
# Proton elevation flow angle (deg)
distparams['v_el_i1a'] = float(i1a_proton_params[1])
assert distparams['v_az_i1a'] < 360,\
'Flow azimuth must be less than 360 degrees'
# i1a integrated alpha parameters (possibly all zero?)
i1a_alpha_params = f.readline().split()
# Alpha number density (cm^-3)
distparams['na_i1a'] = float(i1a_alpha_params[0])
# Alpha velocity (km/s)
distparams['va_i1a'] = float(i1a_alpha_params[1])
# Alpha temperature (K)
distparams['Ta_i1a'] = float(i1a_alpha_params[2])
# i1b integrated ion parameters
i1b_proton_params = f.readline().split()
# Proton number density (cm^-3)
distparams['np_i1b'] = float(i1b_proton_params[0])
# Proton velocity (km/s)
distparams['vp_i1b'] = float(i1b_proton_params[1])
# Proton temperature (K)
distparams['Tp_i1b'] = float(i1b_proton_params[2])
# Magnetic field (out by a factor of 10 in data files for some reason)
B = f.readline().split()
distparams['Bx'] = float(B[0]) / 10
distparams['By'] = float(B[1]) / 10
distparams['Bz'] = float(B[2]) / 10
sigmaB = f.readline().split()
distparams['sigmaBx'] = float(sigmaB[0]) / 10
distparams['sigmaBy'] = float(sigmaB[1]) / 10
distparams['sigmaBz'] = float(sigmaB[2]) / 10
# Replace bad values with nans
to_replace = {'Tp_i1a': [-1.0, 0],
'np_i1a': [-1.0, 0],
'vp_i1a': [-1.0, 0],
'Tp_i1b': [-1.0, 0],
'np_i1b': [-1.0, 0],
'vp_i1b': [-1.0, 0],
'sigmaBx': -0.01, 'sigmaBy': -0.01, 'sigmaBz': -0.01,
'Bx': 0.0, 'By': 0.0, 'Bz': 0.0,
'v_az_i1a': [-1, 0], 'v_el_i1a': [-1, 0],
'na_i1a': [-1, 0], 'va_i1a': [-1, 0], 'Ta_i1a': [-1, 0]}
distparams = distparams.replace(to_replace, np.nan)
f.close()
return distparams
def electron_dists(probe, starttime, endtime, remove_advect=False,
verbose=False):
"""
Return 2D electron distributions between *starttime* and *endtime*
Parameters
----------
probe : int
Helios probe to import data from. Must be 1 or 2.
starttime : datetime.datetime
Start of interval
endtime : datetime.datetime
End of interval
remove_advect : bool
If *False*, the distribution is returned in
the spacecraft frame.
If *True*, the distribution is
returned in the solar wind frame, by subtracting the spacecraft
        velocity from the velocity of each bin. Note this significantly
slows down reading in the distribution.
verbose : bool
If ``True``, print dates when loading files. Default is ``False``.
Returns
-------
dists : pandas.DataFrame
Electron distribution functions
"""
extensions = ['hdm.0', 'hdm.1', 'ndm.0', 'ndm.1']
distlist = []
# Loop through each day
starttime_orig = starttime
while starttime < endtime:
year = starttime.year
doy = starttime.strftime('%j')
if verbose:
print('Loading electron dists from year', year, 'doy', doy)
# Directory for today's distribution files
dist_dir = _dist_file_dir(probe, year, doy)
print(dist_dir)
# If directory doesn't exist, print error and continue
if not os.path.exists(dist_dir):
print('No electron distributions available for year', year,
'doy', doy)
starttime += timedelta(days=1)
continue
        # Location of hdf file to save to/load from
hdffile = 'h' + probe + str(year) + str(doy).zfill(3) +\
'electron_dists.hdf'
hdffile = os.path.join(dist_dir, hdffile)
if os.path.isfile(hdffile):
todays_dist = pd.read_hdf(hdffile)
distlist.append(todays_dist)
starttime += timedelta(days=1)
continue
todays_dist = []
# Get every distribution function file present for this day
for f in os.listdir(dist_dir):
path = os.path.join(dist_dir, f)
# Check for distribution function
if path[-5:] in extensions:
hour, minute, second = _dist_filename_to_hms(path)
try:
d = electron_dist_single(probe, year, doy,
hour, minute, second)
except RuntimeError as err:
strerr = 'No electron distribution function data in file'
if str(err) == strerr:
continue
raise err
if d is None:
continue
t = datetime.combine(starttime.date(),
time(hour, minute, second))
d['Time'] = t
if verbose:
print(t)
todays_dist.append(d)
if todays_dist == []:
starttime += timedelta(days=1)
continue
todays_dist = pd.concat(todays_dist)
todays_dist = todays_dist.set_index('Time', append=True)
if use_hdf:
todays_dist.to_hdf(hdffile, key='electron_dists', mode='w')
distlist.append(todays_dist)
starttime += timedelta(days=1)
if distlist == []:
raise RuntimeError('No electron data available for times ' +
str(starttime_orig) + ' to ' + str(endtime))
return util.timefilter(distlist, starttime_orig, endtime)
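# Illustrative usage of electron_dists (a sketch, not part of the original
# module; the probe and interval below are hypothetical and assume the
# corresponding distribution files are available locally or can be downloaded):
#
#     from datetime import datetime
#     dists = electron_dists('1', datetime(1976, 4, 5), datetime(1976, 4, 6),
#                            verbose=True)
#     print(dists.head())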
def ion_dists(probe, starttime, endtime, remove_advect=False, verbose=False):
"""
Return 3D ion distributions between *starttime* and *endtime*
Parameters
----------
probe : int
Helios probe to import data from. Must be 1 or 2.
starttime : datetime.datetime
Start of interval
endtime : datetime.datetime
End of interval
remove_advect : bool
If *False*, the distribution is returned in
the spacecraft frame.
If *True*, the distribution is
returned in the solar wind frame, by subtracting the spacecraft
        velocity from the velocity of each bin. Note this significantly
slows down reading in the distribution.
verbose : bool
If ``True``, print dates when loading files. Default is ``False``.
Returns
-------
    dists : pandas.DataFrame
        3D ion distribution functions
"""
extensions = ['hdm.0', 'hdm.1', 'ndm.0', 'ndm.1']
distlist = []
# Loop through each day
starttime_orig = starttime
while starttime < endtime:
year = starttime.year
doy = int(starttime.strftime('%j'))
if verbose:
print('Loading ion dists from year', year, 'doy', doy)
# Directory for today's distribution files
dist_dir = _dist_file_dir(probe, year, doy)
# If directory doesn't exist, print error and continue
if not os.path.exists(dist_dir):
print('No ion distributions available for year', year, 'doy', doy)
starttime += timedelta(days=1)
continue
        # Location of hdf file to save to/load from
hdffile = 'h' + probe + str(year) + str(doy).zfill(3) +\
'ion_dists.hdf'
hdffile = os.path.join(dist_dir, hdffile)
if os.path.isfile(hdffile):
todays_dist = pd.read_hdf(hdffile)
distlist.append(todays_dist)
starttime += timedelta(days=1)
continue
todays_dist = []
# Get every distribution function file present for this day
for f in os.listdir(dist_dir):
path = os.path.join(dist_dir, f)
# Check for distribution function
if path[-5:] in extensions:
hour, minute, second = _dist_filename_to_hms(path)
try:
d = ion_dist_single(probe, year, doy,
hour, minute, second)
except RuntimeError as err:
strerr = 'No ion distribution function data in file'
if str(err) == strerr:
continue
raise err
t = datetime.combine(starttime.date(),
time(hour, minute, second))
d['Time'] = t
if verbose:
print(t)
todays_dist.append(d)
if todays_dist == []:
starttime += timedelta(days=1)
continue
todays_dist = pd.concat(todays_dist)
todays_dist = todays_dist.set_index('Time', append=True)
if use_hdf:
todays_dist.to_hdf(hdffile, key='ion_dist', mode='w')
distlist.append(todays_dist)
starttime += timedelta(days=1)
if distlist == []:
raise RuntimeError('No data available for times ' +
str(starttime_orig) + ' to ' + str(endtime))
return util.timefilter(distlist, starttime_orig, endtime)
def ion_dist_single(probe, year, doy, hour, minute, second,
remove_advect=False):
"""
Read in ion distribution function.
Parameters
----------
probe : int, str
Helios probe to import data from. Must be 1 or 2.
year : int
Year
doy : int
Day of year
hour : int
Hour
minute : int
Minute.
second : int
Second
remove_advect : bool
If *False*, the distribution is returned in
the spacecraft frame.
If *True*, the distribution is
returned in the solar wind frame, by subtracting the spacecraft
        velocity from the velocity of each bin. Note this significantly
slows down reading in the distribution.
Returns
-------
dist : pandas.DataFrame
3D ion distribution function
"""
probe = _check_probe(probe)
f, filename = _loaddistfile(probe, year, doy, hour, minute, second)
nionlines = None # Number of lines in ion distribution
linesread = 0 # Stores the total number of lines read in the file
# Loop through file to find end of ion distribution function
for i, line in enumerate(f):
# Find start of proton distribution function
if line[0:23] == 'Maximum of distribution':
ionstartline = i + 1
# Find number of lines in ion distribution function
if line[0:4] == ' 2-D':
nionlines = i - ionstartline
break
linesread += i
    # Bizarre case where there are two proton distributions in one file,
# or there's no electron data available
for i, line in enumerate(f):
if line[0:23] == 'Maximum of distribution' or\
line[0:30] == ' 1.2 Degree, Pizzo correction' or\
line[0:30] == ' -1.2 Degree, Pizzo correction':
warnings.warn("More than one ion distribution function found",
RuntimeWarning)
# NOTE: Bodge
linesread -= 1
break
f.close()
# If there's no electron data to get number of lines, set end of ion
# distribution function to end of file
if nionlines is None:
nionlines = i - ionstartline + 1
#####################################
# Read and process ion distribution #
#####################################
# If no ion data in file
if nionlines < 1:
raise RuntimeError('No ion distribution function data in file')
# Arguments for reading in data
readargs = {'usecols': [0, 1, 2, 3, 4, 5, 6, 7],
'names': ['Az', 'El', 'E_bin', 'pdf', 'counts',
'vx', 'vy', 'vz'],
'delim_whitespace': True,
'skiprows': ionstartline,
'nrows': nionlines}
# Read in data
dist = pd.read_csv(filename, **readargs)
    # Remove spacecraft aberration
# Assumes that spacecraft motion is always in the ecliptic (x-y)
# plane
if remove_advect:
params = distparams_single(probe, year, doy, hour, minute, second)
dist['vx'] += params['helios_vr']
dist['vy'] += params['helios_v']
# Convert to SI units
dist[['vx', 'vy', 'vz']] *= 1e3
dist['pdf'] *= 1e12
# Calculate magnitude, elevation and azimuth of energy bins
dist['|v|'], dist['theta'], dist['phi'] =\
util._cart2sph(dist['vx'], dist['vy'], dist['vz'])
# Calculate bin energy assuming particles are protons
dist['E_proton'] = 0.5 * constants.m_p.value * ((dist['|v|']) ** 2)
# Convert to multi-index using azimuth, elevation, and energy bins
dist = dist.set_index(['E_bin', 'El', 'Az'])
return dist
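# Illustrative example (not from the original source): reading a single ion
# distribution. The date and time below are hypothetical and assume the
# corresponding distribution file exists.
#
#     dist = ion_dist_single('1', 1976, 96, 1, 2, 3)
#     print(dist.index.names)                    # ['E_bin', 'El', 'Az']
#     print(dist[['pdf', '|v|', 'E_proton']].head())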
class _CoreFitDownloader(util.Downloader):
def __init__(self, probe):
self.probe = _check_probe(probe)
self.units = OrderedDict([
('B instrument', u.dimensionless_unscaled),
('Bx', u.nT), ('By', u.nT), ('Bz', u.nT),
('sigma B', u.nT),
('Ion instrument', u.dimensionless_unscaled),
('Status', u.dimensionless_unscaled),
('Tp_par', u.K), ('Tp_perp', u.K),
('carrot', u.dimensionless_unscaled),
('r_sun', u.AU), ('clat', u.deg),
('clong', u.deg), ('earth_he_angle', u.deg),
('n_p', u.cm**-3), ('vp_x', u.km / u.s),
('vp_y', u.km / u.s), ('vp_z', u.km / u.s),
('vth_p_par', u.km / u.s), ('vth_p_perp', u.km / u.s)])
def intervals(self, starttime, endtime):
return self.intervals_daily(starttime, endtime)
def fname(self, interval):
year = interval.start.strftime('%Y')
doy = interval.start.strftime('%j')
return f'h{self.probe}_{year}_{doy.zfill(3)}_corefit.csv'
def local_dir(self, interval):
year = interval.start.strftime('%Y')
return pathlib.Path('helios') / 'corefit' / year
def download(self, interval):
local_dir = self.local_path(interval).parent
local_dir.mkdir(parents=True, exist_ok=True)
year = interval.start.strftime('%Y')
remote_dir = (pathlib.Path('E1_experiment') /
'New_proton_corefit_data_2017' /
'ascii' /
f'helios{self.probe}' /
f'{year}')
remote_url = '{}{}'.format(remote_base_url, remote_dir)
try:
util._download_remote(remote_url,
self.fname(interval),
local_dir)
except urllib.error.HTTPError:
raise util.NoDataError
def load_local_file(self, interval):
return pd.read_csv(self.local_path(interval), parse_dates=['Time'])
def corefit(probe, starttime, endtime):
"""
Read in merged data set
Parameters
----------
probe : int, str
Helios probe to import data from. Must be 1 or 2.
starttime : datetime.datetime
Interval start time
endtime : datetime.datetime
Interval end time
Returns
-------
data : sunpy.timeseries.GenericTimeSeries
Data set
"""
dl = _CoreFitDownloader(probe)
return dl.load(starttime, endtime)
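# Illustrative usage of corefit (a sketch; assumes corefit files for the
# hypothetical interval can be found locally or fetched from the remote
# archive):
#
#     from datetime import datetime
#     data = corefit('2', datetime(1976, 1, 10), datetime(1976, 1, 12))
#     # The returned data set contains the columns listed in
#     # _CoreFitDownloader.units, e.g. 'n_p', 'vp_x', 'Tp_par', 'Tp_perp'.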
class _4hzDownloader(util.Downloader):
def __init__(self, probe):
self.probe = _check_probe(probe)
self.units = OrderedDict([('Bx', u.nT), ('By', u.nT),
('Bz', u.nT), ('|B|', u.nT)])
def intervals(self, starttime, endtime):
return self.intervals_daily(starttime, endtime)
def fname(self, interval):
year = int(interval.start.strftime('%Y'))
doy = int(interval.start.strftime('%j'))
return 'he{}1s{}{:03}.asc'.format(self.probe, year - 1900, doy)
def local_dir(self, interval):
year = interval.start.strftime('%Y')
return pathlib.Path('helios') / 'mag4hz' / year
def download(self, interval):
remote_dir = ('E2_experiment/'
'Data_Cologne_Nov2016_bestdata/'
'HR%20-%20High%20Resolution%204Hz%20Data/'
f'helios{self.probe}')
remote_url = f'{remote_base_url}/{remote_dir}'
local_fname = self.fname(interval)
remote_fname = None
# Because the filename contains a number between 0 and 24 at the end,
# get a list of all the filenames and compare them to the filename
# we want
def get_file_list(url, ext='', params={}):
response = requests.get(url, params=params)
if response.ok:
response_text = response.text
else:
return response.raise_for_status()
soup = BeautifulSoup(response_text, 'html.parser')
complete_file_list = [node.get('href') for node in
soup.find_all('a') if
node.get('href').endswith(ext)]
return complete_file_list
ext = 'asc'
remote_file_list = get_file_list(remote_url, ext)
for filename in remote_file_list:
if local_fname[:-4] in filename:
remote_fname = filename
break
if remote_fname is None:
raise util.NoDataError
dl_dir = self.local_path(interval).parent
util._download_remote(remote_url, remote_fname, dl_dir)
# Rename to a sensible and deterministic file name
downloaded_path = (dl_dir / remote_fname)
new_path = self.local_path(interval)
downloaded_path.rename(new_path)
def load_local_file(self, interval):
# Read in data
headings = ['Time', 'Bx', 'By', 'Bz']
cols = [0, 4, 5, 6]
data = pd.read_csv(self.local_path(interval), names=headings,
header=None, usecols=cols, delim_whitespace=True)
# Convert date info to datetime
data['Time'] = pd.to_datetime(data['Time'], format='%Y-%m-%dT%H:%M:%S')
data = data.set_index('Time', drop=True)
return data
def mag_4hz(probe, starttime, endtime):
"""
Read in 4Hz magnetic field data.
Parameters
----------
probe : int, str
Helios probe to import data from. Must be 1 or 2.
starttime : datetime.datetime
Interval start time
endtime : datetime.datetime
Interval end time
Returns
-------
data : sunpy.timeseries.GenericTimeSeries
4Hz magnetic field data set
"""
dl = _4hzDownloader(probe)
return dl.load(starttime, endtime)
class _NessDownloader(util.Downloader):
def __init__(self, probe):
self.probe = _check_probe(probe)
self.units = OrderedDict([('probe', u.dimensionless_unscaled),
('naverage', u.dimensionless_unscaled),
('Bx', u.nT), ('By', u.nT), ('Bz', u.nT),
('|B|', u.nT), ('sigma_Bx', u.nT),
('sigma_By', u.nT), ('sigma_Bz', u.nT)])
def intervals(self, starttime, endtime):
return self.intervals_daily(starttime, endtime)
def fname(self, interval):
year = int(interval.start.strftime('%Y'))
doy = int(interval.start.strftime('%j'))
return 'h{}{}{:03}.asc'.format(self.probe, year - 1900, doy)
def local_dir(self, interval):
year = interval.start.strftime('%Y')
return pathlib.Path('helios') / 'mag4hz' / year
def download(self, interval):
remote_dir = (pathlib.Path('E3_experiment') /
'helios{}_6sec_ness'.format(self.probe) /
interval.start.strftime('%Y'))
remote_url = f'{remote_base_url}{remote_dir}'
try:
util._download_remote(remote_url,
self.fname(interval),
self.local_path(interval).parent)
except URLError:
raise util.NoDataError
def load_local_file(self, interval):
# Read in data
headings = ['probe', 'year', 'doy', 'hour', 'minute', 'second',
'naverage', 'Bx', 'By', 'Bz', '|B|',
'sigma_Bx', 'sigma_By', 'sigma_Bz']
colspecs = [(1, 2), (2, 4), (4, 7), (7, 9), (9, 11), (11, 13),
(13, 15), (15, 22), (22, 29), (29, 36), (36, 42), (42, 48),
(48, 54), (54, 60)]
data = pd.read_fwf(self.local_path(interval), names=headings,
header=None, colspecs=colspecs)
# Process data
data['year'] += 1900
# Convert date info to datetime
data['Time'] = pd.to_datetime(data['year'], format='%Y') + \
pd.to_timedelta(data['doy'] - 1, unit='d') + \
pd.to_timedelta(data['hour'], unit='h') + \
pd.to_timedelta(data['minute'], unit='m') + \
pd.to_timedelta(data['second'], unit='s')
data = data.drop(['year', 'doy', 'hour', 'minute', 'second'], axis=1)
data = data.set_index('Time', drop=False)
return data
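# Worked example (illustrative) of the timestamp reconstruction above:
# a row with year=75 (shifted to 1975), doy=63, hour=4, minute=30, second=12
# becomes to_datetime('1975') + 62 days + 4 h + 30 min + 12 s,
# i.e. 1975-03-04 04:30:12.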
def mag_ness(probe, starttime, endtime):
"""
Read in 6 second magnetic field data.
Parameters
----------
probe : int, str
Helios probe to import data from. Must be 1 or 2.
starttime : datetime.datetime
Interval start time
endtime : datetime.datetime
Interval end time
Returns
-------
data : pandas.DataFrame
6 second magnetic field data set
"""
dl = _NessDownloader(probe)
return dl.load(starttime, endtime)
def _docstring(identifier, extra):
return cdasrest._docstring(identifier, 'M', extra)
def _helios(starttime, endtime, identifier, units=None,
warn_missing_units=True):
"""
Generic method for downloading Helios data from CDAWeb.
"""
dl = cdasrest.CDASDwonloader('helios', identifier, 'helios', units=units,
warn_missing_units=warn_missing_units)
return dl.load(starttime, endtime)
def merged(probe, starttime, endtime):
identifier = f'HELIOS{probe}_40SEC_MAG-PLASMA'
return _helios(starttime, endtime, identifier,
warn_missing_units=False)
merged.__doc__ = _docstring(
'HELIOS1_40SEC_MAG-PLASMA', 'merged magnetic field and plasma data.')
| dstansby/heliopy | heliopy/data/helios.py | Python | gpl-3.0 | 38,861 | 0 |
# coding=utf-8
# Copyright (c) 2001, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# [matrix] channel #navitia:matrix.org (https://app.element.io/#/room/#navitia:matrix.org)
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from tests.mock_navitia import navitia_response
response = navitia_response.NavitiaResponse()
response.queries = [
"vehicle_journeys/?filter=vehicle_journey.has_code(source, Code-orders)&since=20120615T120000Z&until=20120615T190000Z&data_freshness=base_schedule&depth=2"
    # request time is UTC -> 12:00 is 8:00 local in Sherbrooke
]
response.response_code = 200
response.json_response = """
{
"disruptions": [],
"feed_publishers": [
{
"id": "builder",
"license": "ODBL",
"name": "departure board",
"url": "www.canaltp.fr"
}
],
"links": [
],
"pagination": {
"items_on_page": 1,
"items_per_page": 25,
"start_page": 0,
"total_result": 1
},
"vehicle_journeys": [
{
"calendars": [
{
"active_periods": [
{
"begin": "20120615",
"end": "20130615"
}
],
"week_pattern": {
"friday": true,
"monday": false,
"saturday": false,
"sunday": false,
"thursday": false,
"tuesday": false,
"wednesday": false
}
}
],
"disruptions": [],
"id": "R:vj1",
"name": "R:vj1",
"stop_times": [
{
"arrival_time": "100000",
"departure_time": "100000",
"utc_arrival_time": "140000",
"utc_departure_time": "140000",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:14"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR1"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR1",
"label": "StopR1",
"links": [],
"name": "StopR1",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR1",
"label": "StopR1",
"links": [],
"name": "StopR1",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "101000",
"departure_time": "101000",
"utc_arrival_time": "140100",
"utc_departure_time": "140100",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:15"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR2"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR2",
"label": "StopR2",
"links": [],
"name": "StopR2",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR2",
"label": "StopR2",
"links": [],
"name": "StopR2",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "102000",
"departure_time": "102000",
"utc_arrival_time": "140200",
"utc_departure_time": "140200",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:16"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR3"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR3",
"label": "StopR3",
"links": [],
"name": "StopR3",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR3",
"label": "StopR3",
"links": [],
"name": "StopR3",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "103000",
"departure_time": "103000",
"utc_arrival_time": "140300",
"utc_departure_time": "140300",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR4"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR4",
"label": "StopR4",
"links": [],
"name": "StopR4",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR4",
"label": "StopR4",
"links": [],
"name": "StopR4",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "104000",
"departure_time": "104000",
"utc_arrival_time": "140400",
"utc_departure_time": "140400",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR5"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR5",
"label": "StopR5",
"links": [],
"name": "StopR5",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR5",
"label": "StopR5",
"links": [],
"name": "StopR5",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "105000",
"departure_time": "105000",
"utc_arrival_time": "140500",
"utc_departure_time": "140500",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR6"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR6",
"label": "StopR6",
"links": [],
"name": "StopR6",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR6",
"label": "StopR6",
"links": [],
"name": "StopR6",
"timezone": "America/Montreal"
}
}
}
],
"trip": {
"id": "R:vj1",
"name": "R:vj1"
},
"validity_pattern": {
"beginning_date": "20120614",
"days": "100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010"
}
}
]
}
"""
| CanalTP/kirin | tests/mock_navitia/vj_bad_order.py | Python | agpl-3.0 | 13,166 | 0.000228 |
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This script contains unit tests of the :mod:`rmgpy.quantity` module.
"""
import unittest
import math
import numpy
import rmgpy.constants as constants
import rmgpy.quantity as quantity
################################################################################
class TestAcceleration(unittest.TestCase):
"""
Contains unit tests of the Acceleration unit type object.
"""
def test_mpers2(self):
"""
Test the creation of an acceleration quantity with units of m/s^2.
"""
q = quantity.Acceleration(1.0,"m/s^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m/s^2")
def test_cmpers2(self):
"""
Test the creation of an acceleration quantity with units of cm/s^2.
"""
q = quantity.Acceleration(1.0,"cm/s^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 0.01, delta=1e-8)
self.assertEqual(q.units, "cm/s^2")
################################################################################
class TestArea(unittest.TestCase):
"""
Contains unit tests of the Area unit type object.
"""
def test_m2(self):
"""
Test the creation of an area quantity with units of m^2.
"""
q = quantity.Area(1.0,"m^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m^2")
def test_cm2(self):
"""
        Test the creation of an area quantity with units of cm^2.
"""
q = quantity.Area(1.0,"cm^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-4, delta=1e-10)
self.assertEqual(q.units, "cm^2")
################################################################################
class TestConcentration(unittest.TestCase):
"""
Contains unit tests of the Concentration unit type object.
"""
def test_perm3(self):
"""
        Test the creation of a concentration quantity with units of m^-3.
"""
try:
q = quantity.Concentration(1.0,"m^-3")
self.fail('Allowed invalid unit type "m^-3".')
except quantity.QuantityError:
pass
def test_molperm3(self):
"""
        Test the creation of a concentration quantity with units of mol/m^3.
"""
q = quantity.Concentration(1.0,"mol/m^3")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "mol/m^3")
def test_moleculesperm3(self):
"""
        Test the creation of a concentration quantity with units of molecules/m^3.
"""
q = quantity.Concentration(1.0,"molecules/m^3")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*constants.Na, 1.0, delta=1e-6)
self.assertEqual(q.units, "molecules/m^3")
################################################################################
class TestEnergy(unittest.TestCase):
"""
Contains unit tests of the Energy unit type object.
"""
def test_J(self):
"""
Test the creation of an energy quantity with units of J.
"""
try:
q = quantity.Energy(1.0,"J")
self.fail('Allowed invalid unit type "J".')
except quantity.QuantityError:
pass
def test_Jpermol(self):
"""
Test the creation of an energy quantity with units of J/mol.
"""
q = quantity.Energy(1.0,"J/mol")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "J/mol")
def test_cal(self):
"""
Test the creation of an energy quantity with units of cal.
"""
try:
q = quantity.Energy(1.0,"cal")
self.fail('Allowed invalid unit type "cal".')
except quantity.QuantityError:
pass
def test_calpermol(self):
"""
Test the creation of an energy quantity with units of cal/mol.
"""
q = quantity.Energy(1.0,"cal/mol")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 4.184, delta=1e-6)
self.assertEqual(q.units, "cal/mol")
def test_kJ(self):
"""
Test the creation of an energy quantity with units of kJ.
"""
try:
q = quantity.Energy(1.0,"kJ")
self.fail('Allowed invalid unit type "kJ".')
except quantity.QuantityError:
pass
def test_kJpermol(self):
"""
Test the creation of an energy quantity with units of kJ/mol.
"""
q = quantity.Energy(1.0,"kJ/mol")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1000., delta=1e-6)
self.assertEqual(q.units, "kJ/mol")
def test_kcal(self):
"""
Test the creation of an energy quantity with units of kcal.
"""
try:
q = quantity.Energy(1.0,"kcal")
self.fail('Allowed invalid unit type "kcal".')
except quantity.QuantityError:
pass
def test_kcalpermol(self):
"""
Test the creation of an energy quantity with units of kcal/mol.
"""
q = quantity.Energy(1.0,"kcal/mol")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 4184., delta=1e-6)
self.assertEqual(q.units, "kcal/mol")
def test_Kelvin(self):
"""
Test the creation of an energy quantity with units of K (not really an energy!).
"""
q = quantity.Energy(10.0,"K")
self.assertAlmostEqual(q.value, 10*8.314472, delta=1e-6)
self.assertEqual(q.units, "J/mol")
################################################################################
class TestDipoleMoment(unittest.TestCase):
"""
Contains unit tests of the DipoleMoment unit type object.
"""
def test_Ctimesm(self):
"""
Test the creation of a dipole moment quantity with units of C*m.
"""
q = quantity.DipoleMoment(1.0,"C*m")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, 6)
self.assertEqual(q.units, "C*m")
def test_D(self):
"""
        Test the creation of a dipole moment quantity with units of De (debye).
"""
q = quantity.DipoleMoment(1.0,"De")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*constants.c*1.0e21, 1.0, 6)
self.assertEqual(q.units, "De")
################################################################################
class TestFlux(unittest.TestCase):
"""
Contains unit tests of the Flux unit type object.
"""
def test_perm2pers(self):
"""
Test the creation of a flux quantity with units of m^-2*s^-1.
"""
try:
q = quantity.Flux(1.0,"m^-2*s^-1")
self.fail('Allowed invalid unit type "m^-2*s^-1".')
except quantity.QuantityError:
pass
def test_molperm3(self):
"""
Test the creation of a flux quantity with units of mol/(m^2*s).
"""
q = quantity.Flux(1.0,"mol/(m^2*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "mol/(m^2*s)")
def test_moleculesperm3(self):
"""
Test the creation of a flux quantity with units of molecules/(m^2*s).
"""
q = quantity.Flux(1.0,"molecules/(m^2*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*constants.Na, 1.0, delta=1e-6)
self.assertEqual(q.units, "molecules/(m^2*s)")
################################################################################
class TestForce(unittest.TestCase):
"""
Contains unit tests of the Force unit type object.
"""
def test_N(self):
"""
        Test the creation of a force quantity with units of N.
"""
q = quantity.Force(1.0,"N")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "N")
################################################################################
class TestFrequency(unittest.TestCase):
"""
Contains unit tests of the Frequency unit type object. Note that, as a
special case, frequencies can be read in several units, but are always
stored internally as cm^-1.
"""
def test_cm_1(self):
"""
Test the creation of a frequency quantity with units of cm^-1.
"""
q = quantity.Frequency(1.0,"cm^-1")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^-1")
def test_s_1(self):
"""
Test the creation of a frequency quantity with units of s^-1.
"""
q = quantity.Frequency(1.0,"s^-1")
self.assertAlmostEqual(q.value, 1./(constants.c*100.), delta=1e-17)
self.assertAlmostEqual(q.value_si, 1./(constants.c*100.), delta=1e-17)
self.assertEqual(q.units, "cm^-1")
def test_K(self):
"""
Test the creation of a frequency quantity with units of K.
"""
q = quantity.Frequency(1.0,"K")
self.assertAlmostEqual(q.value, constants.kB/(constants.h*constants.c*100.), 6)
self.assertAlmostEqual(q.value_si, constants.kB/(constants.h*constants.c*100.), delta=1e-6)
self.assertEqual(q.units, "cm^-1")
def test_eV(self):
"""
Test the creation of a frequency quantity with units of eV.
"""
q = quantity.Frequency(1.0,"eV")
self.assertAlmostEqual(q.value, constants.e/(constants.h*constants.c*100.), 2)
self.assertAlmostEqual(q.value_si, constants.e/(constants.h*constants.c*100.), delta=1e-2)
self.assertEqual(q.units, "cm^-1")
def test_Hz(self):
"""
Test the creation of a frequency quantity with units of Hz.
"""
q = quantity.Frequency(1.0,"Hz")
self.assertAlmostEqual(q.value, 1./(constants.c*100.), delta=1e-17)
self.assertAlmostEqual(q.value_si, 1./(constants.c*100.), delta=1e-17)
self.assertEqual(q.units, "cm^-1")
def test_kHz(self):
"""
Test the creation of a frequency quantity with units of kHz.
"""
q = quantity.Frequency(1.0,"kHz")
self.assertAlmostEqual(q.value, 1e3/(constants.c*100.), delta=1e-14)
self.assertAlmostEqual(q.value_si, 1e3/(constants.c*100.), delta=1e-14)
self.assertEqual(q.units, "cm^-1")
def test_MHz(self):
"""
Test the creation of a frequency quantity with units of MHz.
"""
q = quantity.Frequency(1.0,"MHz")
self.assertAlmostEqual(q.value, 1e6/(constants.c*100.), delta=1e-11)
self.assertAlmostEqual(q.value_si, 1e6/(constants.c*100.), delta=1e-11)
self.assertEqual(q.units, "cm^-1")
def test_GHz(self):
"""
Test the creation of a frequency quantity with units of GHz.
"""
q = quantity.Frequency(1.0,"GHz")
self.assertAlmostEqual(q.value, 1e9/(constants.c*100.), delta=1e-08)
self.assertAlmostEqual(q.value_si, 1e9/(constants.c*100.), delta=1e-08)
self.assertEqual(q.units, "cm^-1")
################################################################################
class TestHeatCapacity(unittest.TestCase):
"""
Contains unit tests of the HeatCapacity unit type object.
"""
def test_JperK(self):
"""
Test the creation of a heat capacity quantity with units of J/K.
"""
try:
q = quantity.HeatCapacity(1.0,"J/K")
self.fail('Allowed invalid unit type "J/K".')
except quantity.QuantityError:
pass
def test_JpermolperK(self):
"""
Test the creation of a heat capacity quantity with units of J/(mol*K).
"""
q = quantity.HeatCapacity(1.0,"J/(mol*K)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "J/(mol*K)")
def test_calperK(self):
"""
Test the creation of a heat capacity quantity with units of cal/K.
"""
try:
q = quantity.HeatCapacity(1.0,"cal/K")
self.fail('Allowed invalid unit type "cal/K".')
except quantity.QuantityError:
pass
def test_calpermolperK(self):
"""
Test the creation of a heat capacity quantity with units of cal/(mol*K).
"""
q = quantity.HeatCapacity(1.0,"cal/(mol*K)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 4.184, delta=1e-6)
self.assertEqual(q.units, "cal/(mol*K)")
def test_kJperK(self):
"""
Test the creation of a heat capacity quantity with units of kJ/K.
"""
try:
q = quantity.HeatCapacity(1.0,"kJ/K")
self.fail('Allowed invalid unit type "kJ/K".')
except quantity.QuantityError:
pass
def test_kJpermolperK(self):
"""
Test the creation of a heat capacity quantity with units of kJ/(mol*K).
"""
q = quantity.HeatCapacity(1.0,"kJ/(mol*K)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1000., delta=1e-6)
self.assertEqual(q.units, "kJ/(mol*K)")
def test_kcalperK(self):
"""
Test the creation of a heat capacity quantity with units of kcal/K.
"""
try:
q = quantity.HeatCapacity(1.0,"kcal/K")
self.fail('Allowed invalid unit type "kcal/K".')
except quantity.QuantityError:
pass
def test_kcalpermolperK(self):
"""
Test the creation of a heat capacity quantity with units of kcal/(mol*K).
"""
q = quantity.HeatCapacity(1.0,"kcal/(mol*K)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 4184., delta=1e-6)
self.assertEqual(q.units, "kcal/(mol*K)")
################################################################################
class TestInertia(unittest.TestCase):
"""
Contains unit tests of the Inertia unit type object.
"""
def test_kg_m2(self):
"""
Test the creation of a moment of inertia quantity with units of kg*m^2.
"""
q = quantity.Inertia(1.0,"kg*m^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "kg*m^2")
def test_amu_angstrom2(self):
"""
Test the creation of a moment of inertia quantity with units of amu*angstrom^2.
"""
q = quantity.Inertia(1.0,"amu*angstrom^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*constants.Na*1e23, 1.0, delta=1e-6)
self.assertEqual(q.units, "amu*angstrom^2")
################################################################################
class TestLength(unittest.TestCase):
"""
Contains unit tests of the Length unit type object.
"""
def test_m(self):
"""
Test the creation of a length quantity with units of m.
"""
q = quantity.Length(1.0,"m")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m")
def test_km(self):
"""
Test the creation of a length quantity with units of km.
"""
q = quantity.Length(1.0,"km")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e3, delta=1e-3)
self.assertEqual(q.units, "km")
def test_cm(self):
"""
Test the creation of a length quantity with units of cm.
"""
q = quantity.Length(1.0,"cm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-2, delta=1e-8)
self.assertEqual(q.units, "cm")
def test_mm(self):
"""
Test the creation of a length quantity with units of mm.
"""
q = quantity.Length(1.0,"mm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-3, delta=1e-9)
self.assertEqual(q.units, "mm")
def test_um(self):
"""
Test the creation of a length quantity with units of um.
"""
q = quantity.Length(1.0,"um")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-6, delta=1e-12)
self.assertEqual(q.units, "um")
def test_nm(self):
"""
Test the creation of a length quantity with units of nm.
"""
q = quantity.Length(1.0,"nm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-9, delta=1e-15)
self.assertEqual(q.units, "nm")
def test_pm(self):
"""
Test the creation of a length quantity with units of pm.
"""
q = quantity.Length(1.0,"pm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-12, delta=1e-18)
self.assertEqual(q.units, "pm")
################################################################################
class TestMass(unittest.TestCase):
"""
Contains unit tests of the Mass unit type object.
"""
def test_kg(self):
"""
Test the creation of a mass quantity with units of kg.
"""
q = quantity.Mass(1.0,"kg")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "kg")
def test_gpermol(self):
"""
Test the creation of a mass quantity with units of g/mol. Note that
g/mol is automatically coerced to amu.
"""
q = quantity.Mass(1.0,"g/mol")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, constants.amu, delta=1e-32)
self.assertEqual(q.units, "amu")
def test_kgpermol(self):
"""
Test the creation of a mass quantity with units of kg/mol. Note that
kg/mol is automatically coerced to amu.
"""
q = quantity.Mass(1.0,"kg/mol")
self.assertAlmostEqual(q.value, 1000.0, 3)
self.assertAlmostEqual(q.value_si, 1000.*constants.amu, delta=1e-29)
self.assertEqual(q.units, "amu")
def test_amu(self):
"""
Test the creation of a mass quantity with units of amu.
"""
q = quantity.Mass(1.0,"amu")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, constants.amu, delta=1e-32)
self.assertEqual(q.units, "amu")
################################################################################
class TestMomentum(unittest.TestCase):
"""
Contains unit tests of the Momentum unit type object.
"""
def test_kgmpers2(self):
"""
Test the creation of a momentum quantity with units of kg*m/s^2.
"""
q = quantity.Momentum(1.0,"kg*m/s^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "kg*m/s^2")
################################################################################
class TestPower(unittest.TestCase):
"""
Contains unit tests of the Power unit type object.
"""
def test_W(self):
"""
Test the creation of a power quantity with units of W.
"""
q = quantity.Power(1.0,"W")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "W")
################################################################################
class TestPressure(unittest.TestCase):
"""
Contains unit tests of the Pressure unit type object.
"""
def test_Pa(self):
"""
Test the creation of a pressure quantity with units of Pa.
"""
q = quantity.Pressure(1.0,"Pa")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "Pa")
def test_bar(self):
"""
Test the creation of a pressure quantity with units of bar.
"""
q = quantity.Pressure(1.0,"bar")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e5, delta=1e-6)
self.assertEqual(q.units, "bar")
def test_atm(self):
"""
Test the creation of a pressure quantity with units of atm.
"""
q = quantity.Pressure(1.0,"atm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 101325., delta=1e-6)
self.assertEqual(q.units, "atm")
def test_torr(self):
"""
Test the creation of a pressure quantity with units of torr.
"""
q = quantity.Pressure(1.0,"torr")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 101325./760., delta=1e-6)
self.assertEqual(q.units, "torr")
def test_psi(self):
"""
Test the creation of a pressure quantity with units of psi.
"""
q = quantity.Pressure(1.0,"psi")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 101325./14.695949, delta=1e-2)
self.assertEqual(q.units, "psi")
################################################################################
class TestRateCoefficient(unittest.TestCase):
"""
Contains unit tests of the RateCoefficient unit type object.
"""
def test_s(self):
"""
Test the creation of a rate coefficient quantity with units of s^-1.
"""
q = quantity.RateCoefficient(1.0,"s^-1")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "s^-1")
def test_m3permols(self):
"""
Test the creation of a rate coefficient quantity with units of m^3/(mol*s).
"""
q = quantity.RateCoefficient(1.0,"m^3/(mol*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m^3/(mol*s)")
def test_m6permol2s(self):
"""
Test the creation of a rate coefficient quantity with units of m^6/(mol^2*s).
"""
q = quantity.RateCoefficient(1.0,"m^6/(mol^2*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m^6/(mol^2*s)")
def test_m9permol3s(self):
"""
Test the creation of a rate coefficient quantity with units of m^9/(mol^3*s).
"""
q = quantity.RateCoefficient(1.0,"m^9/(mol^3*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m^9/(mol^3*s)")
def test_cm3permols(self):
"""
Test the creation of a rate coefficient quantity with units of cm^3/(mol*s).
"""
q = quantity.RateCoefficient(1.0,"cm^3/(mol*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*1e6, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^3/(mol*s)")
def test_cm6permol2s(self):
"""
Test the creation of a rate coefficient quantity with units of cm^6/(mol^2*s).
"""
q = quantity.RateCoefficient(1.0,"cm^6/(mol^2*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*(1e6)**2, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^6/(mol^2*s)")
def test_cm9permol3s(self):
"""
Test the creation of a rate coefficient quantity with units of cm^9/(mol^3*s).
"""
q = quantity.RateCoefficient(1.0,"cm^9/(mol^3*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*(1e6)**3, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^9/(mol^3*s)")
def test_cm3permolecules(self):
"""
Test the creation of a rate coefficient quantity with units of cm^3/(molecule*s).
"""
q = quantity.RateCoefficient(1.0,"cm^3/(molecule*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*1e6/constants.Na, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^3/(molecule*s)")
def test_cm6permolecule2s(self):
"""
Test the creation of a rate coefficient quantity with units of cm^6/(molecule^2*s).
"""
q = quantity.RateCoefficient(1.0,"cm^6/(molecule^2*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*(1e6/constants.Na)**2, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^6/(molecule^2*s)")
def test_cm9permolecule3s(self):
"""
Test the creation of a rate coefficient quantity with units of cm^9/(molecule^3*s).
"""
q = quantity.RateCoefficient(1.0,"cm^9/(molecule^3*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*(1e6/constants.Na)**3, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^9/(molecule^3*s)")
################################################################################
class TestTemperature(unittest.TestCase):
"""
Contains unit tests of the Temperature unit type object.
"""
def test_K(self):
"""
Test the creation of a temperature quantity with units of K.
"""
q = quantity.Temperature(1.0,"K")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "K")
def test_degC(self):
"""
Test the creation of a temperature quantity with units of degrees C.
"""
with self.assertRaises(NotImplementedError):
q = quantity.Temperature(1.0,"degC")
def test_degF(self):
"""
Test the creation of a temperature quantity with units of degrees F.
"""
with self.assertRaises(NotImplementedError):
q = quantity.Temperature(1.0,"degF")
def test_degR(self):
"""
Test the creation of a temperature quantity with units of degrees R.
"""
with self.assertRaises(NotImplementedError):
q = quantity.Temperature(1.0,"degR")
################################################################################
class TestTime(unittest.TestCase):
"""
Contains unit tests of the Time unit type object.
"""
def test_s(self):
"""
Test the creation of a time quantity with units of s.
"""
q = quantity.Time(1.0,"s")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "s")
def test_ms(self):
"""
Test the creation of a time quantity with units of ms.
"""
q = quantity.Time(1.0,"ms")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-3, delta=1e-9)
self.assertEqual(q.units, "ms")
def test_us(self):
"""
Test the creation of a time quantity with units of us.
"""
q = quantity.Time(1.0,"us")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-6, delta=1e-12)
self.assertEqual(q.units, "us")
def test_ns(self):
"""
Test the creation of a time quantity with units of ns.
"""
q = quantity.Time(1.0,"ns")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-9, delta=1e-15)
self.assertEqual(q.units, "ns")
def test_ps(self):
"""
Test the creation of a time quantity with units of ps.
"""
q = quantity.Time(1.0,"ps")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-12, delta=1e-18)
self.assertEqual(q.units, "ps")
def test_fs(self):
"""
Test the creation of a time quantity with units of fs.
"""
q = quantity.Time(1.0,"fs")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-15, delta=1e-21)
self.assertEqual(q.units, "fs")
def test_min(self):
"""
Test the creation of a time quantity with units of min.
"""
q = quantity.Time(1.0,"min")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 60.0, delta=1e-6)
self.assertEqual(q.units, "min")
def test_hr(self):
"""
Test the creation of a time quantity with units of hr.
"""
q = quantity.Time(1.0,"hr")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 3600.0, delta=1e-6)
self.assertEqual(q.units, "hr")
################################################################################
class TestVelocity(unittest.TestCase):
"""
Contains unit tests of the Velocity unit type object.
"""
def test_mpers(self):
"""
        Test the creation of a velocity quantity with units of m/s.
"""
q = quantity.Velocity(1.0,"m/s")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m/s")
def test_cmpers(self):
"""
        Test the creation of a velocity quantity with units of cm/s.
"""
q = quantity.Velocity(1.0,"cm/s")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 0.01, delta=1e-8)
self.assertEqual(q.units, "cm/s")
################################################################################
class TestVolume(unittest.TestCase):
"""
Contains unit tests of the Volume unit type object.
"""
def test_m3(self):
"""
        Test the creation of a volume quantity with units of m^3.
"""
q = quantity.Volume(1.0,"m^3")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m^3")
def test_L(self):
"""
        Test the creation of a volume quantity with units of L.
"""
q = quantity.Volume(1.0,"L")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-3, delta=1e-9)
self.assertEqual(q.units, "L")
class TestQuantity(unittest.TestCase):
"""
Contains unit tests testing the value and uncertainty storage behavior for ScalarQuantity and ArrayQuantity objects
"""
def setUp(self):
"""
        A function run before each unit test in this class. It creates several
        ScalarQuantity and ArrayQuantity objects used by the tests below.
"""
self.Cp = quantity.Quantity([-6.51,-5.19333,-4.47333,-3.76,-3.44333,-2.94667,-2.47],'cal/(mol*K)',
'+|-',[2.72057,3.42407,4.84068,5.11681,5.13207,5.8757,8.29108])
self.v = quantity.Quantity([5,10,12],'cm/s','*|/',[1.2,0.4,1])
self.H = quantity.Quantity(33.1097,'kcal/mol','+|-',24.8344)
self.A = quantity.Quantity(7.25e+13, 'cm^3/(mol*s)', '*|/', 5)
self.Cp_array = quantity.ArrayQuantity([-6.51,-5.19333,-4.47333,-3.76,-3.44333,-2.94667,-2.47],'cal/(mol*K)',
[2.72057,3.42407,4.84068,5.11681,5.13207,5.8757,8.29108],'+|-')
self.v_array = quantity.ArrayQuantity([5,10,12],'cm/s',[1.2,0.4,1],'*|/')
self.H_scalar = quantity.ScalarQuantity(33.1097,'kcal/mol',24.8344,'+|-',)
self.A_scalar = quantity.ScalarQuantity(7.25e+13, 'cm^3/(mol*s)', 5,'*|/')
def test_scalar_conversion(self):
"""
ScalarQuantity: test that the value and uncertainty get converted to the proper si value.
"""
# Uncertainty of type +|- must be adjusted by units
self.assertAlmostEqual(self.H.value_si,self.H.value*4184)
self.assertAlmostEqual(self.H.uncertainty_si, self.H.uncertainty*4184)
self.assertAlmostEqual(self.H_scalar.value_si,self.H_scalar.value*4184)
self.assertAlmostEqual(self.H_scalar.uncertainty_si, self.H_scalar.uncertainty*4184)
# Uncertainty of type *|/ does not need to be adjusted by units
self.assertAlmostEqual(self.A.value_si,self.A.value*1e-6)
self.assertAlmostEqual(self.A.uncertainty_si, self.A.uncertainty)
self.assertAlmostEqual(self.A_scalar.value_si, self.A_scalar.value*1e-6)
self.assertAlmostEqual(self.A_scalar.uncertainty_si, self.A_scalar.uncertainty)
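    # Worked example (illustrative) of the distinction tested above:
    # H = 33.1097 kcal/mol with '+|-' 24.8344 -> both value and uncertainty
    # are multiplied by 4184 to give J/mol, whereas
    # A = 7.25e13 cm^3/(mol*s) with '*|/' 5 -> only the value is converted
    # (times 1e-6 to m^3/(mol*s)); the multiplicative factor 5 has no units.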
def test_array_conversion(self):
"""
ArrayQuantity: test that the value and uncertainty get converted to the proper si value.
"""
numpy.testing.assert_array_almost_equal(self.v.value_si, self.v.value*1e-2)
numpy.testing.assert_array_almost_equal(self.v.uncertainty_si, self.v.uncertainty)
numpy.testing.assert_array_almost_equal(self.v_array.value_si, self.v.value*1e-2)
numpy.testing.assert_array_almost_equal(self.v_array.uncertainty_si, self.v.uncertainty)
numpy.testing.assert_array_almost_equal(self.Cp.value_si, self.Cp.value*4.184)
numpy.testing.assert_array_almost_equal(self.Cp.uncertainty_si, self.Cp.uncertainty*4.184)
numpy.testing.assert_array_almost_equal(self.Cp_array.value_si, self.Cp.value*4.184)
numpy.testing.assert_array_almost_equal(self.Cp_array.uncertainty_si, self.Cp.uncertainty*4.184)
def test_scalar_repr(self):
"""
Test that the ScalarQuantity objects can be recreated using their __repr__ function
"""
# Test that the values can be reconstituted
H = quantity.Quantity(eval(repr(self.H)))
self.assertEqual(H.value_si, self.H.value_si)
self.assertEqual(H.uncertainty_si, self.H.uncertainty_si)
self.assertEqual(H.uncertaintyType, self.H.uncertaintyType)
self.assertEqual(H.units, self.H.units)
A = quantity.Quantity(eval(repr(self.A)))
self.assertEqual(A.value_si, self.A.value_si)
self.assertEqual(A.uncertainty_si, self.A.uncertainty_si)
self.assertEqual(A.uncertaintyType, self.A.uncertaintyType)
self.assertEqual(A.units, self.A.units)
# Test that the __repr__ strings are the same
self.assertEqual(repr(H),repr(self.H))
self.assertEqual(repr(self.H),repr(self.H_scalar))
self.assertEqual(repr(A),repr(self.A))
self.assertEqual(repr(self.A),repr(self.A_scalar))
def test_array_repr(self):
"""
Test that the ArrayQuantity objects can be recreated using their __repr__ function
"""
# Test that the values can be reconstituted
Cp = quantity.Quantity(eval(repr(self.Cp)))
numpy.testing.assert_array_almost_equal(Cp.value_si, self.Cp.value_si)
numpy.testing.assert_array_almost_equal(Cp.uncertainty_si, self.Cp.uncertainty_si)
self.assertEqual(Cp.uncertaintyType, self.Cp.uncertaintyType)
self.assertEqual(Cp.units, self.Cp.units)
v = quantity.Quantity(eval(repr(self.v)))
numpy.testing.assert_array_almost_equal(v.value_si, self.v.value_si)
numpy.testing.assert_array_almost_equal(v.uncertainty_si, self.v.uncertainty_si)
self.assertEqual(v.uncertaintyType, self.v.uncertaintyType)
self.assertEqual(v.units, self.v.units)
# Test that the __repr__ strings are the same
self.assertEqual(repr(Cp),repr(self.Cp))
self.assertEqual(repr(self.Cp),repr(self.Cp_array))
self.assertEqual(repr(v),repr(self.v))
        self.assertEqual(repr(self.v),repr(self.v_array))
| pierrelb/RMG-Py | rmgpy/quantityTest.py | Python | mit | 38,311 | 0.007021
import re, os
import util
from InputParameters import InputParameters
from MooseObject import MooseObject
class Tester(MooseObject):
@staticmethod
def validParams():
params = MooseObject.validParams()
# Common Options
params.addRequiredParam('type', "The type of test of Tester to create for this test.")
params.addParam('max_time', 300, "The maximum in seconds that the test will be allowed to run.")
    params.addParam('min_reported_time', "The minimum time elapsed before a test is reported as taking too long to run.")
params.addParam('skip', "Provide a reason this test will be skipped.")
params.addParam('deleted', "Tests that only show up when using the '-e' option (Permanently skipped or not implemented).")
params.addParam('heavy', False, "Set to True if this test should only be run when the '--heavy' option is used.")
params.addParam('group', [], "A list of groups for which this test belongs.")
params.addParam('prereq', [], "A list of prereq tests that need to run successfully before launching this test.")
params.addParam('skip_checks', False, "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)")
params.addParam('scale_refine', 0, "The number of refinements to do when scaling")
params.addParam('success_message', 'OK', "The successful message")
params.addParam('cli_args', [], "Additional arguments to be passed to the test.")
params.addParam('valgrind', 'NONE', "Set to (NONE, NORMAL, HEAVY) to determine which configurations where valgrind will run.")
# Test Filters
    params.addParam('platform', ['ALL'], "A list of platforms on which this test will run. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')")
    params.addParam('compiler', ['ALL'], "A list of compilers for which this test is valid. ('ALL', 'GCC', 'INTEL', 'CLANG')")
    params.addParam('petsc_version', ['ALL'], "A list of petsc versions on which this test will run; supports normal comparison operators ('<', '>', etc...)")
params.addParam('mesh_mode', ['ALL'], "A list of mesh modes for which this test will run ('DISTRIBUTED', 'REPLICATED')")
params.addParam('method', ['ALL'], "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')")
params.addParam('library_mode', ['ALL'], "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')")
params.addParam('dtk', ['ALL'], "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')")
params.addParam('unique_ids', ['ALL'], "A test that runs only if UNIQUE_IDs are enabled ('ALL', 'TRUE', 'FALSE')")
params.addParam('recover', True, "A test that runs with '--recover' mode enabled")
params.addParam('vtk', ['ALL'], "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')")
params.addParam('tecplot', ['ALL'], "A test that runs only if Tecplot is detected ('ALL', 'TRUE', 'FALSE')")
params.addParam('dof_id_bytes', ['ALL'], "A test that runs only if libmesh is configured --with-dof-id-bytes = a specific number, e.g. '4', '8'")
params.addParam('petsc_debug', ['ALL'], "{False,True} -> test only runs when PETSc is configured with --with-debugging={0,1}, otherwise test always runs.")
params.addParam('curl', ['ALL'], "A test that runs only if CURL is detected ('ALL', 'TRUE', 'FALSE')")
params.addParam('tbb', ['ALL'], "A test that runs only if TBB is available ('ALL', 'TRUE', 'FALSE')")
params.addParam('superlu', ['ALL'], "A test that runs only if SuperLU is available via PETSc ('ALL', 'TRUE', 'FALSE')")
params.addParam('slepc', ['ALL'], "A test that runs only if SLEPc is available ('ALL', 'TRUE', 'FALSE')")
params.addParam('unique_id', ['ALL'], "A test that runs only if libmesh is configured with --enable-unique-id ('ALL', 'TRUE', 'FALSE')")
params.addParam('cxx11', ['ALL'], "A test that runs only if CXX11 is available ('ALL', 'TRUE', 'FALSE')")
params.addParam('asio', ['ALL'], "A test that runs only if ASIO is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('depend_files', [], "A test that only runs if all depend files exist (files listed are expected to be relative to the base directory, not the test directory)")
params.addParam('env_vars', [], "A test that only runs if all the environment variables listed exist")
    params.addParam('should_execute', True, 'Whether or not the executable needs to be run. Use this to chain together multiple tests based off of one executable invocation')
    params.addParam('required_submodule', [], "A list of initialized submodules that this test requires.")
params.addParam('check_input', False, "Check for correct input file syntax")
    params.addParam('display_required', False, "The test requires an active display for rendering (i.e., ImageDiff tests).")
return params
def __init__(self, name, params):
MooseObject.__init__(self, name, params)
self.specs = params
# Initialize the status bucket class
self.status = util.TestStatus()
    # Enumerate the buckets here so they are easier to work with in the tester class
self.bucket_success = self.status.bucket_success
self.bucket_fail = self.status.bucket_fail
self.bucket_diff = self.status.bucket_diff
self.bucket_pbs = self.status.bucket_pbs
self.bucket_pending = self.status.bucket_pending
self.bucket_deleted = self.status.bucket_deleted
self.bucket_skip = self.status.bucket_skip
self.bucket_silent = self.status.bucket_silent
# Initialize the tester with a pending status
self.setStatus('launched', self.bucket_pending)
# Set the status message
if self.specs['check_input']:
self.success_message = 'SYNTAX PASS'
else:
self.success_message = self.specs['success_message']
# Set up common parameters
self.should_execute = self.specs['should_execute']
self.check_input = self.specs['check_input']
def getTestName(self):
return self.specs['test_name']
def getPrereqs(self):
return self.specs['prereq']
# Method to return if the test can run
def getRunnable(self):
return self.status.getRunnable()
# Method to return text color based on current test status
def getColor(self):
return self.status.getColor()
# Method to return the input file if applicable to this Tester
def getInputFile(self):
return None
# Method to return the output files if applicable to this Tester
def getOutputFiles(self):
return []
# Method to return the successful message printed to stdout
def getSuccessMessage(self):
return self.success_message
# Method to return status text (exodiff, crash, skipped because x, y and z etc)
def getStatusMessage(self):
return self.status.getStatusMessage()
# Method to return status bucket tuple
def getStatus(self):
return self.status.getStatus()
# Method to set the bucket status
def setStatus(self, reason, bucket):
self.status.setStatus(reason, bucket)
return self.getStatus()
# Method to check if a test has failed. This method will return true if a
# tester has failed at any point during the processing of the test.
# Note: It's possible for a tester to report false for both didFail and
# didPass. This will happen if the tester is in-progress for instance.
# See didPass()
def didFail(self):
return self.status.didFail()
# Method to check for a successful test
# Note: This method can return False until the tester has completely finished.
# For this reason it should be used only after the tester has completed.
# Instead you may want to use the didFail method which returns false
# only if the tester has failed at any point during the processing
# of that tester (e.g. after the main command has been run but before
# output has been tested).
# See didFail()
def didPass(self):
return self.status.didPass()
def getCheckInput(self):
return self.check_input
def setValgrindMode(self, mode):
# Increase the allotted time for tests when running with the valgrind option
if mode == 'NORMAL':
self.specs['max_time'] = self.specs['max_time'] * 2
elif mode == 'HEAVY':
self.specs['max_time'] = self.specs['max_time'] * 6
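# A worked sketch of the scaling above (hypothetical numbers, not taken from any
# spec file): a test with max_time = 300 seconds keeps 300 s for a plain run, is
# allowed 600 s in NORMAL valgrind mode and 1800 s in HEAVY mode, since
# valgrind-instrumented executables run several times slower than native ones.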
# Override this method to tell the harness whether or not this test should run.
# This function should return a tuple (Boolean, reason)
# If a reason is provided it'll be printed and counted as skipped. If the reason
# is left blank, the test will not be printed at all nor counted in the test totals.
def checkRunnable(self, options):
return (True, '')
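# A minimal sketch of a derived-class override (hypothetical requirement and
# names, for illustration only): skip with a printed reason when an environment
# variable this tester depends on is missing.
#
#   def checkRunnable(self, options):
#       if os.getenv('MY_FEATURE_LIB') is None:          # hypothetical requirement
#           return (False, 'MY_FEATURE_LIB not set')     # printed and counted as skipped
#       return (True, '')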
# Whether or not the executable should be run
# Don't override this
def shouldExecute(self):
return self.should_execute
# This method is called prior to running the test. It can be used to clean up files
# or do other preparations before the tester is run
def prepare(self, options):
return
def getThreads(self, options):
return 1
def getProcs(self, options):
return 1
# This method should return the executable command that will be executed by the tester
def getCommand(self, options):
return
# This method is called to return the commands (list) used for processing results
def processResultsCommand(self, moose_dir, options):
return []
# This method will be called to process the results of running the test. Any post-test
# processing should happen in this method
def processResults(self, moose_dir, retcode, options, output):
return
# This is the base level runnable check common to all Testers. DO NOT override
# this method in any of your derived classes. Instead see "checkRunnable"
def checkRunnableBase(self, options, checks, test_list=None):
reason = ''
# If --dry-run set the test status to pass and DO NOT return.
# This allows the additional checks below to run and report tests
# that would normally be skipped (and return False).
if options.dry_run:
self.success_message = 'DRY RUN'
self.setStatus(self.success_message, self.bucket_success)
# Check if we only want to run failed tests
if options.failed_tests:
if self.specs['test_name'] not in test_list:
self.setStatus('not failed', self.bucket_silent)
return False
# Are we running only tests in a specific group?
if options.group != 'ALL' and options.group not in self.specs['group']:
self.setStatus('unmatched group', self.bucket_silent)
return False
if options.not_group != '' and options.not_group in self.specs['group']:
self.setStatus('unmatched group', self.bucket_silent)
return False
# Store regexp for matching tests if --re is used
if options.reg_exp:
match_regexp = re.compile(options.reg_exp)
# If --re is given, only run tests matching the regexp. Needs to run before other SKIP methods.
# This also needs to be in its own bucket group: we normally print skipped messages,
# but we do not want to print tests that didn't match the regex.
if options.reg_exp and not match_regexp.search(self.specs['test_name']):
self.setStatus('silent', self.bucket_silent)
return False
# Check for deleted tests
if self.specs.isValid('deleted'):
if options.extra_info:
# We might want to trim the string so it formats nicely
if len(self.specs['deleted']) >= util.TERM_COLS - (len(self.specs['test_name'])+21):
test_reason = (self.specs['deleted'])[:(util.TERM_COLS - (len(self.specs['test_name'])+24))] + '...'
else:
test_reason = self.specs['deleted']
reason = 'deleted (' + test_reason + ')'
self.setStatus(reason, self.bucket_deleted)
return False
# Check for skipped tests
if self.specs.type('skip') is bool and self.specs['skip']:
# Backwards compatible (no reason)
self.setStatus('no reason', self.bucket_skip)
return False
elif self.specs.type('skip') is not bool and self.specs.isValid('skip'):
skip_message = self.specs['skip']
# We might want to trim the string so it formats nicely
if len(skip_message) >= util.TERM_COLS - (len(self.specs['test_name'])+21):
reason = (skip_message)[:(util.TERM_COLS - (len(self.specs['test_name'])+24))] + '...'
else:
reason = skip_message
self.setStatus(reason, self.bucket_skip)
return False
# If we're testing for SCALE_REFINE, then only run tests with a SCALE_REFINE set
elif (options.store_time or options.scaling) and self.specs['scale_refine'] == 0:
self.setStatus('silent', self.bucket_silent)
return False
# If we're testing with valgrind, then skip tests that require parallel or threads or don't meet the valgrind setting
elif options.valgrind_mode != '':
if self.specs['valgrind'].upper() == 'NONE':
reason = 'Valgrind==NONE'
elif self.specs['valgrind'].upper() == 'HEAVY' and options.valgrind_mode.upper() == 'NORMAL':
reason = 'Valgrind==HEAVY'
elif self.specs['min_parallel'] > 1 or self.specs['min_threads'] > 1:
reason = 'Valgrind requires serial'
if reason != '':
self.setStatus(reason, self.bucket_skip)
return False
# If we're running in recover mode skip tests that have recover = false
elif options.enable_recover and self.specs['recover'] == False:
reason = 'NO RECOVER'
self.setStatus(reason, self.bucket_skip)
return False
# Check for PETSc versions
(petsc_status, logic_reason, petsc_version) = util.checkPetscVersion(checks, self.specs)
if not petsc_status:
reason = 'using PETSc ' + str(checks['petsc_version']) + ' REQ: ' + logic_reason + ' ' + petsc_version
self.setStatus(reason, self.bucket_skip)
return False
# PETSc is being explicitly checked above
local_checks = ['platform', 'compiler', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids', 'vtk', 'tecplot', \
'petsc_debug', 'curl', 'tbb', 'superlu', 'cxx11', 'asio', 'unique_id', 'slepc']
for check in local_checks:
test_platforms = set()
operator_display = '!='
inverse_set = False
for x in self.specs[check]:
if x[0] == '!':
if inverse_set:
reason = 'Multiple Negation Unsupported'
self.setStatus(reason, self.bucket_skip)
return False
inverse_set = True
operator_display = '=='
x = x[1:] # Strip off the !
x_upper = x.upper()
if x_upper in test_platforms:
reason = 'Duplicate Entry or Negative of Existing Entry'
self.setStatus(reason, self.bucket_skip)
return False
test_platforms.add(x.upper())
match_found = len(test_platforms.intersection(checks[check])) > 0
# Either we didn't find the match when we were using normal "include" logic
# or we did find the match when we wanted to exclude it
if inverse_set == match_found:
reason = re.sub(r'\[|\]', '', check).upper() + operator_display + ', '.join(test_platforms)
self.setStatus(reason, self.bucket_skip)
return False
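# Worked sketch of the include/exclude logic above (hypothetical spec values):
# platform = ['!DARWIN'] builds test_platforms = {'DARWIN'} with inverse_set = True,
# so on a DARWIN host (match_found is True) the test is skipped with reason
# "PLATFORM==DARWIN", while on any other platform it runs.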
# Check for heavy tests
if options.all_tests or options.heavy_tests:
if not self.specs['heavy'] and options.heavy_tests:
reason = 'NOT HEAVY'
self.setStatus(reason, self.bucket_silent)
return False
elif self.specs['heavy']:
reason = 'HEAVY'
self.setStatus(reason, self.bucket_skip)
return False
# Check for positive scale refine values when using store timing options
if self.specs['scale_refine'] == 0 and options.store_time:
self.setStatus('scale_refine==0 store_time=True', self.bucket_skip)
return False
# There should only be one entry in self.specs['dof_id_bytes']
for x in self.specs['dof_id_bytes']:
if x != 'ALL' and x not in checks['dof_id_bytes']:
reason = '--with-dof-id-bytes!=' + x
self.setStatus(reason, self.bucket_skip)
return False
# Check to make sure depend files exist
for file in self.specs['depend_files']:
if not os.path.isfile(os.path.join(self.specs['base_dir'], file)):
reason = 'DEPEND FILES'
self.setStatus(reason, self.bucket_skip)
return False
# Check to make sure required submodules are initialized
for var in self.specs['required_submodule']:
if var not in checks["submodules"]:
reason = '%s submodule not initialized' % var
self.setStatus(reason, self.bucket_skip)
return False
# Check to make sure the required environment variables exist
for var in self.specs['env_vars']:
if var not in os.environ:
reason = 'ENV VAR NOT SET'
self.setStatus(reason, self.bucket_skip)
return False
# Check for display
if self.specs['display_required'] and not os.getenv('DISPLAY', False):
reason = 'NO DISPLAY'
self.setStatus(reason, self.bucket_skip)
return False
# Check the return values of the derived classes
return self.checkRunnable(options)
| backmari/moose | python/TestHarness/testers/Tester.py | Python | lgpl-2.1 | 18,823 | 0.004835 |
"""
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex,
ABCDateOffset)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
import pandas.core.common as com
import pandas._libs.window as _window
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
'axis', 'on', 'closed']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0, on=None, closed=None,
**kwargs):
if freq is not None:
warnings.warn("The freq kw is deprecated and will be removed in a "
"future version. You can resample prior to passing "
"to a window function", FutureWarning, stacklevel=3)
self.__dict__.update(kwargs)
self.blocks = []
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.freq = freq
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self):
return None
@property
def _on(self):
return None
@property
def is_freq_type(self):
return self.win_type == 'freq'
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not \
is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in \
['right', 'both', 'left', 'neither']:
raise ValueError("closed must be 'right', 'left', 'both' or "
"'neither'")
def _convert_freq(self, how=None):
""" resample according to the how, return a new object """
obj = self._selected_obj
index = None
if (self.freq is not None and
isinstance(obj, (ABCSeries, ABCDataFrame))):
if how is not None:
warnings.warn("The how kw argument is deprecated and removed "
"in a future version. You can resample prior "
"to passing to a window function", FutureWarning,
stacklevel=6)
obj = obj.resample(self.freq).aggregate(how or 'asfreq')
return obj, index
def _create_blocks(self, how):
""" split data into blocks & return conformed data """
obj, index = self._convert_freq(how)
if index is not None:
index = self._on
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]),
copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj, index
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self):
return self.__class__.__name__
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self._window_type,
attrs=','.join(attrs))
def _get_index(self, index=None):
"""
Return index as ndarrays
Returns
-------
tuple of (index, index_as_ndarray)
"""
if self.is_freq_type:
if index is None:
index = self._on
return index, index.asi8
return index, index
def _prep_values(self, values=None, kill_inf=True, how=None):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = _ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = _ensure_float64(values)
except (ValueError, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
""" wrap a single result """
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(
result.ravel(), unit='ns').values.reshape(result.shape)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj):
"""
wrap the results
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
"""
from pandas import Series, concat
from pandas.core.index import _ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = _ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
if not len(final):
return obj.astype('float64')
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window):
""" center the result in the window """
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
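# A hedged sketch of the effect of centering (hypothetical data, illustration only):
# with window=3 and center=True the label sits in the middle of its window instead
# of at the right edge, e.g.
#   >>> pd.Series([0.0, 1.0, 2.0, 3.0, 4.0]).rolling(3, center=True).sum()
#   0    NaN
#   1    3.0
#   2    6.0
#   3    9.0
#   4    NaN
#   dtype: float64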
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
return self.apply(arg, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
_shared_docs['mean'] = dedent("""
%(name)s mean
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
class Window(_Window):
"""
Provides rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it's an offset then this will be the time period of each window. Each
window will be variable-sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
this will default to 1.
freq : string or DateOffset object, optional (default None)
.. deprecated:: 0.18.0
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. See the notes below.
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
closed : string, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
axis : int or string, default 0
Returns
-------
a Window or Rolling sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 1.0
2 2.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
...                    index = [pd.Timestamp('20130101 09:00:00'),
...                             pd.Timestamp('20130101 09:00:02'),
...                             pd.Timestamp('20130101 09:00:03'),
...                             pd.Timestamp('20130101 09:00:05'),
...                             pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
"""
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window < 0:
raise ValueError("window must be non-negative")
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window '
'weight')
if not isinstance(self.win_type, compat.string_types):
raise ValueError('Invalid win_type {0}'.format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError('Invalid win_type {0}'.format(self.win_type))
else:
raise ValueError('Invalid window {0}'.format(window))
def _prep_window(self, **kwargs):
"""
provide validation for our window type and return the window;
we have already been validated
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com._asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type,
arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
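# A hedged usage sketch (hypothetical data; requires scipy): extra parameters for
# parameterised window types are passed through the aggregation call and collected
# by _pop_args above, e.g.
#   >>> s = pd.Series(np.arange(10.0))
#   >>> s.rolling(5, win_type='gaussian').mean(std=2)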
def _apply_window(self, mean=True, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return _window.roll_window(np.concatenate((arg,
additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
See also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None) # noqa
groupby = kwargs.pop('groupby', None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super(GroupByMixin, self).__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch('count')
corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)
def _apply(self, func, name, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, compat.string_types):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(self, func, name=None, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
name : string, optional
name of this function
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj, index = self._create_blocks(how=how)
index, indexi = self._get_index(index=index)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = _ensure_float64(arg)
return cfunc(arg,
window, minp, indexi, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods,
closed=self.closed)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods,
closed=self.closed)
with np.errstate(all='ignore'):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
_shared_docs['count'] = """%(name)s count of number of non-NaN
observations inside provided window."""
def count(self):
blocks, obj, index = self._create_blocks(how=None)
index, indexi = self._get_index(index=index)
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(result, window=window, min_periods=0,
center=self.center,
closed=self.closed).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
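# A hedged sketch (hypothetical data): count() reports the non-NaN observations per
# window, and because min_periods=0 is used above, leading partial windows still
# produce a value, e.g.
#   >>> pd.Series([1.0, np.nan, 3.0]).rolling(2).count()
#   0    1.0
#   1    1.0
#   2    1.0
#   dtype: float64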
_shared_docs['apply'] = dedent(r"""
%(name)s function apply
Parameters
----------
func : function
Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")
def apply(self, func, args=(), kwargs={}):
# TODO: _level is unused?
_level = kwargs.pop('_level', None) # noqa
window = self._get_window()
offset = _offset(window, self.center)
index, indexi = self._get_index()
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
return _window.roll_generic(arg, window, minp, indexi, closed,
offset, func, args, kwargs)
return self._apply(f, func, args=args, kwargs=kwargs,
center=False)
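# A hedged sketch (hypothetical data): the supplied func receives each window as a
# plain ndarray and must reduce it to a single value, e.g.
#   >>> pd.Series([1.0, 2.0, 3.0, 4.0]).rolling(2).apply(lambda a: a[-1] - a[0])
#   0    NaN
#   1    1.0
#   2    1.0
#   3    1.0
#   dtype: float64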
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply('roll_sum', 'sum', **kwargs)
_shared_docs['max'] = dedent("""
%(name)s maximum
Parameters
----------
how : string, default 'max'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def max(self, how=None, *args, **kwargs):
nv.validate_window_func('max', args, kwargs)
if self.freq is not None and how is None:
how = 'max'
return self._apply('roll_max', 'max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum
Parameters
----------
how : string, default 'min'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def min(self, how=None, *args, **kwargs):
nv.validate_window_func('min', args, kwargs)
if self.freq is not None and how is None:
how = 'min'
return self._apply('roll_min', 'min', how=how, **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('roll_mean', 'mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
Parameters
----------
how : string, default 'median'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'median'
return self._apply('roll_median_c', 'median', how=how, **kwargs)
_shared_docs['std'] = dedent("""
%(name)s standard deviation
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func('std', args, kwargs)
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
_shared_docs['var'] = dedent("""
%(name)s variance
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func('var', args, kwargs)
return self._apply('roll_var', 'var',
check_minp=_require_min_periods(1), ddof=ddof,
**kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', 'skew',
check_minp=_require_min_periods(3), **kwargs)
_shared_docs['kurt'] = """Unbiased %(name)s kurtosis"""
def kurt(self, **kwargs):
return self._apply('roll_kurt', 'kurt',
check_minp=_require_min_periods(4), **kwargs)
_shared_docs['quantile'] = dedent("""
%(name)s quantile
Parameters
----------
quantile : float
0 <= quantile <= 1""")
def quantile(self, quantile, **kwargs):
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return _window.roll_max(arg, window, minp, indexi,
self.closed)
elif quantile == 0.0:
return _window.roll_min(arg, window, minp, indexi,
self.closed)
else:
return _window.roll_quantile(arg, window, minp, indexi,
self.closed, quantile)
return self._apply(f, 'quantile', quantile=quantile,
**kwargs)
_shared_docs['cov'] = dedent("""
%(name)s sample covariance
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype('float64')
Y = Y.astype('float64')
mean = lambda x: x.rolling(window, self.min_periods,
center=self.center).mean(**kwargs)
count = (X + Y).rolling(window=window,
center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
_shared_docs['corr'] = dedent("""
%(name)s sample correlation
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations
will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
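# A hedged sketch (hypothetical frame): with pairwise=True the result collects every
# column pair per window into a MultiIndexed DataFrame, while pairwise=False
# correlates only the matching columns of self and other, e.g.
#   >>> df = pd.DataFrame({'x': [1.0, 2.0, 3.0, 4.0], 'y': [4.0, 3.0, 2.0, 1.0]})
#   >>> df.rolling(3).corr(pairwise=True)   # MultiIndexed: (row, column) x column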
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(self._on,
(ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex))
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif (isinstance(self.obj, ABCDataFrame) and
self.on in self.obj.columns):
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError("invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on))
def validate(self):
super(Rolling, self).validate()
# we allow rolling on a datetimelike index
if ((self.obj.empty or self.is_datetimelike) and
isinstance(self.window, (compat.string_types, ABCDateOffset,
timedelta))):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError("center is not implemented "
"for datetimelike and offset "
"based windows")
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = 'freq'
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError("closed only implemented for datetimelike "
"and offset based windows")
def _validate_monotonic(self):
""" validate on is monotonic """
if not self._on.is_monotonic:
formatted = self.on or 'index'
raise ValueError("{0} must be "
"monotonic".format(formatted))
def _validate_freq(self):
""" validate & return our freq """
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError("passed window {0} in not "
"compat with a datetimelike "
"index".format(self.window))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
See also
--------
pandas.Series.rolling
pandas.DataFrame.rolling
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Rolling, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply('roll_count', 'count')
return super(Rolling, self).count()
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_rolling_func('sum', args, kwargs)
return super(Rolling, self).sum(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_rolling_func('max', args, kwargs)
return super(Rolling, self).max(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_rolling_func('min', args, kwargs)
return super(Rolling, self).min(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_rolling_func('mean', args, kwargs)
return super(Rolling, self).mean(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Rolling, self).median(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('std', args, kwargs)
return super(Rolling, self).std(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('var', args, kwargs)
return super(Rolling, self).var(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Rolling, self).skew(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Rolling, self).kurt(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Rolling, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Rolling, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Rolling, self).corr(other=other, pairwise=pairwise,
**kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provides a rolling groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provides expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
.. deprecated:: 0.18.0
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
axis : int or string, default 0
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_attributes = ['min_periods', 'freq', 'center', 'axis']
def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
**kwargs):
super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
freq=freq, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
obj = self._selected_obj
if other is None:
return (max(len(obj), self.min_periods) if self.min_periods
else len(obj))
return (max((len(obj) + len(obj)), self.min_periods)
if self.min_periods else (len(obj) + len(obj)))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.expanding.aggregate
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Expanding, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self, **kwargs):
return super(Expanding, self).count(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Expanding, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_expanding_func('sum', args, kwargs)
return super(Expanding, self).sum(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_expanding_func('max', args, kwargs)
return super(Expanding, self).max(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_expanding_func('min', args, kwargs)
return super(Expanding, self).min(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_expanding_func('mean', args, kwargs)
return super(Expanding, self).mean(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Expanding, self).median(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('std', args, kwargs)
return super(Expanding, self).std(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('var', args, kwargs)
return super(Expanding, self).var(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Expanding, self).skew(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Expanding, self).kurt(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Expanding, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Expanding, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Expanding, self).corr(other=other, pairwise=pairwise,
**kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
"""
Provides an expanding groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Expanding
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations will
be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
r"""
Provides exponential weighted functions
.. versionadded:: 0.18.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
.. deprecated:: 0.18.0
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
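As a small worked example of the formulas above: with ``alpha=0.5`` and
``adjust=True`` the second value of the exponentially weighted mean of ``[0, 1]``
is ``(0.5*0 + 1*1) / (0.5 + 1) = 2/3``, whereas with ``adjust=False`` it is
``0.5*0 + 0.5*1 = 0.5``.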
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
_attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.freq = freq
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
self.on = None
@property
def _constructor(self):
return EWM
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(EWM, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, how=None, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input argument
"""
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj)
@Substitution(name='ewm')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""exponential weighted moving average"""
nv.validate_window_func('mean', args, kwargs)
return self._apply('ewma', **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
"""exponential weighted moving stddev"""
nv.validate_window_func('std', args, kwargs)
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
"""exponential weighted moving variance"""
nv.validate_window_func('var', args, kwargs)
def f(arg):
return _window.ewmcov(arg, arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
int(bias))
return self._apply(f, **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""exponential weighted sample covariance"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
int(self.adjust), int(self.ignore_na),
int(self.min_periods), int(bias))
return X._wrap_result(cov)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""exponential weighted sample correlation"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_corr(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
def _cov(x, y):
return _window.ewmcov(x, y, self.com, int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
1)
x_values = X._prep_values()
y_values = Y._prep_values()
with np.errstate(all='ignore'):
cov = _cov(x_values, y_values)
x_var = _cov(x_values, x_values)
y_var = _cov(y_values, y_values)
corr = cov / _zsqrt(x_var * y_var)
return X._wrap_result(corr)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and
isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if (isinstance(arg1, (np.ndarray, ABCSeries)) and
isinstance(arg2, (np.ndarray, ABCSeries))):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, ABCDataFrame):
from pandas import DataFrame
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, ABCDataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
with warnings.catch_warnings(record=True):
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
with warnings.catch_warnings(record=True):
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index,
columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j < i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
# TODO: not the most efficient (perf-wise)
# though not bad code-wise
from pandas import Panel, MultiIndex, concat
with warnings.catch_warnings(record=True):
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
if len(p.items):
result = concat(
[p.iloc[i].T for i in range(len(p.items))],
keys=p.items)
else:
result = DataFrame(
index=MultiIndex(levels=[arg1.index, arg1.columns],
labels=[[], []]),
columns=arg2.columns,
dtype='float64')
# reset our index names to arg1 names
# reset our column names to arg2 names
# careful not to mutate the original names
result.columns = result.columns.set_names(
arg2.columns.names)
result.index = result.index.set_names(
arg1.index.names + arg1.columns.names)
return result
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
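# Informal summary of the dispatch in _flex_binary_moment above: Series/Series
# (or ndarray/ndarray) inputs go straight to ``f(X, Y)``; DataFrame vs DataFrame
# with pairwise=False applies f column-by-column and returns a DataFrame;
# pairwise=True applies f to every column pair and returns a MultiIndex-ed
# result (assembled via Panel); DataFrame vs Series/ndarray broadcasts the
# second argument against each column of the DataFrame.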
def _get_center_of_mass(com, span, halflife, alpha):
valid_count = len([x for x in [com, span, halflife, alpha]
if x is not None])
if valid_count > 1:
raise ValueError("com, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if com is not None:
if com < 0:
raise ValueError("com must satisfy: com >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
com = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
com = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of com, span, halflife, or alpha")
return float(com)
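# Worked examples of the conversions above (illustrative only, not executed):
#   span=9     -> com = (9 - 1) / 2 = 4.0
#   halflife=1 -> decay = 1 - exp(log(0.5) / 1) = 0.5, so com = 1 / 0.5 - 1 = 1.0
#   alpha=0.5  -> com = (1 - 0.5) / 0.5 = 1.0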
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
except:
return offset.astype(int)
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
with np.errstate(all='ignore'):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, ABCDataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
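# For intuition (illustrative only): _zsqrt(pd.Series([4.0, -1.0])) gives
# [2.0, 0.0] -- negative inputs (e.g. variances pushed slightly below zero by
# floating-point error) are clipped to 0 rather than becoming NaN.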
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
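# Sketch of the alignment trick above (doctest-style, not executed): adding
# ``0 * other`` aligns both operands on a common index and propagates NaN
# wherever either side is missing, e.g. for two Series:
#   >>> s1 = pd.Series([1., 2.], index=['a', 'b'])
#   >>> s2 = pd.Series([3., 4.], index=['b', 'c'])
#   >>> s1 + 0 * s2  # doctest: +SKIP
#   a    NaN
#   b    2.0
#   c    NaN
#   dtype: float64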
# Top-level exports
def rolling(obj, win_type=None, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
if win_type is not None:
return Window(obj, win_type=win_type, **kwds)
return Rolling(obj, **kwds)
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return Expanding(obj, **kwds)
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return EWM(obj, **kwds)
ewm.__doc__ = EWM.__doc__
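# Minimal usage sketch for the helpers above (doctest-style, not executed;
# assumes ``df`` is a DataFrame with numeric columns):
#   >>> rolling(df, window=3).mean()        # doctest: +SKIP
#   >>> expanding(df, min_periods=1).sum()  # doctest: +SKIP
#   >>> ewm(df, alpha=0.5).mean()           # doctest: +SKIP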
| Winand/pandas | pandas/core/window.py | Python | bsd-3-clause | 68,740 | 0.000029 |
#!/usr/bin/env python
#### Sudoku generator ####
import random
import time
from collections import defaultdict
class Square(object):
'''Main class holding the attributes for each square of the sudoku'''
def __init__(self, x, y):
self.value = None
self.x = x
self.y = y
self.free = range(1, 10)
self.region = None
def addValue(self, value):
self.value = value
def addRegion(self, region):
self.region = region
def removeFromFreeValues(self, value):
self.free.remove(value)
def restoreFree(self):
self.free = range(1,10)
def removeValue(self):
self.value = None
def getValue(self):
return self.value
def getX(self):
return self.x
def getY(self):
return self.y
def getFree(self):
return self.free
def getRegion(self):
return self.region
def createBoard():
board = [ Square(y, x) for x in range(9) for y in range(9) ]
return board
def defineRegions(board):
for square in board:
if square.getX() < 3 and square.getY() < 3:
square.addRegion(0)
elif 3 <= square.getX() < 6 and square.getY() < 3:
square.addRegion(1)
elif 6 <= square.getX() < 9 and square.getY() < 3:
square.addRegion(2)
elif square.getX() < 3 and 3 <= square.getY() < 6:
square.addRegion(3)
elif 3 <= square.getX() < 6 and 3 <= square.getY() < 6:
square.addRegion(4)
elif 6 <= square.getX() < 9 and 3 <= square.getY() < 6:
square.addRegion(5)
elif square.getX() < 3 and 6 <= square.getY() < 9:
square.addRegion(6)
elif 3 <= square.getX() < 6 and 6 <= square.getY() < 9:
square.addRegion(7)
elif 6 <= square.getX() < 9 and 6 <= square.getY() < 9:
square.addRegion(8)
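# The branch ladder above follows the standard 3x3 block numbering; it is
# equivalent to region = (y // 3) * 3 + (x // 3), e.g. a square at x=7, y=4
# falls in region (4 // 3) * 3 + (7 // 3) = 5.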
def defineXs(board):
Xdict = {}
for i in range(9):
x_squares = []
for square in board:
if square.getX() == i:
x_squares.append(square)
Xdict[i] = x_squares
return Xdict
def defineYs(board):
Ydict = {}
for i in range(9):
y_squares = []
for square in board:
if square.getY() == i:
y_squares.append(square)
Ydict[i] = y_squares
return Ydict
def defineRegionslist(board):
regions = {}
for i in range(9):
r_squares = []
for square in board:
if square.getRegion() == i:
r_squares.append(square)
regions[i] = r_squares
return regions
def checkIfFree(board, current_square):
    return len(current_square.getFree()) > 0
def setValueOnce(value, current_square):
current_square.addValue(value)
current_square.removeFromFreeValues(value)
def checkXValidity(board, current_square):
sameXlist = defineXs(board)[current_square.getX()]
sameXlist.remove(current_square)
x_values = []
for square in sameXlist:
x_values.append(square.getValue())
if current_square.getValue() in x_values:
return False
else:
return True
def checkYValidity(board, current_square):
sameYlist = defineYs(board)[current_square.getY()]
sameYlist.remove(current_square)
y_values = []
for square in sameYlist:
y_values.append(square.getValue())
if current_square.getValue() in y_values:
return False
else:
return True
def checkRegionValidity(board, current_square):
sameRegionlist = defineRegionslist(board)[current_square.getRegion()]
sameRegionlist.remove(current_square)
r_values = []
for square in sameRegionlist:
r_values.append(square.getValue())
if current_square.getValue() in r_values:
return False
else:
return True
def checkConditions(board, square):
    return (checkXValidity(board, square) and checkYValidity(board, square) and
            checkRegionValidity(board, square))
def CreateSudoku():
board = createBoard()
defineRegions(board)
index = 0
while index < 81:
current_square = board[index]
if checkIfFree(board, current_square) == False:
current_square.restoreFree()
current_square.removeValue()
index -= 1
continue
value = random.choice(current_square.getFree())
setValueOnce(value, current_square)
if checkConditions(board, current_square) == False:
continue
else:
index += 1
return board
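# The loop above fills squares left to right and backtracks one square at a
# time: when a square exhausts its candidate values, its state is reset and the
# index steps back so the previous square can retry a different random choice.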
def printSudoku(board):
line = "#---+---+---#---+---+---#---+---+---#"
line_thick = "#####################################"
print line
for s in board:
if (s.getX() ) % 3 == 0:
print '# ',
elif random.random() > 0.3:
print '| ',
else:
print '| %d' %(s.getValue()),
if (s.getX() +1) % 9 == 0:
if (s.getY() + 1) % 3 == 0:
print '#\n', line_thick
else:
print '#\n', line
if __name__ == "__main__":
sudoku = CreateSudoku()
printSudoku(sudoku)
| michalczaplinski/sudoku | sudoku_generator.py | Python | bsd-3-clause | 5,317 | 0.00583 |
#!/usr/bin/env python
from __future__ import absolute_import
import urllib2
from pytomo import lib_youtube_download
from pytomo import start_pytomo
start_pytomo.configure_log_file('http_test')
ip_address_uri = ("http://173.194.5.107/videoplayback?sparams=id%2Cexpire%2Cip%2Cipbits%2Citag%2Calgorithm%2Cburst%2Cfactor&algorithm=throttle-factor&itag=34&ipbits=8&burst=40&sver=3&signature=CE60F2B393D8E55A0B8529FCB0AAEDEC876A2C8C.9DAE7AE311AD2D4AE8094715551F8E2482DEA790&expire=1304107200&key=yt1&ip=193.0.0.0&factor=1.25&id=39d17ea226880992")
info = {'accept-ranges': 'bytes',
'cache-control': 'private, max-age=20576',
'connection': 'close',
'Content-length': '16840065',
'content-type': 'video/x-flv',
'date': 'Fri, 29 Apr 2011 14:12:04 GMT',
'expires': 'Fri, 29 Apr 2011 19:55:00 GMT',
'last-modified': 'Fri, 18 Jun 2010 12:05:11 GMT',
'server': 'gvs 1.0',
'via': '1.1 goodway (NetCache NetApp/6.1.1), 1.1 s-proxy (NetCache NetApp/ 5.6.2R2)',
'x-content-type-options': 'nosniff'}
def mock_response(req):
if req.get_full_url() == ip_address_uri:
mock_file = open('test_pytomo/OdF-oiaICZI.flv')
resp = urllib2.addinfourl(mock_file,info ,
req.get_full_url())
resp.code = 200
resp.msg = "OK"
return resp
class MyHTTPHandler(urllib2.HTTPHandler):
def http_open(self, req):
print "mock opener"
return mock_response(req)
my_opener = urllib2.build_opener(MyHTTPHandler)
urllib2.install_opener(my_opener)
filedownloader = lib_youtube_download.FileDownloader(30)
h = filedownloader._do_download(ip_address_uri)
print h
#
#response = urllib2.urlopen(ip_address_uri)
#print response.read()
#print response.code
#print response.msg
| Jamlum/pytomo | test_http_server.py | Python | gpl-2.0 | 1,820 | 0.006044 |
from django.contrib import admin
from models import FileSet
admin.site.register(FileSet)
| oswalpalash/OctaveCodeShare | scipy_central/filestorage/admin.py | Python | bsd-3-clause | 89 | 0 |
from node import DotXMLDoc, AttributeParsingError
class SimpleXmlApi(object):
"""
The main API class, comprising a map of attributes to dotted path names.
Accessing an attribute that has been mapped to a dotted name will return
the text value of that node/attribute. If an attribute is passed that
isn't in the map, it's passed off to the L{DotXMLDoc} instance, so that
the document can be walked manually.
May be subclassed, overriding C{_map}, to provide custom APIs for known XML
structures.
"""
_map = {}
_doc = None
def __init__(self, source="", map=None):
"""
@param source: A string containing an XML document
@type source: str
        @param map: A dictionary mapping attribute names to dotted paths.
@type map: dict
@return: void
"""
if map is not None:
self.load_map(map)
self.load_source(source)
def add_mapping(self, name, path):
"""
        Add a new attribute-to-dotted-name mapping to the instance's map
        registry.
@param name: The name of the attribute.
@type name: str
@param path: A dotted name that can be traversed.
@type path: str
@return: void
"""
self._map[name] = path
def load_source(self, source):
"""
Parse an XML document and set it as this API's target.
@param source: A string containing an XML document.
@type source: str
@return: void
"""
self._doc = DotXMLDoc(source)
def load_map(self, map):
"""
Update the attribute registry with one or more mappings. Will not
remove attributes that currently exist.
@param map: A dictionary of the form C{\{'attribute':'dotted.name'\}}
@type map: dict
@return: void
"""
self._map.update(map)
def del_mapping(self, name):
"""
Remove an attribute mapping from the registry.
@param name: The name of the attribute to remove from the registry.
@type name: str
@return: void
"""
try: del self._map[name]
except KeyError: pass
def __getattr__(self, attr):
try:
return self._traverse(self._map[attr])
except KeyError:
return getattr(self._doc, attr)
def _traverse(self, path):
"""
Traverse a dotted path against the XML document in memory and return
its text value.
@param path: A dotted path that will resolve to a node or attribute.
@type path: str
@return: The text value of the node.
@rtype: str
"""
try:
return eval("self._doc.%s" % path).getValue()
except SyntaxError:
raise AttributeParsingError
def factory(source, map=None, cls=None):
"""
Create a new L{SimpleXmlApi} instance using the given source and optional
attribute map.
To create an instance of a subclass, pass in the C{cls} attribute.
"""
if cls is None:
cls = SimpleXmlApi
return cls(source, map)
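# Hypothetical usage sketch (the XML and dotted path below are invented for
# illustration; the exact path form depends on DotXMLDoc's traversal rules):
#   >>> api = factory("<book><title>Dune</title></book>",
#   ...               map={'title': 'book.title'})  # doctest: +SKIP
#   >>> api.title  # doctest: +SKIP
#   'Dune'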
| iancmcc/simplexmlapi | simplexmlapi/api.py | Python | mit | 3,090 | 0.002913 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with a double-spend conflict
#
from test_framework.test_framework import BitsendTestFramework
from test_framework.util import *
class TxnMallTest(BitsendTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 BSD:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 1240 BSD to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {}
outputs[node1_address] = 1240
outputs[change_address] = 1248 - 1240 + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
# Create two spends using 1 50 BSD coin each
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BSD for another
# matured block, minus 40, minus 20, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 100BSD for
# two more matured blocks, minus 1240 for the double-spend, plus fees (which are
# negative):
expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
# fees (which are negative)
assert_equal(self.nodes[0].getbalance("foo"), 1219)
assert_equal(self.nodes[0].getbalance("bar"), 29)
assert_equal(self.nodes[0].getbalance(""), starting_balance
-1219
- 29
-1240
+ 100
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
# Node1's "from0" account balance should be just the doublespend:
assert_equal(self.nodes[1].getbalance("from0"), 1240)
if __name__ == '__main__':
TxnMallTest().main()
| madzebra/BitSend | qa/rpc-tests/txn_doublespend.py | Python | mit | 6,649 | 0.004362 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Idea.color'
db.add_column(u'brainstorming_idea', 'color',
self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Idea.color'
db.delete_column(u'brainstorming_idea', 'color')
models = {
u'brainstorming.brainstorming': {
'Meta': {'ordering': "['-created']", 'object_name': 'Brainstorming'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'creator_ip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'brainstorming.brainstormingwatcher': {
'Meta': {'ordering': "['-created']", 'unique_together': "(('brainstorming', 'email'),)", 'object_name': 'BrainstormingWatcher'},
'brainstorming': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['brainstorming.Brainstorming']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'brainstorming.emailverification': {
'Meta': {'ordering': "['-created']", 'object_name': 'EmailVerification'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'brainstorming.idea': {
'Meta': {'ordering': "['-created']", 'object_name': 'Idea'},
'brainstorming': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['brainstorming.Brainstorming']"}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator_ip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'creator_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'ratings': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['brainstorming'] | atizo/braindump | brainstorming/migrations/0005_auto__add_field_idea_color.py | Python | mit | 4,031 | 0.007938 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import os.path
import tempfile
import numpy
import scipy.linalg
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamulticore
from gensim.models.wrappers import ldamallet
from gensim import matutils
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
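# For reference: dictionary.doc2bow(['human', 'interface', 'computer']) returns
# a sparse bag-of-words such as [(0, 1), (1, 1), (2, 1)] -- (token_id, count)
# pairs whose exact ids depend on the order the Dictionary assigned them.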
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_models.tst')
class TestLsiModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
"""Test lsi[vector] transformation."""
# create the transformation model
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# make sure the decomposition is enough accurate
u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
self.assertTrue(numpy.allclose(s[:2], model.projection.s)) # singular values must match
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.6594664, 0.142115444]) # scaled LSI version
# expected = numpy.array([-0.1973928, 0.05591352]) # non-scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected))) # transformed entries must be equal up to sign
def testCorpusTransform(self):
"""Test lsi[corpus] transformation."""
model = lsimodel.LsiModel(self.corpus, num_topics=2)
got = numpy.vstack(matutils.sparse2full(doc, 2) for doc in model[self.corpus])
expected = numpy.array([
[ 0.65946639, 0.14211544],
[ 2.02454305, -0.42088759],
[ 1.54655361, 0.32358921],
[ 1.81114125, 0.5890525 ],
[ 0.9336738 , -0.27138939],
[ 0.01274618, -0.49016181],
[ 0.04888203, -1.11294699],
[ 0.08063836, -1.56345594],
[ 0.27381003, -1.34694159]])
self.assertTrue(numpy.allclose(abs(got), abs(expected))) # must equal up to sign
def testOnlineTransform(self):
corpus = list(self.corpus)
doc = corpus[0] # use the corpus' first document for testing
# create the transformation model
model2 = lsimodel.LsiModel(corpus=corpus, num_topics=5) # compute everything at once
model = lsimodel.LsiModel(corpus=None, id2word=model2.id2word, num_topics=5) # start with no documents, we will add them later
# train model on a single document
model.add_documents([corpus[0]])
# transform the testing document with this partial transformation
transformed = model[doc]
vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
expected = numpy.array([-1.73205078, 0.0, 0.0, 0.0, 0.0]) # scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign
# train on another 4 documents
model.add_documents(corpus[1:5], chunksize=2) # train on 4 extra docs, in chunks of 2 documents, for the lols
# transform a document with this partial transformation
transformed = model[doc]
vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.66493785, -0.28314203, -1.56376302, 0.05488682, 0.17123269]) # scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign
# train on the rest of documents
model.add_documents(corpus[5:])
# make sure the final transformation is the same as if we had decomposed the whole corpus at once
vec1 = matutils.sparse2full(model[doc], model.num_topics)
vec2 = matutils.sparse2full(model2[doc], model2.num_topics)
self.assertTrue(numpy.allclose(abs(vec1), abs(vec2), atol=1e-5)) # the two LSI representations must equal up to sign
def testPersistence(self):
fname = testfile()
model = lsimodel.LsiModel(self.corpus, num_topics=2)
model.save(fname)
model2 = lsimodel.LsiModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = lsimodel.LsiModel(self.corpus, num_topics=2)
model.save(fname)
model2 = lsimodel.LsiModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = testfile()
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# test storing the internal arrays into separate files
model.save(fname, sep_limit=0)
# now load the external arrays via mmap
model2 = lsimodel.LsiModel.load(fname, mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.projection.u, numpy.memmap))
self.assertTrue(isinstance(model2.projection.s, numpy.memmap))
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = testfile() + '.gz'
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# test storing the internal arrays into separate files
model.save(fname, sep_limit=0)
# now load the external arrays via mmap
return
# turns out this test doesn't exercise this because there are no arrays
# to be mmaped!
self.assertRaises(IOError, lsimodel.LsiModel.load, fname, mmap='r')
#endclass TestLsiModel
class TestRpModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
# create the transformation model
numpy.random.seed(13) # HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
model = rpmodel.RpModel(self.corpus, num_topics=2)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.70710677, 0.70710677])
self.assertTrue(numpy.allclose(vec, expected)) # transformed entries must be equal up to sign
def testPersistence(self):
fname = testfile()
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
#endclass TestRpModel
class TestLdaModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
self.class_ = ldamodel.LdaModel
def testTransform(self):
passed = False
# sometimes, LDA training gets stuck at a local minimum
# in that case try re-training the model from scratch, hoping for a
# better random initialization
for i in range(5): # restart at most 5 times
# create the transformation model
model = self.class_(id2word=dictionary, num_topics=2, passes=100)
model.update(self.corpus)
# transform one document
doc = list(corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0.13, 0.87]
passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2) # must contain the same values, up to re-ordering
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, sorted(vec), sorted(expected)))
self.assertTrue(passed)
def testTopTopics(self):
# create the transformation model
model = self.class_(id2word=dictionary, num_topics=2, passes=100)
model.update(self.corpus)
model.top_topics(self.corpus)
def testPasses(self):
# long message includes the original error message with a custom one
self.longMessage = True
# construct what we expect when passes aren't involved
test_rhots = list()
model = self.class_(id2word=dictionary, chunksize=1, num_topics=2)
final_rhot = lambda: pow(model.offset + (1 * model.num_updates) / model.chunksize, -model.decay)
# generate 5 updates to test rhot on
for x in range(5):
model.update(self.corpus)
test_rhots.append(final_rhot())
for passes in [1, 5, 10, 50, 100]:
model = self.class_(id2word=dictionary, chunksize=1, num_topics=2, passes=passes)
self.assertEqual(final_rhot(), 1.0)
# make sure the rhot matches the test after each update
for test_rhot in test_rhots:
model.update(self.corpus)
msg = ", ".join(map(str, [passes, model.num_updates, model.state.numdocs]))
self.assertAlmostEqual(final_rhot(), test_rhot, msg=msg)
self.assertEqual(model.state.numdocs, len(corpus) * len(test_rhots))
self.assertEqual(model.num_updates, len(corpus) * len(test_rhots))
# def testTopicSeeding(self):
# for topic in range(2):
# passed = False
# for i in range(5): # restart at most this many times, to mitigate LDA randomness
# # try seeding it both ways round, check you get the same
# # topics out but with which way round they are depending
# # on the way round they're seeded
# eta = numpy.ones((2, len(dictionary))) * 0.5
# system = dictionary.token2id[u'system']
# trees = dictionary.token2id[u'trees']
# # aggressively seed the word 'system', in one of the
# # two topics, 10 times higher than the other words
# eta[topic, system] *= 10.0
# model = self.class_(id2word=dictionary, num_topics=2, passes=200, eta=eta)
# model.update(self.corpus)
# topics = [dict((word, p) for p, word in model.show_topic(j, topn=None)) for j in range(2)]
# # check that the word 'system' in the topic we seeded got a high weight,
# # and the word 'trees' (the main word in the other topic) a low weight --
# # and vice versa for the other topic (which we didn't seed with 'system')
# passed = (
# (topics[topic][u'system'] > topics[topic][u'trees'])
# and
# (topics[1 - topic][u'system'] < topics[1 - topic][u'trees'])
# )
# if passed:
# break
# logging.warning("LDA failed to converge on attempt %i (got %s)", i, topics)
# self.assertTrue(passed)
def testPersistence(self):
fname = testfile()
model = self.class_(self.corpus, num_topics=2)
model.save(fname)
model2 = self.class_.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = self.class_(self.corpus, num_topics=2)
model.save(fname)
model2 = self.class_.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = testfile()
model = self.class_(self.corpus, num_topics=2)
# simulate storing large arrays separately
model.save(testfile(), sep_limit=0)
# test loading the large model arrays with mmap
model2 = self.class_.load(testfile(), mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.expElogbeta, numpy.memmap))
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = testfile() + '.gz'
model = self.class_(self.corpus, num_topics=2)
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, self.class_.load, fname, mmap='r')
#endclass TestLdaModel
class TestLdaMulticore(TestLdaModel):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
self.class_ = ldamulticore.LdaMulticore
#endclass TestLdaMulticore
class TestLdaMallet(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
mallet_home = os.environ.get('MALLET_HOME', None)
self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
def testTransform(self):
if not self.mallet_path:
return
passed = False
for i in range(5): # restart at most 5 times
# create the transformation model
model = ldamallet.LdaMallet(self.mallet_path, corpus, id2word=dictionary, num_topics=2, iterations=200)
# transform one document
doc = list(corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0.49, 0.51]
passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2) # must contain the same values, up to re-ordering
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, sorted(vec), sorted(expected)))
self.assertTrue(passed)
def testPersistence(self):
if not self.mallet_path:
return
fname = testfile()
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
model.save(fname)
model2 = ldamallet.LdaMallet.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
if not self.mallet_path:
return
fname = testfile() + '.gz'
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
model.save(fname)
model2 = ldamallet.LdaMallet.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
if not self.mallet_path:
return
fname = testfile()
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
# simulate storing large arrays separately
model.save(testfile(), sep_limit=0)
# test loading the large model arrays with mmap
model2 = ldamodel.LdaModel.load(testfile(), mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.wordtopics, numpy.memmap))
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
if not self.mallet_path:
return
fname = testfile() + '.gz'
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, ldamodel.LdaModel.load, fname, mmap='r')
#endclass TestLdaMallet
class TestTfidfModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
# create the transformation model
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
expected = [(0, 0.57735026918962573), (1, 0.57735026918962573), (2, 0.57735026918962573)]
self.assertTrue(numpy.allclose(transformed, expected))
def testInit(self):
# create the transformation model by analyzing a corpus
# uses the global `corpus`!
model1 = tfidfmodel.TfidfModel(corpus)
# make sure the dfs<->idfs transformation works
self.assertEqual(model1.dfs, dictionary.dfs)
self.assertEqual(model1.idfs, tfidfmodel.precompute_idfs(model1.wglobal, dictionary.dfs, len(corpus)))
# create the transformation model by directly supplying a term->docfreq
# mapping from the global var `dictionary`.
model2 = tfidfmodel.TfidfModel(dictionary=dictionary)
self.assertEqual(model1.idfs, model2.idfs)
def testPersistence(self):
fname = testfile()
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname)
self.assertTrue(model.idfs == model2.idfs)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
#endclass TestTfidfModel
class TestLogEntropyModel(unittest.TestCase):
def setUp(self):
self.corpus_small = mmcorpus.MmCorpus(datapath('test_corpus_small.mm'))
self.corpus_ok = mmcorpus.MmCorpus(datapath('test_corpus_ok.mm'))
def testTransform(self):
# create the transformation model
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=False)
# transform one document
doc = list(self.corpus_ok)[0]
transformed = model[doc]
expected = [(0, 0.3748900964125389),
(1, 0.30730215324230725),
(3, 1.20941755462856)]
self.assertTrue(numpy.allclose(transformed, expected))
def testPersistence(self):
fname = testfile()
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
model.save(fname)
model2 = logentropy_model.LogEntropyModel.load(fname)
self.assertTrue(model.entr == model2.entr)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
model.save(fname)
model2 = logentropy_model.LogEntropyModel.load(fname, mmap=None)
self.assertTrue(model.entr == model2.entr)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))
#endclass TestLogEntropyModel
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| jyt109/gensim | gensim/test/test_models.py | Python | gpl-3.0 | 23,020 | 0.006125 |
#!/usr/bin/env python
"""Robobonobo setup script.
Usage:
./get_ready.py [options]
Options:
-h, --help Show this help screen
--version Show the version.
"""
from docopt import docopt
from glob import glob
import os
GPIOS = [30, 31, 112, 113, 65, 27]
GPIO_BASE = "/sys/class/gpio"
SLOTS_GLOB = "/sys/devices/bone_capemgr.?/slots"
def write_gpio(filename, msg, pindir=""):
with open(os.path.join(GPIO_BASE, pindir, filename), mode="w+") as ex:
ex.write(msg)
def setup_gpio(pin):
write_gpio("export", pin)
pindir = "gpio" + pin
write_gpio("direction", "out", pindir)
write_gpio("value", "0", pindir)
def setup_dto():
for match in glob(SLOTS_GLOB):
with open(match, mode="w+") as slots:
slots.write("robobonobo")
def main():
for gpio in GPIOS:
setup_gpio(str(gpio))
setup_dto()
if __name__ == "__main__":
args = docopt(__doc__, version="Robobonobo setup script v1")
main()
| dunmatt/robobonobo | scripts/get_ready.py | Python | mit | 979 | 0 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is used by chrome_tests.gypi's js2webui action to maintain the
argument lists and to generate inlinable tests.
"""
import json
import optparse
import os
import subprocess
import sys
import shutil
def HasSameContent(filename, content):
'''Returns true if the given file is readable and has the given content.'''
try:
with open(filename) as file:
return file.read() == content
except:
# Ignore all errors and fall back on a safe bet.
return False
def main ():
parser = optparse.OptionParser()
parser.set_usage(
"%prog v8_shell mock.js test_api.js js2webui.js "
"testtype inputfile inputrelfile cxxoutfile jsoutfile")
parser.add_option('-v', '--verbose', action='store_true')
parser.add_option('-n', '--impotent', action='store_true',
help="don't execute; just print (as if verbose)")
parser.add_option('--deps_js', action="store",
help=("Path to deps.js for dependency resolution, " +
"optional."))
parser.add_option('--external', action='store',
help="Load V8's initial snapshot from external files (y/n)")
(opts, args) = parser.parse_args()
if len(args) != 9:
parser.error('all arguments are required.')
(v8_shell, mock_js, test_api, js2webui, test_type,
inputfile, inputrelfile, cxxoutfile, jsoutfile) = args
cmd = [v8_shell]
icudatafile = os.path.join(os.path.dirname(v8_shell), 'icudtl.dat')
if os.path.exists(icudatafile):
cmd.extend(['--icu-data-file=%s' % icudatafile])
v8nativesfile = os.path.join(os.path.dirname(v8_shell), 'natives_blob.bin')
if opts.external == 'y' and os.path.exists(v8nativesfile):
cmd.extend(['--natives_blob=%s' % v8nativesfile])
v8snapshotfile = os.path.join(os.path.dirname(v8_shell), 'snapshot_blob.bin')
if opts.external == 'y' and os.path.exists(v8snapshotfile):
cmd.extend(['--snapshot_blob=%s' % v8snapshotfile])
arguments = [js2webui, inputfile, inputrelfile, opts.deps_js,
cxxoutfile, test_type]
cmd.extend(['-e', "arguments=" + json.dumps(arguments), mock_js,
test_api, js2webui])
if opts.verbose or opts.impotent:
print cmd
if not opts.impotent:
try:
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
out, err = p.communicate()
if not HasSameContent(cxxoutfile, out):
with open(cxxoutfile, 'wb') as f:
f.write(out)
shutil.copyfile(inputfile, jsoutfile)
except Exception, ex:
if os.path.exists(cxxoutfile):
os.remove(cxxoutfile)
if os.path.exists(jsoutfile):
os.remove(jsoutfile)
raise
if __name__ == '__main__':
sys.exit(main())
| hgl888/chromium-crosswalk | tools/gypv8sh.py | Python | bsd-3-clause | 2,927 | 0.012641 |
#! /usr/bin/env python
from __future__ import print_function
import os
import time
from github import Github
gh = Github(os.environ['DMWMBOT_TOKEN'])
codeRepo = os.environ.get('CODE_REPO', 'WMCore')
teamName = os.environ.get('WMCORE_REPO', 'dmwm')
repoName = '%s/%s' % (teamName, codeRepo)
issueID = None
if 'ghprbPullId' in os.environ:
issueID = os.environ['ghprbPullId']
mode = 'PR'
elif 'TargetIssueID' in os.environ:
issueID = os.environ['TargetIssueID']
mode = 'Daily'
print("Looking for %s issue %s" % (repoName, issueID))
repo = gh.get_repo(repoName)
issue = repo.get_issue(int(issueID))
reportURL = os.environ['BUILD_URL']
lastCommit = repo.get_pull(int(issueID)).get_commits().get_page(0)[-1]
lastCommit.create_status(state='pending', target_url=reportURL,
description='Tests started at ' + time.strftime("%d %b %Y %H:%M GMT"))
| dmwm/Docker | jenkins_python/scripts/PullRequestTestBegin.py | Python | apache-2.0 | 887 | 0.001127 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Manage data for pretraining and RL tasks."""
import ast
from collections import namedtuple
from absl import logging
from single_task import code_tasks # brain coder
RLBatch = namedtuple('RLBatch', ['reward_fns', 'batch_size', 'good_reward'])
class DataManager(object):
"""Interface between environment and model."""
def __init__(self, global_config, run_number=None,
do_code_simplification=False):
"""Constructs a DataManager.
Args:
global_config: A config_lib.Config instance containing all config. See
config in defaults.py.
run_number: Which run this is (of the same experiment). This should be set
when a task cycle is defined in the config. A task cycle is a list of
tasks to cycle through repeatedly, and the selected task is a function
of the run number, i.e. 0-th run, 1-st run, 2-nd run, etc...
This can be None if only a single task is set in the config.
do_code_simplification: When global_config.env.config_for_iclr is True,
use this option to create code simplification (code golf) tasks, vs
fixed length coding tasks. If True, a task with code simplification
reward will be constructed.
Raises:
ValueError: If global_config.env.task and global_config.env.task_cycle
are both set, or both not set. Only one should be given.
ValueError: If global_config.env.task_cycle is set but run_number is None.
"""
env_config = global_config.env
self.batch_size = global_config.batch_size
if env_config.task_cycle:
if env_config.task:
raise ValueError('Do not set both `task` and `task_cycle`.')
if run_number is None:
raise ValueError('Do not use task_cycle for single-run experiment.')
index = run_number % len(env_config.task_cycle)
self.task_name = env_config.task_cycle[index]
logging.info('run_number: %d, task_cycle index: %d', run_number, index)
logging.info('task_cycle: %s', env_config.task_cycle)
elif env_config.task:
self.task_name = env_config.task
else:
raise ValueError('Either `task` or `task_cycle` must be set.')
logging.info('Task for this run: "%s"', self.task_name)
logging.info('config_for_iclr=True; do_code_simplification=%s',
do_code_simplification)
self.rl_task = code_tasks.make_task(
task_name=self.task_name,
override_kwargs=ast.literal_eval(env_config.task_kwargs),
max_code_length=global_config.timestep_limit,
require_correct_syntax=env_config.correct_syntax,
do_code_simplification=do_code_simplification,
correct_bonus=env_config.task_manager_config.correct_bonus,
code_length_bonus=env_config.task_manager_config.code_length_bonus)
def sample_rl_batch(self):
"""Create reward functions from the current task.
Returns:
RLBatch namedtuple instance, which holds functions and information for
a minibatch of episodes.
* reward_fns: A reward function for each episode. Maps code string to
reward.
* batch_size: Number of episodes in this minibatch.
* good_reward: Estimated threshold of rewards which indicate the algorithm
is starting to solve the task. This is a heuristic that tries to
reduce the amount of stuff written to disk.
"""
reward_fns = self.rl_task.rl_batch(self.batch_size)
return RLBatch(
reward_fns=reward_fns,
batch_size=self.batch_size,
good_reward=self.rl_task.good_reward)
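# Rough usage sketch (names below are invented for illustration):
#   manager = DataManager(global_config, run_number=0)
#   batch = manager.sample_rl_batch()
#   rewards = [reward_fn(code_string) for reward_fn in batch.reward_fns]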
| jiaphuan/models | research/brain_coder/single_task/data.py | Python | apache-2.0 | 3,685 | 0.004071 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-02 16:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0020_auto_20160602_1607'),
]
operations = [
migrations.AddField(
model_name='blog',
name='status',
field=models.CharField(choices=[('U', 'Unprocessed'), ('R', 'On Google Drive (for review)'), ('L', 'On pipeline to be published'), ('P', 'Published'), ('D', 'Declined'), ('O', 'Out of date')], default='U', max_length=1),
),
]
| softwaresaved/fat | lowfat/migrations/0021_blog_status.py | Python | bsd-3-clause | 627 | 0.001595 |
import website_forum
| lamaisondub/lamaisondub-custom | website_forum_private/models/__init__.py | Python | agpl-3.0 | 21 | 0 |
from setuptools import setup
setup(name='ldsc',
version='1.0',
description='LD Score Regression (LDSC)',
url='http://github.com/bulik/ldsc',
author='Brendan Bulik-Sullivan and Hilary Finucane',
author_email='',
license='GPLv3',
packages=['ldscore'],
scripts=['ldsc.py', 'munge_sumstats.py'],
install_requires = [
'bitarray>=0.8,<0.9',
'nose>=1.3,<1.4',
'pybedtools>=0.7,<0.8',
'scipy>=0.18,<0.19',
'numpy>=1.16,<1.17',
'pandas>=0.20,<0.21'
]
)
| bulik/ldsc | setup.py | Python | gpl-3.0 | 577 | 0.005199 |
import pandas as pd
import pandas.testing as tm
import pyspark.sql.functions as F
import pytest
from pyspark.sql import Window
import ibis
@pytest.mark.parametrize(
('ibis_windows', 'spark_range'),
[
([(ibis.interval(hours=1), 0)], (-3600, 0)), # 1h back looking window
([(ibis.interval(hours=2), 0)], (-7200, 0)), # 2h back looking window
(
[(0, ibis.interval(hours=1))],
(0, 3600),
), # 1h forward looking window
(
[(ibis.interval(hours=1), ibis.interval(hours=1))],
(-3600, 3600),
), # both forward and trailing
],
indirect=['ibis_windows'],
)
def test_window_with_timecontext(client, ibis_windows, spark_range):
"""Test context adjustment for trailing / range window
We expand context according to window sizes, for example, for a table of:
time value
2020-01-01 a
2020-01-02 b
2020-01-03 c
2020-01-04 d
    with context = (2020-01-03, 2020-01-04) trailing count for 1 day will be:
time value count
2020-01-03 c 2
2020-01-04 d 2
trailing count for 2 days will be:
time value count
2020-01-03 c 3
2020-01-04 d 3
    with context = (2020-01-01, 2020-01-02) count for 1 day forward looking
window will be:
time value count
2020-01-01 a 2
2020-01-02 b 2
"""
table = client.table('time_indexed_table')
context = (
pd.Timestamp('20170102 07:00:00', tz='UTC'),
pd.Timestamp('20170103', tz='UTC'),
)
result_pd = table.mutate(
count=table['value'].count().over(ibis_windows[0])
).execute(timecontext=context)
spark_table = table.compile()
spark_window = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range)
)
expected = spark_table.withColumn(
'count',
F.count(spark_table['value']).over(spark_window),
).toPandas()
expected = expected[
expected.time.between(*(t.tz_convert(None) for t in context))
].reset_index(drop=True)
tm.assert_frame_equal(result_pd, expected)
@pytest.mark.parametrize(
('ibis_windows', 'spark_range'),
[([(None, 0)], (Window.unboundedPreceding, 0))],
indirect=['ibis_windows'],
)
def test_cumulative_window(client, ibis_windows, spark_range):
"""Test context adjustment for cumulative window
    For a cumulative window, by definition we should look back infinitely.
    When data is trimmed by a time context, we define the limit of looking
    back as the start time of the given time context. Thus for a table of
time value
2020-01-01 a
2020-01-02 b
2020-01-03 c
2020-01-04 d
    with context = (2020-01-02, 2020-01-03) cumulative count will be:
time value count
2020-01-02 b 1
2020-01-03 c 2
"""
table = client.table('time_indexed_table')
context = (
pd.Timestamp('20170102 07:00:00', tz='UTC'),
pd.Timestamp('20170105', tz='UTC'),
)
result_pd = table.mutate(
count_cum=table['value'].count().over(ibis_windows[0])
).execute(timecontext=context)
spark_table = table.compile(timecontext=context)
spark_window = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range)
)
expected = spark_table.withColumn(
'count_cum',
F.count(spark_table['value']).over(spark_window),
).toPandas()
expected = expected[
expected.time.between(*(t.tz_convert(None) for t in context))
].reset_index(drop=True)
tm.assert_frame_equal(result_pd, expected)
@pytest.mark.parametrize(
('ibis_windows', 'spark_range'),
[
(
[(ibis.interval(hours=1), 0), (ibis.interval(hours=2), 0)],
[(-3600, 0), (-7200, 0)],
)
],
indirect=['ibis_windows'],
)
def test_multiple_trailing_window(client, ibis_windows, spark_range):
"""Test context adjustment for multiple trailing window
    When there are multiple window ops, we need to verify that contexts are
    adjusted correctly for all windows. In this test we construct one
    trailing window of 1h and another trailing window of 2h.
"""
table = client.table('time_indexed_table')
context = (
pd.Timestamp('20170102 07:00:00', tz='UTC'),
pd.Timestamp('20170105', tz='UTC'),
)
result_pd = table.mutate(
count_1h=table['value'].count().over(ibis_windows[0]),
count_2h=table['value'].count().over(ibis_windows[1]),
).execute(timecontext=context)
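    # Expected: one Spark range window per ibis window, both evaluated on
    # the full table and then trimmed to the user's context.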
spark_table = table.compile()
spark_window_1h = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range[0])
)
spark_window_2h = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range[1])
)
expected = (
spark_table.withColumn(
'count_1h', F.count(spark_table['value']).over(spark_window_1h)
)
.withColumn(
'count_2h', F.count(spark_table['value']).over(spark_window_2h)
)
.toPandas()
)
expected = expected[
expected.time.between(*(t.tz_convert(None) for t in context))
].reset_index(drop=True)
tm.assert_frame_equal(result_pd, expected)
@pytest.mark.parametrize(
('ibis_windows', 'spark_range'),
[
(
[(ibis.interval(hours=1), 0), (ibis.interval(hours=2), 0)],
[(-3600, 0), (-7200, 0)],
)
],
indirect=['ibis_windows'],
)
def test_chained_trailing_window(client, ibis_windows, spark_range):
"""Test context adjustment for chained windows
When there are chained window ops, we need to verify contexts are
    adjusted correctly for all windows. In this test we construct one
    trailing window of 1h and a second trailing window of 2h over the
    column produced by the first.
"""
table = client.table('time_indexed_table')
context = (
pd.Timestamp('20170102 07:00:00', tz='UTC'),
pd.Timestamp('20170105', tz='UTC'),
)
table = table.mutate(
new_col=table['value'].count().over(ibis_windows[0]),
)
table = table.mutate(count=table['new_col'].count().over(ibis_windows[1]))
result_pd = table.execute(timecontext=context)
spark_table = table.compile()
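    # Reproduce the chained computation in raw PySpark: the 1h count creates
    # `new_col`, then the 2h count runs over that newly created column.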
spark_window_1h = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range[0])
)
spark_window_2h = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range[1])
)
spark_table = spark_table.withColumn(
'new_col', F.count(spark_table['value']).over(spark_window_1h)
)
spark_table = spark_table.withColumn(
'count', F.count(spark_table['new_col']).over(spark_window_2h)
)
expected = spark_table.toPandas()
expected = expected[
expected.time.between(*(t.tz_convert(None) for t in context))
].reset_index(drop=True)
tm.assert_frame_equal(result_pd, expected)
@pytest.mark.xfail(
reason='Issue #2457 Adjust context properly for mixed rolling window,'
' cumulative window and non window ops',
strict=True,
)
@pytest.mark.parametrize(
('ibis_windows', 'spark_range'),
[
(
[(ibis.interval(hours=1), 0), (None, 0)],
[(-3600, 0), (Window.unboundedPreceding, 0)],
)
],
indirect=['ibis_windows'],
)
def test_rolling_with_cumulative_window(client, ibis_windows, spark_range):
"""Test context adjustment for rolling window and cumulative window
    The cumulative window should be computed only within the user's context,
    while the rolling window should be computed on the expanded context.
For a rolling window of 1 day,
time value
2020-01-01 a
2020-01-02 b
2020-01-03 c
2020-01-04 d
    with context = (2020-01-02, 2020-01-03), count will be:
time value roll_count cum_count
2020-01-02 b 2 1
2020-01-03 c 2 2
"""
table = client.table('time_indexed_table')
context = (
pd.Timestamp('20170102 07:00:00', tz='UTC'),
pd.Timestamp('20170105', tz='UTC'),
)
result_pd = table.mutate(
count_1h=table['value'].count().over(ibis_windows[0]),
count_cum=table['value'].count().over(ibis_windows[1]),
).execute(timecontext=context)
spark_table = table.compile()
spark_window_1h = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range[0])
)
spark_window_cum = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range[1])
)
expected = (
spark_table.withColumn(
'count_1h', F.count(spark_table['value']).over(spark_window_1h)
)
.withColumn(
'count_cum', F.count(spark_table['value']).over(spark_window_cum)
)
.toPandas()
)
expected = expected[
expected.time.between(*(t.tz_convert(None) for t in context))
].reset_index(drop=True)
tm.assert_frame_equal(result_pd, expected)
@pytest.mark.xfail(
reason='Issue #2457 Adjust context properly for mixed rolling window,'
' cumulative window and non window ops',
strict=True,
)
@pytest.mark.parametrize(
('ibis_windows', 'spark_range'),
[([(ibis.interval(hours=1), 0)], [(-3600, 0)])],
indirect=['ibis_windows'],
)
def test_rolling_with_non_window_op(client, ibis_windows, spark_range):
"""Test context adjustment for rolling window and non window ops
    Non-window ops should be computed only within the user's context,
    while the rolling window should be computed on the expanded context.
For a rolling window of 1 day, and a `count` aggregation
time value
2020-01-01 a
2020-01-02 b
2020-01-03 c
2020-01-04 d
    with context = (2020-01-02, 2020-01-04), result will be:
time value roll_count count
2020-01-02 b 2 3
2020-01-03 c 2 3
2020-01-04 d 2 3
    Because there are 3 rows within the user context (01-02, 01-04),
    count should return 3 for every row, rather than 4, which would be
    based on the adjusted context (01-01, 01-04).
"""
table = client.table('time_indexed_table')
context = (
pd.Timestamp('20170102 07:00:00', tz='UTC'),
pd.Timestamp('20170105', tz='UTC'),
)
result_pd = table.mutate(
count_1h=table['value'].count().over(ibis_windows[0]),
count=table['value'].count(),
).execute(timecontext=context)
spark_table = table.compile()
spark_window_1h = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range[0])
)
expected = (
spark_table.withColumn(
'count_1h', F.count(spark_table['value']).over(spark_window_1h)
)
.withColumn('count', F.count(spark_table['value']))
.toPandas()
)
expected = expected[
expected.time.between(*(t.tz_convert(None) for t in context))
].reset_index(drop=True)
tm.assert_frame_equal(result_pd, expected)
@pytest.mark.xfail(
    reason='Issue #2453 chain mutate() for window op and'
    ' non window op throws error for pyspark backend',
strict=True,
)
def test_complex_window(client):
"""Test window with different sizes
mix context adjustment for window op that require context
adjustment and non window op that doesn't adjust context
"""
table = client.table('time_indexed_table')
context = (
pd.Timestamp('20170102 07:00:00', tz='UTC'),
pd.Timestamp('20170105', tz='UTC'),
)
window = ibis.trailing_window(
preceding=ibis.interval(hours=1), order_by='time', group_by='key'
)
window2 = ibis.trailing_window(
preceding=ibis.interval(hours=2), order_by='time', group_by='key'
)
window_cum = ibis.cumulative_window(order_by='time', group_by='key')
# context should be adjusted accordingly for each window
result_pd = (
table.mutate(
count_1h=table['value'].count().over(window),
count_2h=table['value'].count().over(window2),
count_cum=table['value'].count().over(window_cum),
)
.mutate(count=table['value'].count())
.execute(timecontext=context)
)
df = table.execute()
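    # Build the expected frame with pandas: rolling(..., closed='both')
    # mirrors Spark's inclusive rangeBetween bounds, and expanding()
    # mirrors the cumulative window.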
expected_win_1h = (
df.set_index('time')
.groupby('key')
.value.rolling('1h', closed='both')
.count()
.rename('count_1h')
.astype(int)
)
expected_win_2h = (
df.set_index('time')
.groupby('key')
.value.rolling('2h', closed='both')
.count()
.rename('count_2h')
.astype(int)
)
expected_cum_win = (
df.set_index('time')
.groupby('key')
.value.expanding()
.count()
.rename('count_cum')
.astype(int)
)
df = df.set_index('time')
df = df.assign(
count_1h=expected_win_1h.sort_index(level=['time', 'key']).reset_index(
level='key', drop=True
)
)
df = df.assign(
count_2h=expected_win_2h.sort_index(level=['time', 'key']).reset_index(
level='key', drop=True
)
)
df = df.assign(
count_cum=expected_cum_win.sort_index(
level=['time', 'key']
).reset_index(level='key', drop=True)
)
df['count'] = df.groupby(['key'])['value'].transform('count')
df = df.reset_index()
expected = (
df[df.time.between(*(t.tz_convert(None) for t in context))]
.sort_values(['key'])
.reset_index(drop=True)
)
tm.assert_frame_equal(result_pd, expected)
| cloudera/ibis | ibis/backends/pyspark/tests/test_window_context_adjustment.py | Python | apache-2.0 | 13,958 | 0 |
import datetime
from django import template
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed, add_domain
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import iri_to_uri
try:
from django.utils.encoding import force_text
except ImportError:
# Django < 1.5
from django.utils.encoding import force_unicode as force_text
USE_SINGLE_SIGNON = getattr(settings, "DISQUS_USE_SINGLE_SIGNON", False)
class WxrFeedType(feedgenerator.Rss201rev2Feed):
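    """Rss201rev2Feed subclass that emits the extra XML namespaces
    (WordPress export, Disqus, Dublin Core, content) needed for a WXR
    comments export."""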
def rss_attributes(self):
return {
'version': self._version,
'xmlns:content': 'http://purl.org/rss/1.0/modules/content/',
'xmlns:dsq': 'http://www.disqus.com/',
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:wp': 'http://wordpress.org/export/1.0/',
}
def format_date(self, date):
return date.strftime('%Y-%m-%d %H:%M:%S')
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=(), item_copyright=None,
ttl=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'comments': comments,
'unique_id': to_unicode(unique_id),
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def add_root_elements(self, handler):
pass
def add_item_elements(self, handler, item):
if item['comments'] is None:
return
handler.addQuickElement('title', item['title'])
handler.addQuickElement('link', item['link'])
handler.addQuickElement('content:encoded', item['description'])
handler.addQuickElement('dsq:thread_identifier', item['unique_id'])
        handler.addQuickElement('wp:post_date_gmt',
            force_text(self.format_date(item['pubdate'])))
handler.addQuickElement('wp:comment_status', item['comment_status'])
self.write_comments(handler, item['comments'])
def add_comment_elements(self, handler, comment):
if USE_SINGLE_SIGNON:
handler.startElement('dsq:remote', {})
handler.addQuickElement('dsq:id', comment['user_id'])
handler.addQuickElement('dsq:avatar', comment['avatar'])
handler.endElement('dsq:remote')
handler.addQuickElement('wp:comment_id', comment['id'])
handler.addQuickElement('wp:comment_author', comment['user_name'])
handler.addQuickElement('wp:comment_author_email', comment['user_email'])
handler.addQuickElement('wp:comment_author_url', comment['user_url'])
handler.addQuickElement('wp:comment_author_IP', comment['ip_address'])
        handler.addQuickElement('wp:comment_date_gmt',
            force_text(self.format_date(comment['submit_date'])))
handler.addQuickElement('wp:comment_content', comment['comment'])
handler.addQuickElement('wp:comment_approved', comment['is_approved'])
if comment['parent'] is not None:
handler.addQuickElement('wp:comment_parent', comment['parent'])
def write_comments(self, handler, comments):
for comment in comments:
handler.startElement('wp:comment', {})
self.add_comment_elements(handler, comment)
handler.endElement('wp:comment')
class BaseWxrFeed(Feed):
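    """Feed variant that renders every item together with its comments as
    WXR ``<wp:comment>`` elements; subclasses supply the item_* and
    comment_* hooks consumed via ``__get_dynamic_attr``."""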
feed_type = WxrFeedType
def get_feed(self, obj, request):
current_site = Site.objects.get_current()
link = self._Feed__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link)
feed = self.feed_type(
title = self._Feed__get_dynamic_attr('title', obj),
link = link,
description = self._Feed__get_dynamic_attr('description', obj),
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = template.loader.get_template(self.title_template)
except template.TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = template.loader.get_template(self.description_template)
except template.TemplateDoesNotExist:
pass
for item in self._Feed__get_dynamic_attr('items', obj):
if title_tmp is not None:
title = title_tmp.render(
template.RequestContext(request, {
'obj': item, 'site': current_site
}))
else:
title = self._Feed__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(
template.RequestContext(request, {
'obj': item, 'site': current_site
}))
else:
description = self._Feed__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self._Feed__get_dynamic_attr('item_link', item),
)
pubdate = self._Feed__get_dynamic_attr('item_pubdate', item)
if pubdate and not hasattr(pubdate, 'tzinfo'):
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self._Feed__get_dynamic_attr('item_guid', item, link),
pubdate = pubdate,
comment_status = self._Feed__get_dynamic_attr('item_comment_status', item, 'open'),
comments = self._get_comments(item)
)
return feed
def _get_comments(self, item):
cmts = self._Feed__get_dynamic_attr('item_comments', item)
output = []
for comment in cmts:
output.append({
'user_id': self._Feed__get_dynamic_attr('comment_user_id', comment),
'avatar': self._Feed__get_dynamic_attr('comment_avatar', comment),
'id': str(self._Feed__get_dynamic_attr('comment_id', comment)),
'user_name': self._Feed__get_dynamic_attr('comment_user_name', comment),
'user_email': self._Feed__get_dynamic_attr('comment_user_email', comment),
'user_url': self._Feed__get_dynamic_attr('comment_user_url', comment),
'ip_address': self._Feed__get_dynamic_attr('comment_ip_address', comment),
'submit_date': self._Feed__get_dynamic_attr('comment_submit_date', comment),
'comment': self._Feed__get_dynamic_attr('comment_comment', comment),
'is_approved': str(self._Feed__get_dynamic_attr('comment_is_approved', comment)),
'parent': str(self._Feed__get_dynamic_attr('comment_parent', comment)),
})
return output
class ContribCommentsWxrFeed(BaseWxrFeed):
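    """BaseWxrFeed wired to django.contrib.comments: comments are looked up
    by the content type and primary key of each item."""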
link = "/"
def item_comments(self, item):
from django.contrib.comments.models import Comment
ctype = ContentType.objects.get_for_model(item)
return Comment.objects.filter(content_type=ctype, object_pk=item.pk)
def item_guid(self, item):
ctype = ContentType.objects.get_for_model(item)
return "%s_%s" % (ctype.name, item.pk)
def comment_id(self, comment):
return comment.pk
def comment_user_id(self, comment):
return force_text(comment.user_id)
def comment_user_name(self, comment):
return force_text(comment.user_name)
def comment_user_email(self, comment):
return force_text(comment.user_email)
def comment_user_url(self, comment):
return force_text(comment.user_url)
def comment_ip_address(self, comment):
return force_text(comment.ip_address)
def comment_submit_date(self, comment):
return comment.submit_date
def comment_comment(self, comment):
return comment.comment
def comment_is_approved(self, comment):
return int(comment.is_public)
comment_parent = 0
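# Illustrative sketch (an assumption, not part of the original module): a
# minimal concrete feed built on ContribCommentsWxrFeed. The `Entry` model
# and its fields are hypothetical and only show which hooks a subclass
# typically provides; the comment_* hooks are already supplied above.
#
#     class EntryCommentsWxrFeed(ContribCommentsWxrFeed):
#         title = 'Comments export'
#         description = 'WXR export of comments for import into Disqus'
#
#         def items(self):
#             return Entry.objects.all()
#
#         def item_title(self, item):
#             return item.title
#
#         def item_description(self, item):
#             return item.body
#
#         def item_link(self, item):
#             return item.get_absolute_url()
#
#         def item_pubdate(self, item):
#             return item.pub_date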
| steventimberman/masterDebater | venv/lib/python2.7/site-packages/disqus/wxr_feed.py | Python | mit | 9,430 | 0.007529 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
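    # Initial South schema migration for the storageadmin app: forwards()
    # creates every model table and backwards() drops them again.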
def forwards(self, orm):
# Adding model 'Pool'
db.create_table(u'storageadmin_pool', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=4096)),
('uuid', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
('size', self.gf('django.db.models.fields.IntegerField')(default=0)),
('raid', self.gf('django.db.models.fields.CharField')(max_length=10)),
('toc', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('storageadmin', ['Pool'])
# Adding model 'Disk'
db.create_table(u'storageadmin_disk', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pool', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Pool'], null=True, on_delete=models.SET_NULL)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=10)),
('size', self.gf('django.db.models.fields.IntegerField')()),
('offline', self.gf('django.db.models.fields.BooleanField')(default=False)),
('parted', self.gf('django.db.models.fields.BooleanField')()),
))
db.send_create_signal('storageadmin', ['Disk'])
# Adding model 'Share'
db.create_table(u'storageadmin_share', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pool', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Pool'])),
('qgroup', self.gf('django.db.models.fields.CharField')(max_length=100)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=4096)),
('uuid', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
('size', self.gf('django.db.models.fields.IntegerField')()),
('owner', self.gf('django.db.models.fields.CharField')(default='root', max_length=4096)),
('group', self.gf('django.db.models.fields.CharField')(default='root', max_length=4096)),
('perms', self.gf('django.db.models.fields.CharField')(default='755', max_length=9)),
('toc', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('subvol_name', self.gf('django.db.models.fields.CharField')(max_length=4096)),
('replica', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('storageadmin', ['Share'])
# Adding model 'Snapshot'
db.create_table(u'storageadmin_snapshot', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('share', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Share'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=4096)),
('real_name', self.gf('django.db.models.fields.CharField')(default='unknownsnap', max_length=4096)),
('writable', self.gf('django.db.models.fields.BooleanField')(default=False)),
('size', self.gf('django.db.models.fields.IntegerField')(default=0)),
('toc', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('qgroup', self.gf('django.db.models.fields.CharField')(max_length=100)),
('uvisible', self.gf('django.db.models.fields.BooleanField')(default=False)),
('snap_type', self.gf('django.db.models.fields.CharField')(default='admin', max_length=64)),
))
db.send_create_signal('storageadmin', ['Snapshot'])
# Adding unique constraint on 'Snapshot', fields ['share', 'name']
db.create_unique(u'storageadmin_snapshot', ['share_id', 'name'])
# Adding model 'PoolStatistic'
db.create_table(u'storageadmin_poolstatistic', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pool', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Pool'])),
('total_capacity', self.gf('django.db.models.fields.IntegerField')()),
('used', self.gf('django.db.models.fields.IntegerField')()),
('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('storageadmin', ['PoolStatistic'])
# Adding model 'ShareStatistic'
db.create_table(u'storageadmin_sharestatistic', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('share', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Share'])),
('total_capacity', self.gf('django.db.models.fields.IntegerField')()),
('used', self.gf('django.db.models.fields.IntegerField')()),
('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('storageadmin', ['ShareStatistic'])
# Adding model 'NFSExportGroup'
db.create_table(u'storageadmin_nfsexportgroup', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('host_str', self.gf('django.db.models.fields.CharField')(max_length=4096)),
('editable', self.gf('django.db.models.fields.CharField')(default='ro', max_length=2)),
('syncable', self.gf('django.db.models.fields.CharField')(default='async', max_length=5)),
('mount_security', self.gf('django.db.models.fields.CharField')(default='insecure', max_length=8)),
('nohide', self.gf('django.db.models.fields.BooleanField')(default=False)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('storageadmin', ['NFSExportGroup'])
# Adding model 'NFSExport'
db.create_table(u'storageadmin_nfsexport', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('export_group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.NFSExportGroup'])),
('share', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Share'])),
('mount', self.gf('django.db.models.fields.CharField')(max_length=4096)),
))
db.send_create_signal('storageadmin', ['NFSExport'])
# Adding model 'SambaShare'
db.create_table(u'storageadmin_sambashare', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('share', self.gf('django.db.models.fields.related.OneToOneField')(related_name='sambashare', unique=True, to=orm['storageadmin.Share'])),
('path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=4096)),
('comment', self.gf('django.db.models.fields.CharField')(default='foo bar', max_length=100)),
('browsable', self.gf('django.db.models.fields.CharField')(default='yes', max_length=3)),
('read_only', self.gf('django.db.models.fields.CharField')(default='no', max_length=3)),
('guest_ok', self.gf('django.db.models.fields.CharField')(default='no', max_length=3)),
('create_mask', self.gf('django.db.models.fields.CharField')(default='0755', max_length=4)),
('admin_users', self.gf('django.db.models.fields.CharField')(default='Administrator', max_length=128)),
))
db.send_create_signal('storageadmin', ['SambaShare'])
# Adding model 'IscsiTarget'
db.create_table(u'storageadmin_iscsitarget', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('share', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Share'])),
('tid', self.gf('django.db.models.fields.IntegerField')(unique=True)),
('tname', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
('dev_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
('dev_size', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('storageadmin', ['IscsiTarget'])
# Adding model 'PosixACLs'
db.create_table(u'storageadmin_posixacls', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('smb_share', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.SambaShare'])),
('owner', self.gf('django.db.models.fields.CharField')(max_length=5)),
('perms', self.gf('django.db.models.fields.CharField')(max_length=3)),
))
db.send_create_signal('storageadmin', ['PosixACLs'])
# Adding model 'APIKeys'
db.create_table(u'storageadmin_apikeys', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.CharField')(unique=True, max_length=8)),
('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=10)),
))
db.send_create_signal('storageadmin', ['APIKeys'])
# Adding model 'Appliance'
db.create_table(u'storageadmin_appliance', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('uuid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
('ip', self.gf('django.db.models.fields.CharField')(unique=True, max_length=4096)),
('current_appliance', self.gf('django.db.models.fields.BooleanField')(default=False)),
('hostname', self.gf('django.db.models.fields.CharField')(default='Rockstor', max_length=128)),
('mgmt_port', self.gf('django.db.models.fields.IntegerField')(default=443)),
))
db.send_create_signal('storageadmin', ['Appliance'])
# Adding model 'SupportCase'
db.create_table(u'storageadmin_supportcase', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('notes', self.gf('django.db.models.fields.TextField')()),
('zipped_log', self.gf('django.db.models.fields.CharField')(max_length=128)),
('status', self.gf('django.db.models.fields.CharField')(max_length=9)),
('case_type', self.gf('django.db.models.fields.CharField')(max_length=6)),
))
db.send_create_signal('storageadmin', ['SupportCase'])
# Adding model 'DashboardConfig'
db.create_table(u'storageadmin_dashboardconfig', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
('widgets', self.gf('django.db.models.fields.CharField')(max_length=4096)),
))
db.send_create_signal('storageadmin', ['DashboardConfig'])
# Adding model 'NetworkInterface'
db.create_table(u'storageadmin_networkinterface', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('alias', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
('mac', self.gf('django.db.models.fields.CharField')(max_length=100)),
('boot_proto', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
('onboot', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
('network', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
('netmask', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
('ipaddr', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
('itype', self.gf('django.db.models.fields.CharField')(default='io', max_length=100)),
))
db.send_create_signal('storageadmin', ['NetworkInterface'])
# Adding model 'User'
db.create_table(u'storageadmin_user', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='suser', unique=True, null=True, to=orm['auth.User'])),
('username', self.gf('django.db.models.fields.CharField')(default='', unique=True, max_length=4096)),
('uid', self.gf('django.db.models.fields.IntegerField')(default=5000)),
('gid', self.gf('django.db.models.fields.IntegerField')(default=5000)),
))
db.send_create_signal('storageadmin', ['User'])
# Adding model 'PoolScrub'
db.create_table(u'storageadmin_poolscrub', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pool', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Pool'])),
('status', self.gf('django.db.models.fields.CharField')(default='started', max_length=10)),
('pid', self.gf('django.db.models.fields.IntegerField')()),
('start_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('end_time', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('kb_scrubbed', self.gf('django.db.models.fields.IntegerField')(null=True)),
('errors', self.gf('django.db.models.fields.IntegerField')(null=True)),
))
db.send_create_signal('storageadmin', ['PoolScrub'])
# Adding model 'Setup'
db.create_table(u'storageadmin_setup', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('setup_user', self.gf('django.db.models.fields.BooleanField')(default=False)),
('setup_system', self.gf('django.db.models.fields.BooleanField')(default=False)),
('setup_disks', self.gf('django.db.models.fields.BooleanField')(default=False)),
('setup_network', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('storageadmin', ['Setup'])
# Adding model 'SFTP'
db.create_table(u'storageadmin_sftp', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('share', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['storageadmin.Share'], unique=True)),
('editable', self.gf('django.db.models.fields.CharField')(default='ro', max_length=2)),
))
db.send_create_signal('storageadmin', ['SFTP'])
# Adding model 'Plugin'
db.create_table(u'storageadmin_plugin', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=4096)),
('display_name', self.gf('django.db.models.fields.CharField')(default='', unique=True, max_length=4096)),
('description', self.gf('django.db.models.fields.CharField')(default='', max_length=4096)),
('css_file_name', self.gf('django.db.models.fields.CharField')(max_length=4096)),
('js_file_name', self.gf('django.db.models.fields.CharField')(max_length=4096)),
('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=4096)),
))
db.send_create_signal('storageadmin', ['Plugin'])
# Adding model 'InstalledPlugin'
db.create_table(u'storageadmin_installedplugin', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('plugin_meta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storageadmin.Plugin'])),
('install_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('storageadmin', ['InstalledPlugin'])
def backwards(self, orm):
# Removing unique constraint on 'Snapshot', fields ['share', 'name']
db.delete_unique(u'storageadmin_snapshot', ['share_id', 'name'])
# Deleting model 'Pool'
db.delete_table(u'storageadmin_pool')
# Deleting model 'Disk'
db.delete_table(u'storageadmin_disk')
# Deleting model 'Share'
db.delete_table(u'storageadmin_share')
# Deleting model 'Snapshot'
db.delete_table(u'storageadmin_snapshot')
# Deleting model 'PoolStatistic'
db.delete_table(u'storageadmin_poolstatistic')
# Deleting model 'ShareStatistic'
db.delete_table(u'storageadmin_sharestatistic')
# Deleting model 'NFSExportGroup'
db.delete_table(u'storageadmin_nfsexportgroup')
# Deleting model 'NFSExport'
db.delete_table(u'storageadmin_nfsexport')
# Deleting model 'SambaShare'
db.delete_table(u'storageadmin_sambashare')
# Deleting model 'IscsiTarget'
db.delete_table(u'storageadmin_iscsitarget')
# Deleting model 'PosixACLs'
db.delete_table(u'storageadmin_posixacls')
# Deleting model 'APIKeys'
db.delete_table(u'storageadmin_apikeys')
# Deleting model 'Appliance'
db.delete_table(u'storageadmin_appliance')
# Deleting model 'SupportCase'
db.delete_table(u'storageadmin_supportcase')
# Deleting model 'DashboardConfig'
db.delete_table(u'storageadmin_dashboardconfig')
# Deleting model 'NetworkInterface'
db.delete_table(u'storageadmin_networkinterface')
# Deleting model 'User'
db.delete_table(u'storageadmin_user')
# Deleting model 'PoolScrub'
db.delete_table(u'storageadmin_poolscrub')
# Deleting model 'Setup'
db.delete_table(u'storageadmin_setup')
# Deleting model 'SFTP'
db.delete_table(u'storageadmin_sftp')
# Deleting model 'Plugin'
db.delete_table(u'storageadmin_plugin')
# Deleting model 'InstalledPlugin'
db.delete_table(u'storageadmin_installedplugin')
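    # South's frozen ORM snapshot: it backs the orm['app.Model'] lookups
    # used inside this migration.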
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storageadmin.apikeys': {
'Meta': {'object_name': 'APIKeys'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'})
},
'storageadmin.appliance': {
'Meta': {'object_name': 'Appliance'},
'current_appliance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "'Rockstor'", 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'mgmt_port': ('django.db.models.fields.IntegerField', [], {'default': '443'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'storageadmin.dashboardconfig': {
'Meta': {'object_name': 'DashboardConfig'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'widgets': ('django.db.models.fields.CharField', [], {'max_length': '4096'})
},
'storageadmin.disk': {
'Meta': {'object_name': 'Disk'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parted': ('django.db.models.fields.BooleanField', [], {}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'storageadmin.installedplugin': {
'Meta': {'object_name': 'InstalledPlugin'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'install_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'plugin_meta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Plugin']"})
},
'storageadmin.iscsitarget': {
'Meta': {'object_name': 'IscsiTarget'},
'dev_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'dev_size': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'tid': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'tname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'storageadmin.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'boot_proto': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipaddr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'itype': ('django.db.models.fields.CharField', [], {'default': "'io'", 'max_length': '100'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'netmask': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'onboot': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.nfsexport': {
'Meta': {'object_name': 'NFSExport'},
'export_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.NFSExportGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mount': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"})
},
'storageadmin.nfsexportgroup': {
'Meta': {'object_name': 'NFSExportGroup'},
'editable': ('django.db.models.fields.CharField', [], {'default': "'ro'", 'max_length': '2'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'host_str': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mount_security': ('django.db.models.fields.CharField', [], {'default': "'insecure'", 'max_length': '8'}),
'nohide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'syncable': ('django.db.models.fields.CharField', [], {'default': "'async'", 'max_length': '5'})
},
'storageadmin.plugin': {
'Meta': {'object_name': 'Plugin'},
'css_file_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4096'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'js_file_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'})
},
'storageadmin.pool': {
'Meta': {'object_name': 'Pool'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'raid': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.poolscrub': {
'Meta': {'object_name': 'PoolScrub'},
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'errors': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kb_scrubbed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pid': ('django.db.models.fields.IntegerField', [], {}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '10'})
},
'storageadmin.poolstatistic': {
'Meta': {'object_name': 'PoolStatistic'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'total_capacity': ('django.db.models.fields.IntegerField', [], {}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used': ('django.db.models.fields.IntegerField', [], {})
},
'storageadmin.posixacls': {
'Meta': {'object_name': 'PosixACLs'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'perms': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'smb_share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SambaShare']"})
},
'storageadmin.sambashare': {
'Meta': {'object_name': 'SambaShare'},
'admin_users': ('django.db.models.fields.CharField', [], {'default': "'Administrator'", 'max_length': '128'}),
'browsable': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'comment': ('django.db.models.fields.CharField', [], {'default': "'foo bar'", 'max_length': '100'}),
'create_mask': ('django.db.models.fields.CharField', [], {'default': "'0755'", 'max_length': '4'}),
'guest_ok': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'read_only': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sambashare'", 'unique': 'True', 'to': "orm['storageadmin.Share']"})
},
'storageadmin.setup': {
'Meta': {'object_name': 'Setup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'setup_disks': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_network': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.sftp': {
'Meta': {'object_name': 'SFTP'},
'editable': ('django.db.models.fields.CharField', [], {'default': "'ro'", 'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['storageadmin.Share']", 'unique': 'True'})
},
'storageadmin.share': {
'Meta': {'object_name': 'Share'},
'group': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'owner': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '4096'}),
'perms': ('django.db.models.fields.CharField', [], {'default': "'755'", 'max_length': '9'}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'qgroup': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'replica': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'subvol_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.sharestatistic': {
'Meta': {'object_name': 'ShareStatistic'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'total_capacity': ('django.db.models.fields.IntegerField', [], {}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used': ('django.db.models.fields.IntegerField', [], {})
},
'storageadmin.snapshot': {
'Meta': {'unique_together': "(('share', 'name'),)", 'object_name': 'Snapshot'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'qgroup': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'real_name': ('django.db.models.fields.CharField', [], {'default': "'unknownsnap'", 'max_length': '4096'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'snap_type': ('django.db.models.fields.CharField', [], {'default': "'admin'", 'max_length': '64'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uvisible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'writable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.supportcase': {
'Meta': {'object_name': 'SupportCase'},
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'zipped_log': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'storageadmin.user': {
'Meta': {'object_name': 'User'},
'gid': ('django.db.models.fields.IntegerField', [], {'default': '5000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.IntegerField', [], {'default': '5000'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'suser'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '4096'})
}
}
    complete_apps = ['storageadmin']
 | schakrava/rockstor-core | src/rockstor/storageadmin/south_migrations/0001_initial.py | Python | gpl-3.0 | 36999 | 0.00746 |