| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Networks.
"""
import logging
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.forms import UpdateNetwork
from openstack_dashboard.dashboards.project.networks.ports.tables \
import PortsTable
from openstack_dashboard.dashboards.project.networks.subnets.tables \
import SubnetsTable
from openstack_dashboard.dashboards.project.networks.tables \
import NetworksTable
from openstack_dashboard.dashboards.project.networks.workflows \
import CreateNetwork
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
table_class = NetworksTable
template_name = 'project/networks/index.html'
def get_data(self):
try:
tenant_id = self.request.user.tenant_id
networks = api.neutron.network_list_for_tenant(self.request,
tenant_id)
        except Exception:
networks = []
msg = _('Network list can not be retrieved.')
exceptions.handle(self.request, msg)
for n in networks:
n.set_id_as_name_if_empty()
return networks
class CreateView(workflows.WorkflowView):
workflow_class = CreateNetwork
def get_initial(self):
pass
class UpdateView(forms.ModalFormView):
form_class = UpdateNetwork
template_name = 'project/networks/update.html'
context_object_name = 'network'
success_url = reverse_lazy("horizon:project:networks:index")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context["network_id"] = self.kwargs['network_id']
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
network_id = self.kwargs['network_id']
try:
self._object = api.neutron.network_get(self.request,
network_id)
            except Exception:
redirect = self.success_url
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
network = self._get_object()
return {'network_id': network['id'],
'tenant_id': network['tenant_id'],
'name': network['name'],
'admin_state': network['admin_state_up']}
class DetailView(tables.MultiTableView):
table_classes = (SubnetsTable, PortsTable)
template_name = 'project/networks/detail.html'
failure_url = reverse_lazy('horizon:project:networks:index')
def get_subnets_data(self):
try:
network = self._get_data()
subnets = api.neutron.subnet_list(self.request,
network_id=network.id)
        except Exception:
subnets = []
msg = _('Subnet list can not be retrieved.')
exceptions.handle(self.request, msg)
for s in subnets:
s.set_id_as_name_if_empty()
return subnets
def get_ports_data(self):
try:
network_id = self.kwargs['network_id']
ports = api.neutron.port_list(self.request, network_id=network_id)
        except Exception:
ports = []
msg = _('Port list can not be retrieved.')
exceptions.handle(self.request, msg)
for p in ports:
p.set_id_as_name_if_empty()
return ports
def _get_data(self):
if not hasattr(self, "_network"):
try:
network_id = self.kwargs['network_id']
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
            except Exception:
msg = _('Unable to retrieve details for network "%s".') \
% (network_id)
exceptions.handle(self.request, msg, redirect=self.failure_url)
self._network = network
return self._network
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["network"] = self._get_data()
return context
| rackerlabs/horizon | openstack_dashboard/dashboards/project/networks/views.py | Python | apache-2.0 | 5,149 | 0.000971 |
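The class-based views above expect a URLconf to supply the `network_id` keyword argument and the `horizon:project:networks:*` URL names. Below is a minimal sketch of such a `urls.py` for Django of that era; the regexes and URL names are illustrative assumptions, not Horizon's actual routing table.
from django.conf.urls import patterns, url
from .views import CreateView, DetailView, IndexView, UpdateView
urlpatterns = patterns(
    '',
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'^create/$', CreateView.as_view(), name='create'),
    url(r'^(?P<network_id>[^/]+)/update/$', UpdateView.as_view(), name='update'),
    url(r'^(?P<network_id>[^/]+)/detail/$', DetailView.as_view(), name='detail'),
)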
# -*- coding: utf-8 -*-
# wpcdesk - WordPress Comment Desktop
# Copyright (C) 2012 Eka Putra - ekaputra@balitechy.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtGui, QtCore
from gui.comment_window import Ui_CommentWindow
from wpcdesk_threads import EditCommentThread, DeleteCommentThread
class CommentEditor(QtGui.QDialog):
def __init__(self, parent=None, data=None):
QtGui.QDialog.__init__(self, parent)
self.ui = Ui_CommentWindow()
self.ui.setupUi(self)
self.ui.progressBar.hide()
self.set_validator()
self.parent = parent
self.data = data
self.fill_form(self.data)
QtCore.QObject.connect(self.ui.btn_save, QtCore.SIGNAL("clicked()"), self.saveComment)
QtCore.QObject.connect(self.ui.btn_delete, QtCore.SIGNAL("clicked()"), self.deleteComment)
self.edit_comment_thread = EditCommentThread()
self.edit_comment_thread.is_loading.connect(self.loading)
self.edit_comment_thread.is_success.connect(self.edit_status)
self.delete_comment_thread = DeleteCommentThread(self.data)
self.delete_comment_thread.is_loading.connect(self.loading)
self.delete_comment_thread.is_success.connect(self.delete_status)
def set_validator(self):
# Email Validator
email_pattern = QtCore.QRegExp( r"^([a-zA-Z0-9_\.\-\+])+\@(([a-zA-Z0-9\-])+\.)+([a-zA-Z0-9]{2,4})+$" )
email_validator = QtGui.QRegExpValidator(email_pattern , self )
self.ui.edit_email.setValidator(email_validator)
def fill_form(self, data):
self.comment_id = data['comment_id']
self.ui.lbl_post.setText(data['comment_post'])
self.ui.lbl_date.setText(data['comment_date'])
self.ui.edit_name.setText(data['comment_author'])
self.ui.edit_email.setText(data['comment_email'])
self.ui.edit_comment.setText(data['comment_content'])
if data['comment_status'] == 'Approved':
self.ui.cb_status.setChecked(True)
else:
self.ui.cb_status.setChecked(False)
def saveComment(self):
data = {}
if self.ui.cb_status.isChecked():
data['status'] = 'approve'
else:
data['status'] = 'hold'
data['content'] = str(self.ui.edit_comment.toPlainText())
data['author'] = str(self.ui.edit_name.text())
data['author_email'] = str(self.ui.edit_email.text())
self.edit_comment_thread.set_comment_id(int(self.data['comment_id']))
self.edit_comment_thread.set_data(data)
self.edit_comment_thread.start()
def deleteComment(self):
        answer = QtGui.QMessageBox.question(self, 'Confirmation','Are you sure you want to delete this comment?', QtGui.QMessageBox.Yes|QtGui.QMessageBox.Cancel)
if answer == QtGui.QMessageBox.Yes:
self.delete_comment_thread.start()
else:
return
def loading(self, is_loading):
if is_loading:
self.ui.progressBar.show()
else:
self.ui.progressBar.hide()
def edit_status(self, status):
if status:
self.parent.loadComments()
            QtGui.QMessageBox.information(self, 'Comment updated!','Comment successfully updated.', QtGui.QMessageBox.Ok)
else:
QtGui.QMessageBox.warning(self, 'Failed!','Failed to update comment.', QtGui.QMessageBox.Ok)
def delete_status(self, status):
if status:
self.parent.loadComments()
            QtGui.QMessageBox.information(self, 'Comment Deleted','Comment successfully deleted.', QtGui.QMessageBox.Ok)
self.close()
else:
QtGui.QMessageBox.warning(self, 'Failed!','Failed to delete comment.', QtGui.QMessageBox.Ok)
| ekaputra07/wpcdesk | wpcdesk/comment_editor.py | Python | gpl-3.0 | 4,371 | 0.004576 |
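CommentEditor.fill_form() implicitly documents the dictionary shape the dialog expects. A hedged sketch of that structure follows; the field values are made up, and since the dialog needs a running QApplication the instantiation is left commented out.
sample_comment = {
    'comment_id': 42,
    'comment_post': 'Hello World',
    'comment_date': '2012-06-01 10:00',
    'comment_author': 'Eka Putra',
    'comment_email': 'eka@example.com',
    'comment_content': 'Nice post!',
    'comment_status': 'Approved',   # anything else leaves the approval box unchecked
}
# editor = CommentEditor(parent=main_window, data=sample_comment)
# editor.exec_()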
import chainer
def main():
return chainer.datasets.get_mnist(withlabel=False)[0]
| fukatani/CW_gui | examples/mnist/get_mnist_prediction.py | Python | bsd-3-clause | 87 | 0 |
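A short usage sketch of the helper above. The shape comment reflects MNIST's standard 60,000-image training split as Chainer returns it with default settings (flattened 28x28 images); it is an expectation, not something the snippet asserts.
images = main()
print(images.shape)  # expected: (60000, 784)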
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
from lib.utility.basetype import ValuedEnum
from lib.utility.basetype import OrderedEnum
EbCliVersion = 'v2.6.0'
class Key(object):
Default = 'default'
Options = 'options'
#----------------------------------------------
# Parameters
#----------------------------------------------
# Standard name of parameters used in Elastic Beanstalk Command Line Interface
ParameterName = ValuedEnum({
u'Command' : 0,
u'SubCommand' : 1,
u'AwsAccessKeyId' : 11,
u'AwsSecretAccessKey' : 12,
u'AwsCredentialFile' : 13,
u'Region' : 21,
u'OriginalRegion' : 22,
u'ServiceEndpoint' : 31,
u'DevToolsEndpoint' : 41,
u'ApplicationName': 101,
u'OriginalApplicationName': 102,
u'ApplicationVersionName':111,
u'EnvironmentName':121,
u'EnvironmentId':122,
u'EnvironmentTier':150,
u'SolutionStack' : 201,
u'OriginalSolutionStack' : 202,
u'EnvironmentType' : 211,
u'Branches': 301,
u'CurrentBranch': 302,
u'BranchMapping': 303,
u'DefaultEnvironmentName': 351,
u'OptionSettingFile' : 501,
u'ConfigFileExtra' : 511,
u'RdsEnabled': 601,
u'RdsEndpoint': 602,
u'RdsSnippetUrl': 603,
u'RdsSourceSnapshotName': 606,
u'RdsEngine': 611,
u'RdsEngineVersion': 612,
u'RdsInstanceClass': 613,
u'RdsMultiAZ': 614,
u'RdsLicenseModel': 615,
u'RdsAllocatedStorage': 616,
u'RdsInstanceName': 621,
u'RdsMasterUsername': 622,
u'RdsMasterPassword': 623,
u'RdsDbName' : 631,
u'RdsDeletionPolicy': 651,
u'InstanceProfileName': 701,
u'ServiceConnectionTimeout' : 1001,
u'ServiceRetryThreshold' : 1011,
u'Force' : 1021,
u'Verbose' : 1051,
u'WaitForFinishTimeout': 1101,
u'WaitForUpdateTimeout': 1102,
u'PollDelay' : 1201,
u'CreateEnvironmentRequestID' : 2001,
u'TerminateEnvironmentRequestID' : 2002,
u'UpdateEnvironmentRequestID' : 2003,
u'RequestEnvInfoRequestID' : 2004,
u'AvailableSolutionStacks': 2101,
})
# Source of parameter value
ParameterSource = ValuedEnum({
u'CliArgument' : 0,
u'Terminal' : 1,
u'ConfigFile' : 2,
u'OsEnvironment' : 3,
u'OperationOutput' : 4,
u'Default' : 10,
})
#----------------------------------------------
# Command
#----------------------------------------------
CommandType = OrderedEnum([
u'INIT',
u'BRANCH',
u'START',
u'STATUS',
u'UPDATE',
u'STOP',
u'DELETE',
u'LOGS',
u'EVENTS',
u'PUSH',
])
SubCommandType = OrderedEnum([
# LOGS command
u'TAIL',
u'OPEN'
])
CommandCombination = {
CommandType.LOGS : {
Key.Default : SubCommandType.TAIL,
Key.Options : [
SubCommandType.TAIL,
]
},
}
#----------------------------------------------
# Terminal
#----------------------------------------------
class TerminalConstant(object):
Y = u'Y'
Yes = u'Yes'
N = u'N'
No = u'No'
TRUE = u'True'
FALSE = u'False'
RdsSnapshotListNumber = 5
IamProfileListNumber = 6
#----------------------------------------------
# Services
#----------------------------------------------
ServiceRegion = OrderedEnum([
u'UsEast1',
u'UsWest1',
u'UsWest2',
u'EuWest1',
u'ApNortheast1',
u'ApSoutheast1',
u'ApSoutheast2',
u'SaEast1',
])
AvailableServiceRegion = [
ServiceRegion.UsEast1,
ServiceRegion.UsWest2,
ServiceRegion.UsWest1,
ServiceRegion.EuWest1,
ServiceRegion.ApSoutheast1,
ServiceRegion.ApNortheast1,
ServiceRegion.ApSoutheast2,
ServiceRegion.SaEast1,
]
ServiceRegionName = {
ServiceRegion.ApNortheast1 : u'Asia Pacific (Tokyo)',
ServiceRegion.ApSoutheast1 : u'Asia Pacific (Singapore)',
ServiceRegion.ApSoutheast2 : u'Asia Pacific (Sydney)',
ServiceRegion.EuWest1: u'EU West (Ireland)',
ServiceRegion.SaEast1: u'South America (Sao Paulo)',
ServiceRegion.UsEast1 : u'US East (Virginia)',
ServiceRegion.UsWest1 : u'US West (North California)',
ServiceRegion.UsWest2 : u'US West (Oregon)',
}
ServiceRegionId = {
ServiceRegion.ApNortheast1 : u'ap-northeast-1',
ServiceRegion.ApSoutheast1 : u'ap-southeast-1',
ServiceRegion.ApSoutheast2 : u'ap-southeast-2',
ServiceRegion.EuWest1: u'eu-west-1',
ServiceRegion.SaEast1: u'sa-east-1',
ServiceRegion.UsEast1 : u'us-east-1',
ServiceRegion.UsWest1 : u'us-west-1',
ServiceRegion.UsWest2 : u'us-west-2',
}
ServiceEndpoint = {
ServiceRegion.ApNortheast1 : u'https://elasticbeanstalk.ap-northeast-1.amazonaws.com',
ServiceRegion.ApSoutheast1 : u'https://elasticbeanstalk.ap-southeast-1.amazonaws.com',
ServiceRegion.ApSoutheast2 : u'https://elasticbeanstalk.ap-southeast-2.amazonaws.com',
ServiceRegion.EuWest1: u'https://elasticbeanstalk.eu-west-1.amazonaws.com',
ServiceRegion.SaEast1: u'https://elasticbeanstalk.sa-east-1.amazonaws.com',
ServiceRegion.UsEast1 : u'https://elasticbeanstalk.us-east-1.amazonaws.com',
ServiceRegion.UsWest1 : u'https://elasticbeanstalk.us-west-1.amazonaws.com',
ServiceRegion.UsWest2 : u'https://elasticbeanstalk.us-west-2.amazonaws.com',
}
SnippetBucket = {
ServiceRegion.ApNortheast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-ap-northeast-1/eb_snippets',
ServiceRegion.ApSoutheast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-ap-southeast-1/eb_snippets',
ServiceRegion.ApSoutheast2 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-ap-southeast-2/eb_snippets',
ServiceRegion.EuWest1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-eu-west-1/eb_snippets',
ServiceRegion.SaEast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-sa-east-1/eb_snippets',
ServiceRegion.UsEast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-east-1/eb_snippets',
ServiceRegion.UsWest1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-west-1/eb_snippets',
ServiceRegion.UsWest2 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-west-2/eb_snippets',
}
PolicyBucket = {
ServiceRegion.ApNortheast1 : u'https://elasticbeanstalk-env-resources-ap-northeast-1.s3.amazonaws.com/eb_policies',
ServiceRegion.ApSoutheast1 : u'https://elasticbeanstalk-env-resources-ap-southeast-1.s3.amazonaws.com/eb_policies',
ServiceRegion.ApSoutheast2 : u'https://elasticbeanstalk-env-resources-ap-southeast-2.s3.amazonaws.com/eb_policies',
ServiceRegion.EuWest1 : u'https://elasticbeanstalk-env-resources-eu-west-1.s3.amazonaws.com/eb_policies',
ServiceRegion.SaEast1 : u'https://elasticbeanstalk-env-resources-sa-east-1.s3.amazonaws.com/eb_policies',
ServiceRegion.UsEast1 : u'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-east-1/eb_policies',
ServiceRegion.UsWest1 : u'https://elasticbeanstalk-env-resources-us-west-1.s3.amazonaws.com/eb_policies',
ServiceRegion.UsWest2 : u'https://elasticbeanstalk-env-resources-us-west-2.s3.amazonaws.com/eb_policies',
}
DevToolsEndpoint = {
ServiceRegion.ApNortheast1 : u'git.elasticbeanstalk.ap-northeast-1.amazonaws.com',
ServiceRegion.ApSoutheast1 : u'git.elasticbeanstalk.ap-southeast-1.amazonaws.com',
ServiceRegion.ApSoutheast2 : u'git.elasticbeanstalk.ap-southeast-2.amazonaws.com',
ServiceRegion.EuWest1: u'git.elasticbeanstalk.eu-west-1.amazonaws.com',
ServiceRegion.SaEast1: u'git.elasticbeanstalk.sa-east-1.amazonaws.com',
ServiceRegion.UsEast1 : u'git.elasticbeanstalk.us-east-1.amazonaws.com',
ServiceRegion.UsWest1 : u'git.elasticbeanstalk.us-west-1.amazonaws.com',
ServiceRegion.UsWest2 : u'git.elasticbeanstalk.us-west-2.amazonaws.com',
}
class EbDefault(object):
TailLog = u'tail'
RoleAssumePolicyUrlMask = u'{0}/role-assume-policy'
DefaultRoleName = u'aws-elasticbeanstalk-ec2-role'
DefaultInstanceProfileName = u'aws-elasticbeanstalk-ec2-role'
class DevToolsDefault(object):
NameDelimiter = u'-'
VersionNameRe = u'^git-{0}-\d+$'
VersionNameMask = u'git-{0}-{1}'
AwsPush = [u'git', u'aws.push']
#----------------------------------------------
# Solution stacks and sample app
#----------------------------------------------
class DefaultAppSource(object):
Namespace = u'aws:cloudformation:template:parameter'
OptionName = u'AppSource'
class LegacyContainer(object):
Regex = u'\(legacy\) *$'
class TomcatAppContainer(object):
Name = u'Tomcat'
Regex = u'^(32|64)bit Amazon Linux running Tomcat (6|7)(( (L|l)egacy)|( \((L|l)egacy\)))?$'
class PhpAppContainer(object):
Name = u'PHP'
Regex = u'^(32|64)bit Amazon Linux running PHP 5.3(( (L|l)egacy)|( \((L|l)egacy\)))?$'
class IisAppContainer(object):
Name = u'IIS'
Regex = u'^64bit Windows Server 2008 R2 running IIS 7.5(( (L|l)egacy)|( \((L|l)egacy\)))?$'
class PythonAppContainer(object):
Name = u'Python'
Regex = u'^(32|64)bit Amazon Linux running Python.*'
class RubyAppContainer(object):
Name = u'Ruby'
Regex = u'^(32|64)bit Amazon Linux running Ruby .*'
#----------------------------------------------
# RDS
#----------------------------------------------
RdsEndpoint = {
ServiceRegion.ApNortheast1 : u'https://rds.ap-northeast-1.amazonaws.com',
ServiceRegion.ApSoutheast1 : u'https://rds.ap-southeast-1.amazonaws.com',
ServiceRegion.ApSoutheast2 : u'https://rds.ap-southeast-2.amazonaws.com',
ServiceRegion.EuWest1: u'https://rds.eu-west-1.amazonaws.com',
ServiceRegion.SaEast1: u'https://rds.sa-east-1.amazonaws.com',
ServiceRegion.UsEast1 : u'https://rds.amazonaws.com',
ServiceRegion.UsWest1 : u'https://rds.us-west-1.amazonaws.com',
ServiceRegion.UsWest2 : u'https://rds.us-west-2.amazonaws.com',
}
class RdsDefault(object):
PasswordMismatchThreshold = 3
SnippetUrlMask = u'{0}/rds/rds.json'
SnippetName = u'RdsExtensionEB'
SnippetAddOrder = 10000
SnippetRemoveOrder = -1
DbIdLengthLimit = {
u'mysql' : 63,
u'sqlserver-ex' : 15,
u'sqlserver-se' : 15,
u'sqlserver-web' : 15,
}
DeletionPolicySnapshot = u'Snapshot'
DeletionPolicyDelete = u'Delete'
ResourceType = u'AWS::RDS::DBInstance'
HostnameType = u'Endpoint'
PortType = u'Port'
@classmethod
def get_snippet_url(cls, region):
return cls.SnippetUrlMask.format(SnippetBucket[region])
@classmethod
def bool_to_del_policy(cls, switch):
if switch:
return cls.DeletionPolicySnapshot
else:
return cls.DeletionPolicyDelete
@classmethod
def del_policy_to_bool(cls, policy):
if policy == cls.DeletionPolicySnapshot:
return True
else:
return False
Namespace = u'aws:rds:dbinstance'
OptionNames = {
ParameterName.RdsEngine : u'DBEngine',
ParameterName.RdsEngineVersion : u'DBEngineVersion',
ParameterName.RdsInstanceClass : u'DBInstanceClass',
ParameterName.RdsAllocatedStorage : u'DBAllocatedStorage',
ParameterName.RdsMultiAZ : u'MultiAZDatabase',
ParameterName.RdsLicenseModel : u'DBLicenseModel',
ParameterName.RdsSourceSnapshotName : u'DBSnapshotIdentifier',
ParameterName.RdsDbName : u'DBName',
ParameterName.RdsMasterUsername : u'DBUser',
ParameterName.RdsMasterPassword : u'DBPassword',
ParameterName.RdsDeletionPolicy : u'DBDeletionPolicy',
}
OptionMinSet = {
ParameterName.RdsEngine,
ParameterName.RdsSourceSnapshotName,
ParameterName.RdsMasterPassword,
ParameterName.RdsDeletionPolicy,
}
PasswordMinSize = 8
PasswordMaxSize = 41
#----------------------------------------------
# IAM
#----------------------------------------------
IamEndpoint = u'https://iam.amazonaws.com'
IamRegion = u'us-east-1'
#----------------------------------------------
# Application and environment default
#----------------------------------------------
class EnvironmentStatus(object):
Launching = u'Launching'
Ready = u'Ready'
Updating = u'Updating'
Terminating = u'Terminating'
Terminated = u'Terminated'
class EnvironmentHealth(object):
Green = u'Green'
Yellow = u'Yellow'
Red = u'Red'
Grey = u'Grey'
class EventSeverity(object):
Trace = u'TRACE'
Debug = u'Debug'
Info = u'INFO'
Warn = u'WARN'
Error = u'ERROR'
Fatal = u'FATAL'
class ValidationSeverity(object):
SeverityError = u'error'
SeverityWarning = u'warning'
class ServiceDefault(object):
""" Defines CLI related constant values. """
DEFAULT_VERSION_NAME = u'Sample Application'
SERVICE_CALL_MAX_RETRY = 5
CONNECTION_TIMEOUT_IN_SEC = 30
WAIT_TIMEOUT_IN_SEC = 600
UPDATE_TIMEOUT_IN_SEC = 300
RDS_ADDITION_TIMEOUT_IN_SEC = 300
POLL_DELAY_IN_SEC = 5
CREATE_ENV_POLL_DELAY = 3
TERMINATE_ENV_POLL_DELAY = 0
UPDATE_ENV_POLL_DELAY = 0
CHAR_CODEC = 'utf-8'
ENABLED = u'Enabled'
USER_AGENT = 'eb ' + EbCliVersion
STATUS_EVENT_LEVEL = EventSeverity.Warn
STATUS_EVENT_MAX_NUM = 3
EVENT_DEFAULT_NUM = 10
class Environment(object):
REGEX_NAME_FILTER = u'[^A-Za-z0-9\-]+'
NAME_POSTFIX = u'-env'
MAX_NAME_LEN = 23
BRANCH_NAME_SEPERATOR = u'-'
OutputLevel = OrderedEnum([
u'Info',
u'ResultOnly',
u'Quiet',
u'Silence',
])
#----------------------------------------------
# Configuration file and log file
#----------------------------------------------
class FileDefaultParameter(object):
RotationMaxRetry = 1000
class OSSpecific(object):
'''Windows specific constants'''
WindowsName = u'Windows'
WindowsClimbUpDepth = 2
WindowsModuleScriptPath = u'AWSDevTools\\Windows'
WindowsModuleScriptName = u'AWSDevTools-OneTimeSetup.bat'
WindowsRepoScript = u'AWSDevTools\\Windows\\AWSDevTools-RepositorySetup.bat'
'''Nix specific constants'''
LinuxName = u'Linux'
LinuxClimbUpDepth = 3
LinuxRepoScript = u'AWSDevTools/Linux/AWSDevTools-RepositorySetup.sh'
class AwsCredentialFileDefault(object):
FilePath = u'.elasticbeanstalk'
FileName = u'aws_credential_file'
OSVariableName = u'AWS_CREDENTIAL_FILE'
KeyName = {
ParameterName.AwsAccessKeyId : u'AWSAccessKeyId',
ParameterName.AwsSecretAccessKey : u'AWSSecretKey',
ParameterName.RdsMasterPassword : u'RDSMasterPassword',
}
class EbLocalDir(object):
Path = u'.elasticbeanstalk'
Name = Path + u'/'
NameRe = Path + u'/'
LogDir = u'log'
class EbLogFile(object):
Name = u'eb-cli.log'
NameRe = u'.*eb-cli\.log.*'
class EbConfigFile(object):
Name = u'config'
NameRe = u'.*\config.*'
SectionNameDelimiter = u':'
RootSectionName = u'global'
RootSectionKeys = {
ParameterName.AwsCredentialFile,
ParameterName.ApplicationName,
ParameterName.ApplicationVersionName,
ParameterName.DevToolsEndpoint,
ParameterName.EnvironmentName,
ParameterName.OptionSettingFile,
ParameterName.EnvironmentTier,
ParameterName.SolutionStack,
ParameterName.Region,
ParameterName.ServiceEndpoint,
ParameterName.RdsEnabled,
ParameterName.RdsSourceSnapshotName,
ParameterName.RdsDeletionPolicy,
ParameterName.InstanceProfileName,
ParameterName.EnvironmentType,
}
BranchResetParameters = {
ParameterName.ApplicationName : ParameterName.OriginalApplicationName,
ParameterName.Region : ParameterName.OriginalRegion,
ParameterName.SolutionStack : ParameterName.OriginalSolutionStack,
}
BranchSectionName = u'branches'
BranchSectionPrefix = u'branch' + SectionNameDelimiter
BranchSectionKeys = {
ParameterName.ApplicationVersionName,
ParameterName.EnvironmentName,
ParameterName.EnvironmentTier,
ParameterName.OptionSettingFile,
ParameterName.RdsEnabled,
ParameterName.RdsSourceSnapshotName,
ParameterName.RdsDeletionPolicy,
ParameterName.InstanceProfileName,
ParameterName.EnvironmentType,
}
BranchSectionHiddenKeys = {
ParameterName.RdsMasterPassword,
}
# Map from section name to (section existence condition, list of member keys)
KnownSections = {
RootSectionName : (ParameterName.ApplicationName, RootSectionKeys),
}
class OptionSettingFile(object):
Name = u'optionsettings'
class CABundle(object):
Path = u'.'
Name = u'ca-bundle.crt'
class FileErrorConstant(object):
FileNotFoundErrorCode = 2
FileNotFoundErrorMsg = u'No such file or directory'
#----------------------------------------------
# Git and DevTools file
#----------------------------------------------
class GitDefault(object):
HeadRe = u'\* .+'
GetBranch = [u'git', u'branch']
GetHeadHash = [u'git', u'rev-parse', u'HEAD']
class GitIgnoreFile(object):
Name = u'.gitignore'
Path = u'.'
Files = {
EbLocalDir,
}
class DevToolsConfigFile(object):
Name = u'config'
Path = u'.git'
InitHelpUrl = u'http://docs.amazonwebservices.com/elasticbeanstalk'\
'/latest/dg/command-reference-get-started.html'
#----------------------------------------------
# OptionSettingList
#----------------------------------------------
LocalOptionSettings = {
u'aws:autoscaling:launchconfiguration' : {
u'EC2KeyName',
u'InstanceType',
},
u'aws:elasticbeanstalk:sns:topics' : {
u'Notification Endpoint',
u'Notification Protocol',
},
u'aws:elasticbeanstalk:monitoring' : {
u'Automatically Terminate Unhealthy Instances',
},
u'aws:elasticbeanstalk:hostmanager' : {
u'LogPublicationControl',
},
u'aws:elasticbeanstalk:application' : {
u'Application Healthcheck URL',
},
u'aws:autoscaling:asg' : {
u'MaxSize',
u'MinSize',
u'Custom Availability Zones',
},
u'aws:autoscaling:updatepolicy:rollingupdate' : {
u'RollingUpdateEnabled',
},
u'aws:rds:dbinstance' : {
u'DBDeletionPolicy',
u'DBEngine',
u'DBInstanceClass',
u'DBSnapshotIdentifier',
u'DBUser',
},
u'aws:ec2:vpc' : {
u'VPCId',
u'Subnets',
u'ELBSubnets',
u'DBSubnets',
u'ELBScheme',
u'AutoScalingGroupScheme',
},
u'aws:elasticbeanstalk:sqsd' : {
u'WorkerQueueURL',
u'HttpPath',
u'MimeType',
u'MaxRetries',
u'HttpConnections',
u'ConnectTimeout',
u'InactivityTimeout',
u'VisibilityTimeout',
u'RetentionPeriod',
},
}
OptionSettingContainerPrefix = u'aws:elasticbeanstalk:container'
class OptionSettingApplicationEnvironment(object):
Namespace = u'aws:elasticbeanstalk:application:environment'
IgnoreOptionNames = {
u'AWS_ACCESS_KEY_ID',
u'AWS_SECRET_KEY',
}
class OptionSettingVPC(object):
Namespace = u'aws:ec2:vpc'
MagicOptionName = u'Subnets'
DBSubnets = u'DBSubnets'
TrimOption = {
u'aws:autoscaling:asg' : {
u'Custom Availability Zones',
},
}
class OptionSettingIAMProfile(object):
Namespace = u'aws:autoscaling:launchconfiguration'
OptionName = u'IamInstanceProfile'
class OptionSettingEnvironmentType(object):
Namespace = u'aws:elasticbeanstalk:environment'
OptionName = u'EnvironmentType'
| coen-hyde/dotfiles | libs/eb/scli/constants.py | Python | mit | 21,046 | 0.013779 |
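A hypothetical illustration of how other parts of the CLI might read these constants, assuming scli.constants (and its lib.utility.basetype dependency) is importable; the printed values follow directly from the dictionaries and classmethods above.
region = ServiceRegion.UsEast1
print(ServiceEndpoint[region])              # https://elasticbeanstalk.us-east-1.amazonaws.com
print(RdsDefault.get_snippet_url(region))   # .../elasticbeanstalk-env-resources-us-east-1/eb_snippets/rds/rds.json
print(RdsDefault.bool_to_del_policy(True))  # Snapshot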
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_servicebus
version_added: "2.8"
short_description: Manage Azure Service Bus.
description:
    - Create, update or delete an Azure Service Bus namespace.
options:
resource_group:
description:
- name of resource group.
required: true
name:
description:
- name of the servicebus namespace
required: true
state:
description:
            - Assert the state of the servicebus namespace. Use 'present' to create or update and
'absent' to delete.
default: present
choices:
- absent
- present
location:
description:
- Namespace location.
sku:
description:
- Namespace sku.
choices:
- standard
- basic
- premium
default:
standard
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Create a namespace
azure_rm_servicebus:
name: deadbeef
location: eastus
'''
RETURN = '''
id:
description: Current state of the service bus.
returned: success
type: str
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
from ansible.module_utils._text import to_native
from datetime import datetime, timedelta
class AzureRMServiceBus(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
location=dict(type='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
sku=dict(type='str', choices=['basic', 'standard', 'premium'], default='standard')
)
self.resource_group = None
self.name = None
self.state = None
self.sku = None
self.location = None
self.results = dict(
changed=False,
id=None
)
super(AzureRMServiceBus, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
changed = False
if not self.location:
resource_group = self.get_resource_group(self.resource_group)
self.location = resource_group.location
original = self.get()
if self.state == 'present' and not original:
self.check_name()
changed = True
if not self.check_mode:
original = self.create()
elif self.state == 'absent' and original:
changed = True
original = None
if not self.check_mode:
self.delete()
self.results['deleted'] = True
if original:
self.results = self.to_dict(original)
self.results['changed'] = changed
return self.results
def check_name(self):
try:
check_name = self.servicebus_client.namespaces.check_name_availability_method(self.name)
if not check_name or not check_name.name_available:
self.fail("Error creating namespace {0} - {1}".format(self.name, check_name.message or str(check_name)))
except Exception as exc:
self.fail("Error creating namespace {0} - {1}".format(self.name, exc.message or str(exc)))
def create(self):
self.log('Cannot find namespace, creating a one')
try:
sku = self.servicebus_models.SBSku(name=str.capitalize(self.sku))
poller = self.servicebus_client.namespaces.create_or_update(self.resource_group,
self.name,
self.servicebus_models.SBNamespace(location=self.location,
sku=sku))
ns = self.get_poller_result(poller)
except Exception as exc:
self.fail('Error creating namespace {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc)))
return ns
def delete(self):
try:
self.servicebus_client.namespaces.delete(self.resource_group, self.name)
return True
except Exception as exc:
self.fail("Error deleting route {0} - {1}".format(self.name, str(exc)))
def get(self):
try:
return self.servicebus_client.namespaces.get(self.resource_group, self.name)
except Exception:
return None
def to_dict(self, instance):
result = dict()
attribute_map = self.servicebus_models.SBNamespace._attribute_map
for attribute in attribute_map.keys():
value = getattr(instance, attribute)
if not value:
continue
if isinstance(value, self.servicebus_models.SBSku):
result[attribute] = value.name.lower()
elif isinstance(value, datetime):
result[attribute] = str(value)
elif isinstance(value, str):
result[attribute] = to_native(value)
elif attribute == 'max_size_in_megabytes':
result['max_size_in_mb'] = value
else:
result[attribute] = value
return result
def is_valid_timedelta(value):
if value == timedelta(10675199, 10085, 477581):
return None
return value
def main():
AzureRMServiceBus()
if __name__ == '__main__':
main()
| helldorado/ansible | lib/ansible/modules/cloud/azure/azure_rm_servicebus.py | Python | gpl-3.0 | 6,397 | 0.003126 |
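The module-level helper is_valid_timedelta() treats one specific timedelta as a "not set" sentinel and returns None for it. A small, hedged demonstration, run outside Ansible and assuming only the function above:
from datetime import timedelta
print(is_valid_timedelta(timedelta(minutes=5)))                # 0:05:00
print(is_valid_timedelta(timedelta(10675199, 10085, 477581)))  # None - treated as unset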
#
# django-newsletter documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 13 13:53:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from pkg_resources import get_distribution
# Determine whether rendering on RTD
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# Django bogus settings for autodoc
import django
from django.conf import settings
settings.configure(
SECRET_KEY='bogus', SITE_ID=1,
INSTALLED_APPS=[
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.auth',
'django.contrib.sites',
'sorl.thumbnail',
'newsletter'
],
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
}
)
django.setup()
from django.core.management import call_command
call_command('migrate', interactive=False)
autodoc_default_flags = ['members', 'show-inheritance']
autodoc_member_order = 'bysource'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-newsletter'
copyright = '2013, Mathijs de Bruin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
release = get_distribution('django-newsletter').version
# for example take major/minor
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['newsletter']
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-newsletterdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-newsletter.tex', 'django-newsletter Documentation',
'Mathijs de Bruin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-newsletter', 'django-newsletter Documentation',
['Mathijs de Bruin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-newsletter', 'django-newsletter Documentation',
'Mathijs de Bruin', 'django-newsletter', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
'django': ('http://django.readthedocs.org/en/latest/', None),
# This causes namespace collisions with references. :s
# 'python': ('http://python.readthedocs.org/en/latest/', None),
# 'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
}
| dsanders11/django-newsletter | docs/conf.py | Python | agpl-3.0 | 9,244 | 0.006274 |
#
# XendBootloader.py - Framework to run a boot loader for picking the kernel
#
# Copyright 2005-2006 Red Hat, Inc.
# Jeremy Katz <katzj@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import os, select, errno, stat, signal, tty
import random
import shlex
from xen.xend import sxp
from xen.util import mkdir, oshelp
from XendLogging import log
from XendError import VmError
import pty, termios, fcntl
from xen.lowlevel import ptsname
def bootloader(blexec, disk, dom, quiet = False, blargs = '', kernel = '',
ramdisk = '', kernel_args = ''):
"""Run the boot loader executable on the given disk and return a
config image.
@param blexec Binary to use as the boot loader
@param disk Disk to run the boot loader on.
@param dom DomainInfo representing the domain being booted.
@param quiet Run in non-interactive mode, just booting the default.
    @param blargs Arguments to pass to the bootloader.
    @param kernel Kernel image for the boot loader to use, if any.
    @param ramdisk Ramdisk for the boot loader to use, if any.
    @param kernel_args Extra kernel arguments to pass through, if any."""
if not os.access(blexec, os.X_OK):
msg = "Bootloader isn't executable"
log.error(msg)
raise VmError(msg)
if not os.access(disk, os.R_OK):
msg = "Disk isn't accessible"
log.error(msg)
raise VmError(msg)
if os.uname()[0] == "NetBSD" and disk.startswith('/dev/'):
disk = disk.replace("/dev/", "/dev/r")
mkdir.parents("/var/run/xend/boot/", stat.S_IRWXU)
while True:
fifo = "/var/run/xend/boot/xenbl.%s" %(random.randint(0, 32000),)
try:
os.mkfifo(fifo, 0600)
except OSError, e:
if (e.errno != errno.EEXIST):
raise
break
# We need to present the bootloader's tty as a pty slave that xenconsole
# can access. Since the bootloader itself needs a pty slave,
# we end up with a connection like this:
#
# xenconsole -- (slave pty1 master) <-> (master pty2 slave) -- bootloader
#
# where we copy characters between the two master fds, as well as
# listening on the bootloader's fifo for the results.
(m1, s1) = pty.openpty()
# On Solaris, the pty master side will get cranky if we try
# to write to it while there is no slave. To work around this,
# keep the slave descriptor open until we're done. Set it
# to raw terminal parameters, otherwise it will echo back
# characters, which will confuse the I/O loop below.
# Furthermore, a raw master pty device has no terminal
# semantics on Solaris, so don't try to set any attributes
# for it.
if os.uname()[0] != 'SunOS' and os.uname()[0] != 'NetBSD':
tty.setraw(m1)
os.close(s1)
else:
tty.setraw(s1)
fcntl.fcntl(m1, fcntl.F_SETFL, os.O_NDELAY)
slavename = ptsname.ptsname(m1)
dom.storeDom("console/tty", slavename)
# Release the domain lock here, because we definitely don't want
# a stuck bootloader to deny service to other xend clients.
from xen.xend import XendDomain
domains = XendDomain.instance()
domains.domains_lock.release()
(child, m2) = pty.fork()
if (not child):
args = [ blexec ]
if kernel:
args.append("--kernel=%s" % kernel)
if ramdisk:
args.append("--ramdisk=%s" % ramdisk)
if kernel_args:
args.append("--args=%s" % kernel_args)
if quiet:
args.append("-q")
args.append("--output=%s" % fifo)
if blargs:
args.extend(shlex.split(blargs))
args.append(disk)
try:
log.debug("Launching bootloader as %s." % str(args))
env = os.environ.copy()
env['TERM'] = 'vt100'
oshelp.close_fds()
os.execvpe(args[0], args, env)
except OSError, e:
print e
pass
os._exit(1)
# record that this domain is bootloading
dom.bootloader_pid = child
# On Solaris, the master pty side does not have terminal semantics,
# so don't try to set any attributes, as it will fail.
if os.uname()[0] != 'SunOS':
tty.setraw(m2);
fcntl.fcntl(m2, fcntl.F_SETFL, os.O_NDELAY);
while True:
try:
r = os.open(fifo, os.O_RDONLY)
except OSError, e:
if e.errno == errno.EINTR:
continue
break
fcntl.fcntl(r, fcntl.F_SETFL, os.O_NDELAY);
ret = ""
inbuf=""; outbuf="";
# filedescriptors:
# r - input from the bootloader (bootstring output)
# m1 - input/output from/to xenconsole
# m2 - input/output from/to pty that controls the bootloader
# The filedescriptors are NDELAY, so it's ok to try to read
# bigger chunks than may be available, to keep e.g. curses
# screen redraws in the bootloader efficient. m1 is the side that
# gets xenconsole input, which will be keystrokes, so a small number
# is sufficient. m2 is pygrub output, which will be curses screen
# updates, so a larger number (1024) is appropriate there.
#
# For writeable descriptors, only include them in the set for select
# if there is actual data to write, otherwise this would loop too fast,
# eating up CPU time.
while True:
wsel = []
if len(outbuf) != 0:
wsel = wsel + [m1]
if len(inbuf) != 0:
wsel = wsel + [m2]
sel = select.select([r, m1, m2], wsel, [])
try:
if m1 in sel[0]:
s = os.read(m1, 16)
inbuf += s
if m2 in sel[1]:
n = os.write(m2, inbuf)
inbuf = inbuf[n:]
except OSError, e:
if e.errno == errno.EIO:
pass
try:
if m2 in sel[0]:
s = os.read(m2, 1024)
outbuf += s
if m1 in sel[1]:
n = os.write(m1, outbuf)
outbuf = outbuf[n:]
except OSError, e:
if e.errno == errno.EIO:
pass
if r in sel[0]:
s = os.read(r, 128)
ret = ret + s
if len(s) == 0:
break
del inbuf
del outbuf
os.waitpid(child, 0)
os.close(r)
os.close(m2)
os.close(m1)
if os.uname()[0] == 'SunOS' or os.uname()[0] == 'NetBSD':
os.close(s1)
os.unlink(fifo)
# Re-acquire the lock to cover the changes we're about to make
# when we return to domain creation.
domains.domains_lock.acquire()
if dom.bootloader_pid is None:
msg = "Domain was died while the bootloader was running."
log.error(msg)
raise VmError, msg
dom.bootloader_pid = None
if len(ret) == 0:
msg = "Boot loader didn't return any data!"
log.error(msg)
raise VmError, msg
pin = sxp.Parser()
pin.input(ret)
pin.input_eof()
blcfg = pin.val
return blcfg
def bootloader_tidy(dom):
if hasattr(dom, "bootloader_pid") and dom.bootloader_pid is not None:
pid = dom.bootloader_pid
dom.bootloader_pid = None
os.kill(pid, signal.SIGKILL)
| sudkannan/xen-hv | tools/python/xen/xend/XendBootloader.py | Python | gpl-2.0 | 7,323 | 0.004779 |
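The I/O loop above multiplexes three descriptors with select() and only polls a writer once it has buffered data for it. The sketch below isolates that pattern between two generic pipe or pty file descriptors; it is a self-contained illustration, not Xen's code.
import os
import select
def relay(fd_a, fd_b):
    """Copy data both ways between fd_a and fd_b until either side hits EOF."""
    a_to_b = b''  # bytes read from fd_a, waiting to be written to fd_b
    b_to_a = b''  # bytes read from fd_b, waiting to be written to fd_a
    while True:
        writers = []
        if a_to_b:
            writers.append(fd_b)
        if b_to_a:
            writers.append(fd_a)
        readable, writable, _ = select.select([fd_a, fd_b], writers, [])
        if fd_a in readable:
            chunk = os.read(fd_a, 1024)
            if not chunk:
                return
            a_to_b += chunk
        if fd_b in readable:
            chunk = os.read(fd_b, 1024)
            if not chunk:
                return
            b_to_a += chunk
        if fd_b in writable and a_to_b:
            a_to_b = a_to_b[os.write(fd_b, a_to_b):]
        if fd_a in writable and b_to_a:
            b_to_a = b_to_a[os.write(fd_a, b_to_a):]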
# Brandon Michael
# cis142
# checkForQuadrant.py
# Goal: This program will keep asking for input values to check for the quadrant position,
# origin, x-axis and y-axis positions
# Notes: I used a while loop to make testing values easier and I used the input x,y
# Display program instructions
print("###################################################")
print("Quadrant Finder 1.0")
print("Enter the x and y coordinates to find the quadrant!")
print("Type [exit] to quit the program")
print("###################################################")
# Setup the x and y variables
xValue = None
yValue = None
# Setup a loop that breaks when you type exit
while True:
# Get the input values in a X,Y format
inputCoordinates = input("Type in coordinates [x,y]: ")
# Check if exit was typed, if so then exit the loop and end
if inputCoordinates == "exit":
break # stops the loop
# We want to make sure we can only strip out 2 input values
# and make sure there is a comma separating them
elif len(inputCoordinates.strip().split(',')) == 2 and inputCoordinates.count(',') == 1:
        # Reset the coordinates so values from a previous pair
        # do not leak into this iteration
        xValue = None
        yValue = None
        # Loop over the two numbers that are stripped out by the comma value
for coordinate in inputCoordinates.strip().split(','):
# This checks to see if we have set a value for x
# If it is still set to None then the first value is going to be xValue
if xValue is None:
xValue = int(coordinate)
# Since we are checking the xValue we can assume when the loop comes back
# a second time we can set it to yValue
else:
yValue = int(coordinate)
# If its a 0,0 value then its the Origin
if xValue == 0 and yValue == 0:
print("Origin")
else:
# If x = 0 and the y is greater or less than 0 its on the Y axis
if xValue == 0 and (yValue < 0 or yValue > 0):
print("Y - Axis")
# If x is greater or less than 0 and y = 0 its on the X axis
elif (xValue < 0 or xValue > 0) and yValue == 0:
print("X - Axis")
# Anything else and we need to check for quadrants
else:
# If x is a positive number and y is a negative positive its in Quadrant 1
if xValue > 0 and yValue > 0:
print("Quadrant I")
# If x is a negative number and y is a positive number then its in Quadrant 2
elif xValue < 0 and yValue > 0:
print("Quadrant II")
# If x is a negative number and y is negative number then its in Quadrant 3
elif xValue < 0 and yValue < 0:
print("Quadrant III")
# If x is a positive number and y is a negative number then its in Quadrant 4
elif xValue > 0 and yValue < 0:
print("Quadrant IV")
# If they typed anything but 2 numbers separated by a comma then ask for the input again
else:
print("Please type the input value as x,y")
print("Example: 1,-9")
| bwmichael/jccc-cis142-python | old/check-quadrant.py | Python | apache-2.0 | 3,124 | 0.003201 |
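For illustration only, the same branching logic can be distilled into a reusable function; this is a hypothetical refactor, not part of the assignment above.
def quadrant(x, y):
    """Return which quadrant or axis the point (x, y) falls on."""
    if x == 0 and y == 0:
        return "Origin"
    if x == 0:
        return "Y - Axis"
    if y == 0:
        return "X - Axis"
    if x > 0 and y > 0:
        return "Quadrant I"
    if x < 0 and y > 0:
        return "Quadrant II"
    if x < 0 and y < 0:
        return "Quadrant III"
    return "Quadrant IV"
print(quadrant(1, -9))  # Quadrant IV
print(quadrant(0, 5))   # Y - Axis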
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
def init_config(conf):
opts = [
cfg.IntOpt('api_version', default=2),
cfg.StrOpt('endpoint_type', default='publicURL')
]
conf.register_opts(opts, group="glance")
return conf.glance
| sajuptpm/murano | contrib/plugins/murano_exampleplugin/murano_exampleplugin/cfg.py | Python | apache-2.0 | 822 | 0 |
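A hedged usage sketch of init_config() with a fresh oslo.config object; parsing an empty argument list makes the registered defaults readable.
from oslo_config import cfg
conf = cfg.ConfigOpts()
glance_group = init_config(conf)   # registers the opts and returns conf.glance
conf([])                           # parse no CLI args so the defaults take effect
print(glance_group.api_version)    # 2
print(glance_group.endpoint_type)  # publicURL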
"""
Summary:
Factory class for building the AUnits from an ISIS data file.
This is used to read and build the parts of the ISIS dat file.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
There are a few functions in here that should be made protected. This
doesn't really make much difference in Python in terms of encapsulation,
but it makes it a bit clearer to any calling scripts that they might be
messing with something that they probablly shouldn't be messing with.
Comments are a bit over the top in this file. Need to go through and
decide what is helpful and what is just getting in the way.
Updates:
"""
from __future__ import unicode_literals
import os
from ship.utils.atool import ATool
from ship.utils.fileloaders.loader import ALoader
from ship.utils import filetools as ftools
from ship.fmp.fmpunitfactory import FmpUnitFactory
from ship.utils import utilfunctions as uf
from ship.fmp.datunits.isisunit import UnknownUnit
from ship.fmp.datcollection import DatCollection
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class DatLoader(ATool, ALoader):
"""
Isis data file (.DAT) I/O methods.
Factory for creating the .DAT file objects.
Identifies different section of the .DAT file and creates objects of
the different units. Also saves updated file.
All unknown data within the file is contained within UnkownSection units.
These read in the text as found and write out as found, with no knowledge
of the contents. Effectively bypassing the need to worry about parts that
aren't being used yet.
"""
def __init__(self):
"""Constructor."""
super(DatLoader, self).__init__()
logger.debug('Instantiating DatLoader')
self.cur_no_of_units = 0
self.contents = [] # Contents of dat file
self.temp_unit = None # AUnit
self.is_ied = False # If used to load an .ied file
self._ic_name_types = {}
# reach_info dictionary. Keeps track of the information needed to identify
# reach status. Contains:
# [0] = counter - iterated every time a new reach is started.
# [1] = same reach status - keeps track of whether it's in an existing
# reach or starting a new one.
self.reach_info = {'reach_number': 0, 'same_reach': False}
def loadFile(self, file_path, arg_dict={}):
"""Loads the ISIS .DAT file.
Splits it into objects for each unit type, initial conditions etc.
This is an epic if-else section for each unit type currently
represented.
Needs cleaning up and writing with a bit more style.
Easy to add another unit type, if it's not currently covered then it
will just be collected in the universal 'UnknownUnit' and printed
back out the same as it came in.
Args:
file_path (str): path to the .dat file to load.
Returns:
units - UnitCollection containing the dat file units or False if
they couldn't be loaded.
Raises:
IOError: If the file cannot be loaded or is empty.
AttributeError: if the file is not of an expected type (.dat/.ief).
See Also:
IsisUnitCollection
FactoryClasses
TODO: Decide if the observer style calls are ever going to be needed.
If they aren't then remove them rather than have them
cluttering up the file.
"""
line = ''
# Used to populate the data for the UnknownUnit
self.unknown_data = []
# Composite for all dat units
path_holder = ftools.PathHolder(file_path)
self.units = DatCollection(path_holder)
# self.units.file_dir, self.units.filename = os.path.split(file_path)
# self.units.filename = os.path.splitext(self.units.filename)[0]
if not uf.checkFileType(file_path, ext=['.dat', '.DAT']):
if not uf.checkFileType(file_path, ext=['.ied', '.IED']):
logger.error('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
raise AttributeError('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
else:
self.is_ied = True
contents = self.__loadFile(file_path)
if(contents == False):
raise IOError('Unable to load file at: ' + file_path)
return self.buildDat(contents, arg_dict)
def buildDat(self, contents, arg_dict={}):
"""
"""
self.contents = contents
# Counter for the number of rows that have been read from the
# file contents list.
i = 0
# Get an instance of the unit factory with the number of nodes in the file.
unit_factory = FmpUnitFactory()
# Dictionary containing the keys to identify units in the dat file
unit_vars = unit_factory.getUnitIdentifiers()
# Create a unit from the header data in the first few lines of the dat file.
if not self.is_ied:
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, 0, 'HEADER', 0)
in_unknown_section = False
# Now we can update the HeaderUnit subContents
self.updateSubContents()
in_unknown_section = False
while i < len(self.contents):
# Get the line and then split it to retrieve the first word.
# Check this word against the # unit_type keys we set above to see
line = self.contents[i]
temp_line = line.strip()
if temp_line:
first_word = line.split()[0].strip()
else:
first_word = 'Nothing'
if first_word in unit_vars:
# If building an UnknownUnit then create and reset
if(in_unknown_section == True):
self.createUnknownSection()
self.updateSubContents()
# Reset the reach for the UnknownUnit
unit_factory.same_reach = False
'''Call the unit creator function and get back the unit and the
updated contents list index.
Most of these variables are self explanatory, but
unit_vars[first_word] is the key for the unit type to make.
'''
# i, self.temp_unit = unit_factory.createUnit(self.contents, i,
# unit_vars[first_word], self.cur_no_of_units)
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, i,
first_word,
self.cur_no_of_units)
'''In case we got in but found something wasn't supported.
it's i-1 because we can't return onto the same line that was
read or it will loop forever, so store it here and move on
'''
if self.temp_unit == False:
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
self.unknown_data.append(self.contents[i].rstrip('\n'))
in_unknown_section = True
else:
self.updateSubContents()
in_unknown_section = False
else:
in_unknown_section = True
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
line = None
del self.unknown_data
return self.units
def createUnknownSection(self):
"""Builds unidentified sections from the .DAT file.
All currently un-dealt-with sections of the .DAT file are
incorporated into this.
Loads in chunks of the file 'as-is' and prints them out the same way.
"""
# logger.debug('Creating UnknownUnit - Unit No: ' + str(self.cur_no_of_units))
self.temp_unit = UnknownUnit()
self.temp_unit.readUnitData(self.unknown_data)
def getUnits(self):
"""Getter for imported units
Note:
Deprecated: Will be removed. Please use self.units directly.
Returns:
            DatCollection - The units loaded from the dat file.
"""
return self.units
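    # Illustrative sketch (not part of the original source): the deprecated
    # getUnits() accessor and the direct attribute access recommended by its
    # docstring are interchangeable, assuming `loader` is an instance of this
    # class:
    #     units = loader.getUnits()   # deprecated accessor
    #     units = loader.units        # preferred direct access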
def updateSubContents(self):
"""Updates the self.units.
Appends the new temp_unit to list of units and resets all the
variables.
"""
#logger.debug('In updateSubContents')
# Don't update node count here as we aren't adding any 'new' nodes
self.units.addUnit(self.temp_unit, update_node_count=False, no_copy=True)
self.cur_no_of_units += 1
del self.temp_unit
self.unknown_data = []
def __loadFile(self, filepath):
"""Load the .dat file into the contents list.
Args:
filepath: Path to the required DAT file.
Returns:
            The file contents as a list of lines if loaded ok, False otherwise.
"""
logger.info('loading File: ' + filepath)
contents = []
try:
contents = ftools.getFile(filepath)
except IOError:
logger.error('IOError - Unable to load file')
return False
        if contents is None:
logger.error('.DAT file is empty at: ' + filepath)
return False
return contents
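    # Descriptive summary (not part of the original source) of the load flow
    # implemented above: the file extension is checked (.dat/.DAT/.ied/.IED),
    # __loadFile() reads the raw lines via ftools.getFile(), and buildDat()
    # walks those lines, dispatching on the first word of each line through
    # FmpUnitFactory.createUnitFromFile(). Anything the factory does not
    # recognise is accumulated into an UnknownUnit so the file can be written
    # back out unchanged, and the result is the DatCollection held in
    # self.units.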
| duncan-r/SHIP | ship/utils/fileloaders/datloader.py | Python | mit | 9,666 | 0.001552 |
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import mock
import mox
from nova.api.ec2 import ec2utils
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.compute import test_compute
from nova.tests import fake_instance
from nova.tests import fake_instance_actions
from nova.tests import fake_notifier
from nova.tests.objects import test_migration
from nova import utils
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
if not hasattr(self, '_elevated'):
self._elevated = super(FakeContext, self).elevated()
return self._elevated
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
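    # Descriptive note (not part of the original source): the tests in this
    # mixin follow the standard mox workflow - StubOutWithMock() replaces the
    # callee, the expected calls are recorded, ReplayAll() switches mox into
    # replay mode and the recorded expectations are verified by the base test
    # case once the test finishes.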
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
        if self.db is None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_migration_update(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
migration_p = jsonutils.to_primitive(migration)
migration = self.conductor.migration_update(self.context, migration_p,
'finished')
self.assertEqual(migration['status'], 'finished')
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'])
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertTrue(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertFalse(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get(self):
aggregate_ref = self._setup_aggregate_with_host()
aggregate = self.conductor.aggregate_get(self.context,
aggregate_ref['id'])
self.assertEqual(jsonutils.to_primitive(aggregate_ref), aggregate)
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get_by_host(self):
self._setup_aggregate_with_host()
aggregates = self.conductor.aggregate_get_by_host(self.context, 'bar')
self.assertEqual(aggregates[0]['availability_zone'], 'foo')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args)
self.assertEqual(result, 'foo')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
'fake-end', 'fake-proj',
'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window_joined(
self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
def test_instance_destroy(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_destroy(self.context,
{'uuid': 'fake-uuid'})
self.assertEqual(result, 'fake-result')
def test_instance_info_cache_delete(self):
self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
db.instance_info_cache_delete(self.context, 'fake-uuid')
self.mox.ReplayAll()
self.conductor.instance_info_cache_delete(self.context,
{'uuid': 'fake-uuid'})
def test_flavor_get(self):
self.mox.StubOutWithMock(db, 'flavor_get')
db.flavor_get(self.context, 'fake-id').AndReturn('fake-type')
self.mox.ReplayAll()
result = self.conductor.instance_type_get(self.context, 'fake-id')
self.assertEqual(result, 'fake-type')
def test_vol_get_usage_by_time(self):
self.mox.StubOutWithMock(db, 'vol_get_usage_by_time')
db.vol_get_usage_by_time(self.context, 'fake-time').AndReturn(
'fake-usage')
self.mox.ReplayAll()
result = self.conductor.vol_get_usage_by_time(self.context,
'fake-time')
self.assertEqual(result, 'fake-usage')
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
fake_inst = {'uuid': 'fake-uuid',
'project_id': 'fake-project',
'user_id': 'fake-user',
'availability_zone': 'fake-az',
}
db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
fake_inst['uuid'],
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
False).AndReturn('fake-usage')
compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol',
22, 33, 44, 55, fake_inst,
'fake-update-time', False)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('conductor.%s' % self.conductor_manager.host,
msg.publisher_id)
self.assertEqual('volume.usage', msg.event_type)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake-info', msg.payload)
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], 'fake-values',
False).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
'fake-values', False)
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertIsNone(result)
def test_instance_fault_create(self):
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_fault_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host')
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
def test_notify_usage_exists(self):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': 'bw_usage',
'image_meta': {},
'extra': 'info',
}
instance = {
'system_metadata': [],
}
self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
notifications.bandwidth_usage(instance, 'start', True).AndReturn(
'bw_usage')
notifier = self.conductor_manager.notifier
compute_utils.notify_about_instance_usage(notifier,
self.context, instance,
'exists',
system_metadata={},
extra_usage_info=info)
self.mox.ReplayAll()
self.conductor.notify_usage_exists(self.context, instance,
system_metadata={},
extra_usage_info=dict(extra='info'))
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_network_migrate_instance_start(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_start')
self.conductor_manager.network_api.migrate_instance_start(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_start(self.context,
'instance',
'migration')
def test_network_migrate_instance_finish(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_finish')
self.conductor_manager.network_api.migrate_instance_finish(
self.context, 'instance', 'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_finish(self.context,
'instance',
'migration')
def test_quota_commit(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
quota.QUOTAS.commit(self.context, 'reservations', project_id=None,
user_id=None)
quota.QUOTAS.commit(self.context, 'reservations', project_id='proj',
user_id='user')
self.mox.ReplayAll()
self.conductor.quota_commit(self.context, 'reservations')
self.conductor.quota_commit(self.context, 'reservations', 'proj',
'user')
def test_quota_rollback(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.rollback(self.context, 'reservations', project_id=None,
user_id=None)
quota.QUOTAS.rollback(self.context, 'reservations', project_id='proj',
user_id='user')
self.mox.ReplayAll()
self.conductor.quota_rollback(self.context, 'reservations')
self.conductor.quota_rollback(self.context, 'reservations', 'proj',
'user')
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
def test_compute_unrescue(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'unrescue')
self.conductor_manager.compute_api.unrescue(self.context, 'instance')
self.mox.ReplayAll()
self.conductor.compute_unrescue(self.context, 'instance')
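# Descriptive note (not part of the original source): _BaseTestCase is mixed
# into ConductorTestCase, ConductorRPCAPITestCase and ConductorAPITestCase
# below, so every test defined above runs against the manager directly, over
# the conductor RPC API and through the top-level conductor API. Each
# subclass's setUp() supplies the matching self.conductor and
# self.conductor_manager objects.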
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def test_instance_info_cache_update(self):
fake_values = {'key1': 'val1', 'key2': 'val2'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_info_cache_update(self.context, 'fake-uuid',
fake_values)
self.mox.ReplayAll()
self.conductor.instance_info_cache_update(self.context,
fake_inst,
fake_values)
def test_migration_get(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
self.assertEqual(jsonutils.to_primitive(migration),
self.conductor.migration_get(self.context,
migration['id']))
def test_migration_get_unconfirmed_by_dest_compute(self):
self.mox.StubOutWithMock(db,
'migration_get_unconfirmed_by_dest_compute')
db.migration_get_unconfirmed_by_dest_compute(self.context,
'fake-window',
'fake-host')
self.mox.ReplayAll()
self.conductor.migration_get_unconfirmed_by_dest_compute(self.context,
'fake-window',
'fake-host')
def test_compute_confirm_resize(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'confirm_resize')
self.conductor_manager.compute_api.confirm_resize(
self.context, 'instance', migration='migration')
self.mox.ReplayAll()
self.conductor.compute_confirm_resize(self.context, 'instance',
'migration')
def test_migration_create(self):
inst = {'uuid': 'fake-uuid',
'host': 'fake-host',
'node': 'fake-node'}
self.mox.StubOutWithMock(db, 'migration_create')
db.migration_create(self.context.elevated(),
{'instance_uuid': inst['uuid'],
'source_compute': inst['host'],
'source_node': inst['node'],
'fake-key': 'fake-value'}).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.migration_create(self.context, inst,
{'fake-key': 'fake-value'})
self.assertEqual(result, 'result')
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id', 'device_name': 'foo'}
fake_bdm2 = {'id': 'fake-id', 'device_name': 'foo2'}
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(cells_rpcapi,
'bdm_update_or_create_at_top')
db.block_device_mapping_create(self.context,
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context, fake_bdm2,
create=True)
db.block_device_mapping_update(self.context, fake_bdm['id'],
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context,
fake_bdm2,
create=False)
db.block_device_mapping_update_or_create(
self.context, fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context,
fake_bdm2,
create=None)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_bdm2 = {'id': 'fake-bdm-2',
'instance_uuid': 'fake-uuid2',
'device_name': '',
'volume_id': 'fake-vol-id2'}
fake_inst = {'uuid': 'fake-uuid'}
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
self.mox.StubOutWithMock(cells_rpcapi, 'bdm_destroy_at_top')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
cells_rpcapi.bdm_destroy_at_top(self.context,
fake_bdm['instance_uuid'],
device_name=fake_bdm['device_name'])
db.block_device_mapping_destroy(self.context, 'fake-bdm-2')
cells_rpcapi.bdm_destroy_at_top(self.context,
fake_bdm2['instance_uuid'],
volume_id=fake_bdm2['volume_id'])
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
cells_rpcapi.bdm_destroy_at_top(self.context, fake_inst['uuid'],
device_name='fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
cells_rpcapi.bdm_destroy_at_top(self.context, fake_inst['uuid'],
volume_id='fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context,
[fake_bdm,
fake_bdm2])
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
device_name='fake-device')
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
volume_id='fake-volume')
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host')
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node')
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(rpc_common.ClientException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
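    # The service_get_* tests below drive _test_stubbed() with the db API
    # name to stub, the positional arguments expected at the db layer and the
    # keyword arguments passed to service_get_all_by(). db_result_listified
    # covers the compute-host case where the single result is wrapped in a
    # list, while db_exception exercises both the wrapped ClientException and
    # the original exception re-raised through ExceptionHelper.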
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host'))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic'))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host'))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
def test_compute_confirm_resize_with_objects(self):
# use an instance object rather than a dict
instance = self._create_fake_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance)
migration = test_migration.fake_db_migration()
mig_obj = migration_obj.Migration._from_db_object(
self.context.elevated(), migration_obj.Migration(),
migration)
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'confirm_resize')
self.conductor_manager.compute_api.confirm_resize(
self.context, inst_obj, migration=mig_obj)
self.mox.ReplayAll()
self.conductor.compute_confirm_resize(self.context, inst_obj,
mig_obj)
def _test_object_action(self, is_classmethod, raise_exception):
class TestObject(obj_base.NovaObject):
def foo(self, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
@classmethod
def bar(cls, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
obj = TestObject()
if is_classmethod:
result = self.conductor.object_class_action(
self.context, TestObject.obj_name(), 'bar', '1.0',
tuple(), {'raise_exception': raise_exception})
else:
updates, result = self.conductor.object_action(
self.context, obj, 'foo', tuple(),
{'raise_exception': raise_exception})
self.assertEqual('test', result)
def test_object_action(self):
self._test_object_action(False, False)
def test_object_action_on_raise(self):
self.assertRaises(rpc_common.ClientException,
self._test_object_action, False, True)
def test_object_class_action(self):
self._test_object_action(True, False)
def test_object_class_action_on_raise(self):
self.assertRaises(rpc_common.ClientException,
self._test_object_action, True, True)
def test_object_action_copies_object(self):
class TestObject(obj_base.NovaObject):
fields = {'dict': fields.DictOfStringsField()}
def touch_dict(self, context):
self.dict['foo'] = 'bar'
self.obj_reset_changes()
obj = TestObject()
obj.dict = {}
obj.obj_reset_changes()
updates, result = self.conductor.object_action(
self.context, obj, 'touch_dict', tuple(), {})
# NOTE(danms): If conductor did not properly copy the object, then
# the new and reference copies of the nested dict object will be
# the same, and thus 'dict' will not be reported as changed
self.assertIn('dict', updates)
self.assertEqual({'foo': 'bar'}, updates['dict'])
def test_aggregate_metadata_add(self):
aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
metadata = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
db.aggregate_metadata_add(
mox.IgnoreArg(), aggregate['id'], metadata, False).AndReturn(
metadata)
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_add(self.context,
aggregate,
metadata)
self.assertEqual(result, metadata)
def test_aggregate_metadata_delete(self):
aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
db.aggregate_metadata_delete(mox.IgnoreArg(), aggregate['id'], 'fake')
self.mox.ReplayAll()
self.conductor.aggregate_metadata_delete(self.context, aggregate,
'fake')
def test_security_group_get_by_instance(self):
fake_inst = {'uuid': 'fake-instance'}
self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
db.security_group_get_by_instance(
self.context, fake_inst['uuid']).AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.security_group_get_by_instance(self.context,
fake_inst)
self.assertEqual(result, 'it worked')
def test_security_group_rule_get_by_security_group(self):
fake_secgroup = {'id': 'fake-secgroup'}
self.mox.StubOutWithMock(db,
'security_group_rule_get_by_security_group')
db.security_group_rule_get_by_security_group(
self.context, fake_secgroup['id']).AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.security_group_rule_get_by_security_group(
self.context, fake_secgroup)
self.assertEqual(result, 'it worked')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor_manager = self.conductor_service.manager
self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, fake_bdm)
db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
db.block_device_mapping_update_or_create(self.context, fake_bdm)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context,
bdms=[fake_bdm])
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
device_name='fake-device')
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
volume_id='fake-volume')
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host'))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic'))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host'))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['arg'])
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
self.conductor_manager = self.conductor_service.manager
self.db = None
def _do_update(self, instance_uuid, **updates):
# NOTE(danms): the public API takes actual keyword arguments,
# so override the base class here to make the call correctly
return self.conductor.instance_update(self.context, instance_uuid,
**updates)
def test_bw_usage_get(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_get(*get_args)
self.assertEqual(result, 'foo')
def test_block_device_mapping_update_or_create(self):
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, 'fake-bdm')
db.block_device_mapping_update(self.context,
'fake-id', {'id': 'fake-id'})
db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
self.mox.ReplayAll()
self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
self.conductor.block_device_mapping_update_or_create(self.context,
'fake-bdm')
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context, [fake_bdm])
self.conductor.block_device_mapping_destroy_by_instance_and_device(
self.context, fake_inst, 'fake-device')
self.conductor.block_device_mapping_destroy_by_instance_and_volume(
self.context, fake_inst, 'fake-volume')
def _test_stubbed(self, name, *args, **kwargs):
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
args = args[1:]
else:
ctxt = self.context
db_exception = kwargs.get('db_exception')
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(ctxt, *args).AndRaise(db_exception)
else:
getattr(db, name)(ctxt, *args).AndReturn('fake-result')
if name == 'service_destroy':
            # TODO(russellb) This is a hack ... setUp() starts the conductor
# service. There is a cleanup step that runs after this test which
# also deletes the associated service record. This involves a call
# to db.service_destroy(), which we have stubbed out.
db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
getattr(self.conductor, name),
self.context, *args)
else:
result = getattr(self.conductor, name)(self.context, *args)
self.assertEqual(
result, 'fake-result' if kwargs.get('returns', True) else None)
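    # In this variant of _test_stubbed() the first positional argument may be
    # a FakeContext (e.g. the elevated context used by
    # instance_get_all_by_host_and_node), in which case it becomes the
    # context expected at the db layer. The service_destroy special case
    # records an extra expectation because the cleanup that runs after the
    # test also destroys the service record started in setUp().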
def test_service_get_all(self):
self._test_stubbed('service_get_all')
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic', 'topic')
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args', 'host', 'binary')
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host', 'host',
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args', 'host', 'binary',
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_service_create(self):
self._test_stubbed('service_create', {})
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
def test_service_update(self):
ctxt = self.context
self.mox.StubOutWithMock(db, 'service_update')
db.service_update(ctxt, '', {}).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_update(self.context, {'id': ''}, {})
self.assertEqual(result, 'fake-result')
def test_instance_get_all_by_host_and_node(self):
self._test_stubbed('instance_get_all_by_host_and_node',
self.context.elevated(), 'host', 'node')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(), 'host',
None).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context,
'host')
self.assertEqual(result, 'fake-result')
def test_wait_until_ready(self):
timeouts = []
calls = dict(count=0)
def fake_ping(context, message, timeout):
timeouts.append(timeout)
calls['count'] += 1
if calls['count'] < 15:
raise rpc_common.Timeout("fake")
self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
self.conductor.wait_until_ready(self.context)
self.assertEqual(timeouts.count(10), 10)
self.assertIn(None, timeouts)
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
self.db = db
def test_client_exceptions(self):
instance = self._create_fake_instance()
# NOTE(danms): The LocalAPI should not raise exceptions wrapped
# in ClientException. KeyError should be raised if an invalid
# update key is passed, so use that to validate.
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
def test_wait_until_ready(self):
# Override test in ConductorAPITestCase
pass
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
self.flags(use_local=True, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.LocalComputeTaskAPI)
def test_import_conductor_rpc(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.API)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.ComputeTaskAPI)
def test_import_conductor_override_to_local(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(use_local=True),
conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
def fake_db_instance_update(self, *args, **kwargs):
return None, None
self.stubs.Set(db, 'instance_update_and_get_original',
fake_db_instance_update)
ctxt = context.RequestContext('fake-user', 'fake-project')
conductor = conductor_api.LocalAPI()
updates = {}
for key in conductor_manager.allowed_updates:
if key in conductor_manager.datetime_fields:
updates[key] = timeutils.utcnow()
else:
updates[key] = 'foo'
conductor.instance_update(ctxt, 'fake-instance', **updates)
def test_allowed_keys_are_real(self):
instance = models.Instance()
keys = list(conductor_manager.allowed_updates)
# NOTE(danms): expected_task_state is a parameter that gets
# passed to the db layer, but is not actually an instance attribute
del keys[keys.index('expected_task_state')]
for key in keys:
self.assertTrue(hasattr(instance, key))
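# Descriptive note (not part of the original source): _BaseTaskTestCase plays
# the same role for the compute-task interfaces that _BaseTestCase plays for
# the data interfaces above. Its tests branch on the conductor type because
# the ComputeTaskAPI/LocalComputeTaskAPI front-ends expose
# live_migrate_instance() and resize_instance(), both of which are carried as
# the single migrate_server() call when going through the manager.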
class _BaseTaskTestCase(object):
def setUp(self):
super(_BaseTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_instance_actions.stub_out_action_events(self.stubs)
def test_live_migrate(self):
inst = fake_instance.fake_db_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
live_migrate.execute(self.context,
mox.IsA(instance_obj.Instance),
'destination',
'block_migration',
'disk_over_commit')
self.mox.ReplayAll()
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'live_migrate_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.live_migrate_instance(self.context, inst_obj,
'destination', 'block_migration', 'disk_over_commit')
else:
self.conductor.migrate_server(self.context, inst_obj,
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
def test_cold_migrate(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(
self.conductor_manager.compute_rpcapi, 'prep_resize')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'select_destinations')
inst = fake_instance.fake_db_instance(image_ref='image_ref')
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst, [])
flavor = flavors.get_default_flavor()
flavor['extra_specs'] = 'extra_specs'
request_spec = {'instance_type': flavor}
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_service,
'image_ref', mox.IsA(instance_obj.Instance)).AndReturn('image')
scheduler_utils.build_request_spec(
self.context, 'image',
[mox.IsA(instance_obj.Instance)],
instance_type=flavor).AndReturn(request_spec)
hosts = [dict(host='host1', nodename=None, limits={})]
self.conductor_manager.scheduler_rpcapi.select_destinations(
self.context, request_spec, {}).AndReturn(hosts)
filter_properties = {'limits': {}}
self.conductor_manager.compute_rpcapi.prep_resize(
self.context, 'image', mox.IsA(instance_obj.Instance),
mox.IsA(dict), 'host1', [], request_spec=request_spec,
filter_properties=filter_properties, node=None)
self.mox.ReplayAll()
scheduler_hint = {'filter_properties': {}}
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'resize_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.resize_instance(
self.context, inst_obj, {}, scheduler_hint, flavor, [])
else:
self.conductor.migrate_server(
self.context, inst_obj, scheduler_hint,
False, False, flavor, None, None, [])
def test_build_instances(self):
instance_type = flavors.get_default_flavor()
system_metadata = flavors.save_flavor_info({}, instance_type)
# NOTE(alaski): instance_type -> system_metadata -> instance_type
# loses some data (extra_specs). This build process is using
# scheduler/utils:build_request_spec() which extracts flavor from
        # system_metadata and will re-query the DB for extra_specs, so
# we need to test this properly
expected_instance_type = flavors.extract_flavor(
{'system_metadata': system_metadata})
expected_instance_type['extra_specs'] = 'fake-specs'
self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'run_instance')
db.flavor_extra_specs_get(
self.context,
instance_type['flavorid']).AndReturn('fake-specs')
self.conductor_manager.scheduler_rpcapi.run_instance(self.context,
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': {'system_metadata': system_metadata,
'uuid': 'fakeuuid'},
'instance_type': expected_instance_type,
'instance_uuids': ['fakeuuid', 'fakeuuid2'],
'block_device_mapping': 'block_device_mapping',
'security_group': 'security_groups',
'num_instances': 2},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks', is_first_time=True,
filter_properties={}, legacy_bdm_in_spec=False)
self.mox.ReplayAll()
self.conductor.build_instances(self.context,
instances=[{'uuid': 'fakeuuid',
'system_metadata': system_metadata},
{'uuid': 'fakeuuid2'}],
image={'fake_data': 'should_pass_silently'},
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks',
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_unshelve_instance_on_host(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'start_instance')
self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.compute_rpcapi.start_instance(self.context,
instance)
self.conductor_manager._delete_image(self.context,
'fake_image_id')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager, '_get_image')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager._get_image(self.context,
'fake_image_id').AndReturn('fake_image')
self.conductor_manager._schedule_instances(self.context,
'fake_image', filter_properties, instance).AndReturn(
[{'host': 'fake_host'}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', 'fake_image')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
def fake_schedule_instances(context, image, filter_properties,
*instances):
raise exc.NoValidHost(reason='')
with contextlib.nested(
mock.patch.object(self.conductor_manager, '_get_image',
return_value='fake_image'),
mock.patch.object(self.conductor_manager, '_schedule_instances',
fake_schedule_instances)
) as (_get_image, _schedule_instances):
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
_get_image.assert_has_calls([mock.call(self.context,
system_metadata['shelved_image_id'])])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager, '_get_image')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager._get_image(self.context,
'fake_image_id').AndReturn(None)
self.conductor_manager._schedule_instances(self.context,
None, filter_properties, instance).AndReturn(
[{'host': 'fake_host'}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', None)
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
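# Descriptive note (not part of the original source): the unshelve tests above
# cover the SHELVED on-host path (start_instance plus deleting the shelved
# image), the SHELVED_OFFLOADED schedule-and-rebuild path, the NoValidHost
# failure path that leaves the instance offloaded, and the volume-backed path
# where no shelved image exists.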
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
def setUp(self):
super(ConductorTaskTestCase, self).setUp()
self.conductor = conductor_manager.ComputeTaskManager()
self.conductor_manager = self.conductor
def test_migrate_server_fails_with_rebuild(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, True, None, None, None)
def test_migrate_server_fails_with_flavor(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, False, "dummy", None, None)
def _build_request_spec(self, instance):
return {
'instance_properties': {
'uuid': instance['uuid'], },
}
def test_migrate_server_deals_with_expected_exceptions(self):
instance = fake_instance.fake_db_instance(uuid='uuid',
vm_state=vm_states.ACTIVE)
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = exc.DestinationHypervisorTooOld()
live_migrate.execute(self.context, mox.IsA(instance_obj.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.DestinationHypervisorTooOld,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_migrate_server_deals_with_unexpected_exceptions(self):
instance = fake_instance.fake_db_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = IOError()
live_migrate.execute(self.context, mox.IsA(instance_obj.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.MigrationError,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_set_vm_state_and_notify(self):
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
scheduler_utils.set_vm_state_and_notify(
self.context, 'compute_task', 'method', 'updates',
'ex', 'request_spec', self.conductor.db)
self.mox.ReplayAll()
self.conductor._set_vm_state_and_notify(
self.context, 'method', 'updates', 'ex', 'request_spec')
def test_cold_migrate_no_valid_host_back_in_active_state(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref')
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()))
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(self.conductor.quotas, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_service,
'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_rpcapi.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.ACTIVE,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
self.conductor.quotas.rollback(self.context, resvs)
self.mox.ReplayAll()
self.conductor._cold_migrate(self.context, inst_obj,
'flavor', filter_props, resvs)
def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED)
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()))
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(self.conductor.quotas, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_service,
'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_rpcapi.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
self.conductor.quotas.rollback(self.context, resvs)
self.mox.ReplayAll()
self.conductor._cold_migrate(self.context, inst_obj,
'flavor', filter_props, resvs)
def test_cold_migrate_exception_host_in_error_state_and_raise(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED)
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()))
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
hosts = [dict(host='host1', nodename=None, limits={})]
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
'select_destinations')
self.mox.StubOutWithMock(scheduler_utils,
'populate_filter_properties')
self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
'prep_resize')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(self.conductor.quotas, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_service,
'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
self.conductor.scheduler_rpcapi.select_destinations(
self.context, request_spec, filter_props).AndReturn(hosts)
scheduler_utils.populate_filter_properties(filter_props,
hosts[0])
        # 'context' is expected to have been popped from the filter properties
        expected_filter_props = dict()
        # 'extra_specs' is expected to have been popped from the instance type
        expected_request_spec = dict(instance_type=dict())
exc_info = test.TestingException('something happened')
self.conductor.compute_rpcapi.prep_resize(
self.context, image, inst_obj,
'flavor', hosts[0]['host'], resvs,
request_spec=expected_request_spec,
filter_properties=expected_filter_props,
node=hosts[0]['nodename']).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
expected_request_spec)
self.conductor.quotas.rollback(self.context, resvs)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.conductor._cold_migrate,
self.context, inst_obj, 'flavor',
filter_props, resvs)
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
test_compute.BaseTestCase):
"""Conductor compute_task RPC namespace Tests."""
def setUp(self):
super(ConductorTaskRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_rpcapi.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
def setUp(self):
super(ConductorTaskAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
"""Conductor LocalComputeTaskAPI Tests."""
def setUp(self):
super(ConductorLocalComputeTaskAPITestCase, self).setUp()
self.conductor = conductor_api.LocalComputeTaskAPI()
self.conductor_manager = self.conductor._manager._target
| sacharya/nova | nova/tests/conductor/test_conductor.py | Python | apache-2.0 | 83,151 | 0.000457 |
"""
This state is used to create and manage ELBs.
Examples
========
.. code-block:: yaml
.webserver-elb:
elb.managed:
- name: webserver-elb
- region: us-west-1
- zones:
- us-west-1a
- us-west-1c
- listeners:
- [80, 80, 'http', 'http']
- [443, 80, 'https', 'http', 'my_ssl_certificate']
- subnets:
- subnet1
- subnet2
- security_groups:
- my_elb_security_group
- my_other_elb_security_group
- scheme: internet-facing
- health_check:
target: HTTP:80/health
timeout: 3
interval: 30
healthy_threshold: 4
unhealthy_threshold: 2
- policies:
80:
type: app
cookie_name: my_cookie
443:
type: lb
cookie_expire: 60
- instances:
- i-deadbeef
- i-01234abc
.bad-elb:
elb.absent:
- name: bad-elb
- region: us-west-1
.add-server:
elb.add:
- name: my-server
- region: us-west-1
- elb: webserver-elb
.rm-badserver:
elb.remove:
- name: badserver
- region: us-west-1
- elb: webserver-elb
"""
# These placeholder dunders prevent pylint from complaining; Salt's loader
# injects the real __opts__ and __salt__ at runtime.
__opts__ = {}
__salt__ = {}
def managed(
name,
region,
zones,
listeners=None,
subnets=None,
security_groups=None,
scheme=None,
health_check=None,
policies=None,
instances=None):
"""
Ensure an ELB exists
The arguments are the same as the ``elb.manage`` module
"""
return __salt__['aws_util.run_aws_module'](
'elb.manage', 'ELB', name, region, name, region, zones, listeners,
subnets, security_groups, scheme, health_check, policies, instances,
__opts__['test'])
def absent(name, region):
"""
Ensure an ELB does not exist
Parameters
----------
name : str
The name of the ELB
region : str
The AWS region the ELB is in
"""
return __salt__['aws_util.run_aws_module'](
'elb.delete', 'ELB', name, region, name, region, test=__opts__['test'])
def add(
name,
region,
elb):
"""
Add a server to an ELB
Parameters
----------
name : str
The name or instance id of the server
region : str
The AWS region
elb : str
The name of the ELB to add the server to
"""
return __salt__['aws_util.run_aws_module'](
'elb.add', "ELB", elb, region, name, region,
elb, test=__opts__['test'])
def remove(
name,
region,
elb):
"""
Remove a server from an ELB
Parameters
----------
name : str
The name or instance id of the server
region : str
The AWS region
elb : str
The name of the ELB to remove the server from
"""
return __salt__['aws_util.run_aws_module'](
'elb.remove', "ELB", elb, region, name, region,
elb, test=__opts__['test'])
| mathcamp/aws-formula | _states/elb.py | Python | mit | 3,106 | 0.001288 |
# Copyright (c) 2015 Fabian Kochem
| jameshy/libtree | tests/__init__.py | Python | mit | 35 | 0 |
"""."""
import numpy as np
import pyaccel
from siriuspy.namesys import SiriusPVName as _PVName
from siriuspy.devices import SOFB
from ..optimization import SimulAnneal
from ..utils import ThreadedMeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class Params(_ParamsBaseClass):
"""."""
def __init__(self):
"""."""
super().__init__()
self.deltas = {
'CH': 0.3e-3, 'CV': 0.15e-3, 'InjSept': 0.3e-3, 'InjKckr': 0.3e-3}
self.wait_time = 2
self.timeout_orb = 10
self.num_points = 10
class MeasureRespMatTBBO(_BaseClass):
"""."""
def __init__(self, all_corrs):
"""."""
super().__init__(params=Params(), target=self._measure_matrix_thread)
self.devices = {
'bo_sofb': SOFB(SOFB.DEVICES.BO),
'tb_sofb': SOFB(SOFB.DEVICES.TB),
}
self._all_corrs = all_corrs
self._matrix = dict()
self._corrs_to_measure = []
@property
def trajx(self):
"""."""
return np.hstack(
[self.devices['tb_sofb'].trajx, self.devices['bo_sofb'].trajx])
@property
def trajy(self):
"""."""
return np.hstack(
[self.devices['tb_sofb'].trajy, self.devices['bo_sofb'].trajy])
def wait(self, timeout=10):
"""."""
self.devices['tb_sofb'].wait_buffer(timeout=timeout)
self.devices['bo_sofb'].wait_buffer(timeout=timeout)
def reset(self, wait=0):
"""."""
if self._stopevt.wait(wait):
return False
self.devices['tb_sofb'].cmd_reset()
self.devices['bo_sofb'].cmd_reset()
if self._stopevt.wait(1):
return False
return True
@property
def corr_names(self):
"""."""
corrs = sorted([
c for c in self._all_corrs if not c.dev.startswith('CV')])
corrs.extend(sorted([
c for c in self._all_corrs if c.dev.startswith('CV')]))
return corrs
@property
def corrs_to_measure(self):
"""."""
if not self._corrs_to_measure:
return sorted(self._all_corrs.keys() - self._matrix.keys())
return self._corrs_to_measure
@corrs_to_measure.setter
def corrs_to_measure(self, value):
"""."""
self._corrs_to_measure = sorted([_PVName(n) for n in value])
@property
def matrix(self):
"""."""
mat = np.zeros([len(self._all_corrs), 2*self.trajx.size], dtype=float)
for i, cor in enumerate(self.corr_names):
line = self._matrix.get(cor)
if line is not None:
mat[i, :] = line
return mat
@property
def nr_points(self):
"""."""
return min(
self.devices['tb_sofb'].nr_points,
self.devices['bo_sofb'].nr_points)
@nr_points.setter
def nr_points(self, value):
self.devices['tb_sofb'].nr_points = int(value)
self.devices['bo_sofb'].nr_points = int(value)
def _measure_matrix_thread(self):
self.nr_points = self.params.num_points
corrs = self.corrs_to_measure
print('Starting...')
for i, cor in enumerate(corrs):
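            # Each response-matrix line is a finite difference: store the
            # trajectory at the corrector's original strength (with a minus
            # sign), apply an extra kick of sig*delta (sig is -1 for a
            # positive original kick, +1 otherwise), store the new trajectory,
            # then divide the summed difference by sig*delta.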
print('{0:2d}|{1:2d}: {2:20s}'.format(i, len(corrs), cor), end='')
orb = []
delta = self.params.deltas[cor.dev]
origkick = self._all_corrs[cor].strength
print('orig ', end='')
if not self.reset(self.params.wait_time):
break
self.wait(self.params.timeout_orb)
orb.append(-np.hstack([self.trajx, self.trajy]))
sig = -2*int(origkick > 0) + 1
print('pos' if sig > 0 else 'neg')
self._all_corrs[cor].strength = origkick + sig*delta
if not self.reset(self.params.wait_time):
break
self.wait(self.params.timeout_orb)
orb.append(np.hstack([self.trajx, self.trajy]))
self._all_corrs[cor].strength = origkick
if self._stopevt.is_set():
print('Stopped!')
break
else:
self._matrix[cor] = np.array(orb).sum(axis=0)/(sig*delta)
else:
print('Finished!')
def calc_model_respmatTBBO(
tb_mod, model, corr_names, elems, meth='middle', ishor=True):
"""."""
bpms = np.array(pyaccel.lattice.find_indices(model, 'fam_name', 'BPM'))[1:]
_, cumulmat = pyaccel.tracking.find_m44(
model, indices='open', fixed_point=[0, 0, 0, 0])
matrix = np.zeros((len(corr_names), 2*bpms.size))
for idx, corr in enumerate(corr_names):
elem = elems[corr]
indcs = np.array(elem.model_indices)
if corr.sec == 'BO':
print('Booster ', corr)
indcs += len(tb_mod)
cortype = elem.magnet_type
kxl = kyl = ksxl = ksyl = 0
if corr.dev == 'InjSept':
# kxl = tb_mod[indcs[0][1]].KxL
# kyl = tb_mod[indcs[0][1]].KyL
# ksxl = tb_mod[indcs[0][1]].KsxL
# ksyl = tb_mod[indcs[0][1]].KsyL
midx = pyaccel.lattice.find_indices(
tb_mod, 'fam_name', 'InjSeptM66')
for m in midx:
kxl += tb_mod[m].KxL
kyl += tb_mod[m].KyL
ksxl += tb_mod[m].KsxL
ksyl += tb_mod[m].KsyL
if not ishor and corr.dev in {'InjSept', 'InjKckr'}:
cortype = 'vertical'
matrix[idx, :] = _get_respmat_line(
cumulmat, indcs, bpms, length=elem.model_length,
kxl=kxl, kyl=kyl, ksxl=ksxl, ksyl=ksyl,
cortype=cortype, meth=meth)
return matrix
def _get_respmat_line(
cumul_mat, indcs, bpms, length, kxl=0, kyl=0, ksxl=0, ksyl=0,
cortype='vertical', meth='middle'):
idx = 3 if cortype.startswith('vertical') else 1
cor = indcs[0]
if meth.lower().startswith('end'):
cor = indcs[-1]+1
elif meth.lower().startswith('mid'):
# create a symplectic integrator of second order
# for the last half of the element:
drift = np.eye(4, dtype=float)
drift[0, 1] = length/2 / 2
drift[2, 3] = length/2 / 2
quad = np.eye(4, dtype=float)
quad[1, 0] = -kxl/2
quad[3, 2] = -kyl/2
quad[1, 2] = -ksxl/2
quad[3, 0] = -ksyl/2
half_cor = np.dot(np.dot(drift, quad), drift)
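        # half_cor transports through the element's second half as
        # drift(L/4) * thin kick (half the integrated strengths) * drift(L/4).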
m0c = cumul_mat[cor]
if meth.lower().startswith('mid'):
m0c = np.linalg.solve(half_cor, m0c)
mat = np.linalg.solve(m0c.T, cumul_mat[bpms].transpose((0, 2, 1)))
mat = mat.transpose(0, 2, 1)
# if meth.lower().startswith('mid'):
# mat = np.dot(mat, half_cor)
respx = mat[:, 0, idx]
respy = mat[:, 2, idx]
respx[bpms < indcs[0]] = 0
respy[bpms < indcs[0]] = 0
return np.hstack([respx, respy])
class FindSeptQuad(SimulAnneal):
"""."""
def __init__(self, tb_model, bo_model, corr_names, elems,
respmat, nturns=5, save=False, in_sept=True):
"""."""
super().__init__(save=save)
self.tb_model = tb_model
self.bo_model = bo_model
self.corr_names = corr_names
self.elems = elems
self.nturns = nturns
self.respmat = respmat
self.in_sept = in_sept
def initialization(self):
"""."""
return
def calc_obj_fun(self):
"""."""
if self.in_sept:
sept_idx = pyaccel.lattice.find_indices(
self.tb_model, 'fam_name', 'InjSept')
else:
sept_idx = self.elems['TB-04:MA-CV-2'].model_indices
k, ks = self._position
pyaccel.lattice.set_attribute(self.tb_model, 'K', sept_idx, k)
pyaccel.lattice.set_attribute(self.tb_model, 'Ks', sept_idx, ks)
respmat = calc_model_respmatTBBO(
self.tb_model, self.bo_model, self.corr_names, self.elems)
respmat -= self.respmat
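        # Objective function: RMS of the residual between the model response
        # matrix and the measured one.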
return np.sqrt(np.mean(respmat*respmat))
| lnls-fac/apsuite | apsuite/commisslib/measure_respmat_tbbo.py | Python | mit | 8,002 | 0.000125 |
import json
import mimetypes
import os
import re
import sys
from copy import copy
from functools import partial
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
# JSON Vendor Tree spec: https://tools.ietf.org/html/rfc6838#section-3.2
JSON_CONTENT_TYPE_RE = re.compile(r'^application\/(vnd\..+\+)?json')
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most Web servers by removing the content of
responses for HEAD requests, 1xx, 204, and 304 responses. Ensure
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
class ClientHandler(BaseHandler):
"""
A HTTP Handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most Web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
if not filename:
filename = key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
class RequestFactory:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': '/',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = parsed.path
# If there are parameters, add them
if parsed.params:
path += ";" + parsed.params
path = unquote_to_bytes(path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode('iso-8859-1')
def get(self, path, data=None, secure=False, **extra):
"""Construct a GET request."""
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"""Construct a POST request."""
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"""Construct a HEAD request."""
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"""Construct a TRACE request."""
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PUT request."""
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PATCH request."""
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a DELETE request."""
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': method,
'SERVER_PORT': '443' if secure else '80',
'wsgi.url_scheme': 'https' if secure else 'http',
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': content_type,
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
# WSGI requires latin-1 encoded strings. See get_path_info().
query_string = force_bytes(parsed[4]).decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super().__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""Store exceptions when they are generated by a view."""
self.exc_info = sys.exc_info()
@property
def session(self):
"""Return the current session variables."""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
def request(self, **request):
"""
The master request method. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
_, exc_value, _ = self.exc_info
self.exc_info = None
raise exc_value
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using GET."""
response = super().get(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""Request a response from the server using POST."""
response = super().post(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using HEAD."""
response = super().head(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Request a response from the server using OPTIONS."""
response = super().options(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PUT."""
response = super().put(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PATCH."""
response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a DELETE request to the server."""
response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""Send a TRACE request to the server."""
response = super().trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Set the Factory to appear as if it has successfully logged into a site.
Return True if login is possible; False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
def get_backend():
from django.contrib.auth import load_backend
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
if hasattr(backend, 'get_user'):
return backend_path
if backend is None:
backend = get_backend()
user.backend = backend
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""Log out the user by removing the cookies and session object."""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if not hasattr(response, '_json'):
if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
response._json = json.loads(response.content.decode(), **extra)
return response._json
def _handle_redirects(self, response, **extra):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
# Prepend the request path to handle relative path redirects
path = url.path
if not path.startswith('/'):
path = urljoin(response.request['PATH_INFO'], path)
response = self.get(path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
| uranusjr/django | django/test/client.py | Python | bsd-3-clause | 26,876 | 0.000595 |
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import logging
import threading
import gtk
from xl.nls import gettext as _
from xl import xdg, settings, event, devices
from xlgui import collection
logger = logging.getLogger(__name__)
class ManagerDialog(object):
"""
the device manager dialog
"""
def __init__(self, parent, main):
self.main = main
self.parent = parent
self.device_manager = self.main.exaile.devices
self.builder = gtk.Builder()
self.builder.add_from_file(xdg.get_data_path('ui/device_manager.ui'))
self.window = self.builder.get_object('device_manager')
self.window.set_transient_for(self.parent)
self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
self.window.connect('delete-event', self.on_close)
self.builder.connect_signals({
'on_btn_connect_clicked': self.on_connect,
'on_btn_disconnect_clicked': self.on_disconnect,
'on_btn_edit_clicked': self.on_edit,
'on_btn_add_clicked': self.on_add,
'on_btn_remove_clicked': self.on_remove,
'on_btn_close_clicked': self.on_close,
})
# TODO: make these actually work. For now, they are hidden
for item in ('add', 'edit', 'remove'):
self.builder.get_object('btn_%s' % item).destroy()
        # object should really be devices.Device, but it doesn't work :/
self.model = gtk.ListStore(object, gtk.gdk.Pixbuf, str, str)
self.tree = self.builder.get_object('tree_devices')
self.tree.set_model(self.model)
render = gtk.CellRendererPixbuf()
col = gtk.TreeViewColumn(_("Icon"), render)
col.add_attribute(render, "pixbuf", 1)
self.tree.append_column(col)
render = gtk.CellRendererText()
col = gtk.TreeViewColumn(_("Device"), render)
col.set_expand(True)
col.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
col.add_attribute(render, "text", 2)
self.tree.append_column(col)
render = gtk.CellRendererText()
col = gtk.TreeViewColumn(_("Driver"), render)
col.add_attribute(render, "text", 3)
self.tree.append_column(col)
self.populate_tree()
event.add_callback(self.populate_tree, 'device_added')
event.add_callback(self.populate_tree, 'device_removed')
def populate_tree(self, *args):
self.model.clear()
for d in self.device_manager.list_devices():
self.model.append([d, None, d.get_name(), d.__class__.__name__])
def _get_selected_devices(self):
sel = self.tree.get_selection()
(model, paths) = sel.get_selected_rows()
devices = []
for path in paths:
iter = self.model.get_iter(path)
device = self.model.get_value(iter, 0)
devices.append(device)
return devices
def on_connect(self, *args):
devices = self._get_selected_devices()
for d in devices:
d.connect()
def on_disconnect(self, *args):
devices = self._get_selected_devices()
for d in devices:
d.disconnect()
def on_edit(self, *args):
logger.warning("NOT IMPLEMENTED")
def on_add(self, *args):
logger.warning("NOT IMPLEMENTED")
def on_remove(self, *args):
logger.warning("NOT IMPLEMENTED")
def on_close(self, *args):
self.window.hide()
self.window.destroy()
def run(self):
self.window.show_all()
| eri-trabiccolo/exaile | xlgui/devices.py | Python | gpl-2.0 | 4,732 | 0.000634 |
import nose
from nose.tools import *
from unittest import TestCase
from datetime import datetime, timedelta
from repo.date_iterator import DateIterator
class DateIteratorTests(TestCase):
def test_date_iterator_returns_self_on_iter(self):
d = DateIterator(datetime.now(), datetime.now())
eq_(d, d.__iter__())
def test_date_iterator_gives_first_date_as_start_date(self):
start = datetime(2011, 3, 3)
end = datetime(2011, 3, 4)
d = DateIterator(start, end)
first = d.next()
eq_(start, first)
def test_date_iterator_gives_next_date_30_days_by_default(self):
start = datetime(2011, 3, 3)
next = datetime(2011, 4, 2)
end = datetime(2011, 4, 3)
d = DateIterator(start, end)
first = d.next()
second = d.next()
eq_(next, second)
def test_date_iterator_gives_next_date_7_days(self):
start = datetime(2011, 3, 3)
next = datetime(2011, 3, 10)
end = datetime(2011, 3, 14)
d = DateIterator(start, end, delta=timedelta(days=7))
first = d.next()
second = d.next()
eq_(next, second)
@raises(StopIteration)
def test_date_iterator_raises_stop_exception(self):
start = datetime(2011, 3, 3)
end = datetime(2011, 4, 1)
d = DateIterator(start, end)
first = d.next()
second = d.next()
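# A minimal sketch of the iterator these tests assume; the real implementation
# lives in repo.date_iterator and may differ in detail:
#
#   class DateIterator(object):
#       def __init__(self, start, end, delta=timedelta(days=30)):
#           self.current, self.end, self.delta = start, end, delta
#       def __iter__(self):
#           return self
#       def next(self):
#           if self.current > self.end:
#               raise StopIteration
#           value, self.current = self.current, self.current + self.delta
#           return value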
| markdrago/caboose | src/test/repo/date_iterator_tests.py | Python | mit | 1,409 | 0.002839 |
"""
dal_dht22 v1.0.0
Auteur: Bruno DELATTRE
Date : 19/09/2016
"""
from lib import com_logger
class DAL_DHT22:
def __init__(self, connection, cursor):
self.connection = connection
self.cursor = cursor
self.logger = com_logger.Logger('DHT22 DAL')
""" Select"""
def get_dht22(self, lastdate):
try:
self.cursor.execute('SELECT date, name, temperature, humidity FROM DHT22 WHERE date > "' + lastdate + '"')
rows = self.cursor.fetchall()
return rows
except Exception as exp:
self.logger.error(repr(exp))
self.connection.rollback()
def get_lastdata(self):
try:
self.cursor.execute('SELECT MAX(date) FROM DHT22')
rows = self.cursor.fetchall()
return rows
except Exception as exp:
self.logger.error(repr(exp))
self.connection.rollback()
""" Insert """
def set_dht22(self, name, temperature, humidity):
try:
self.cursor.execute(
'INSERT INTO DHT22 (date, name, temperature, humidity) VALUES (datetime("now","localtime"),"' + str(name) + '","' + str(temperature)[:4] + '","' + str(humidity)[:4] + '")')
self.connection.commit()
except Exception as exp:
self.logger.error(repr(exp))
self.connection.rollback()
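# Illustrative usage (assumes an open sqlite3 connection and cursor with a
# DHT22 table):
#
#   dal = DAL_DHT22(connection, cursor)
#   dal.set_dht22('living-room', 21.5, 48.2)
#   rows = dal.get_dht22('2016-09-19 00:00:00')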
| delattreb/TemperatureHumidityServer | src/dal/dal_dht22.py | Python | gpl-3.0 | 1,407 | 0.004975 |
"""SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from . import cc
def generate(env):
"""Add Builders and construction variables for gcc to an Environment."""
cc.generate(env)
env['CXX'] = 'CC'
env['SHOBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
return env.Detect('cc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| timj/scons | src/engine/SCons/Tool/sgicc.py | Python | mit | 1,780 | 0.001685 |
import os
import types
import binascii
from django.db import models
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_str as smart_text
from keyczar import keyczar
class EncryptedFieldException(Exception):
pass
# Simple wrapper around keyczar to standardize the initialization
# of the crypter object and allow for others to extend as needed.
class KeyczarWrapper(object):
def __init__(self, keyname, *args, **kwargs):
self.crypter = keyczar.Crypter.Read(keyname)
def encrypt(self, cleartext):
return self.crypter.Encrypt(cleartext)
def decrypt(self, ciphertext):
return self.crypter.Decrypt(ciphertext)
class EncryptedFieldMixin(object, metaclass=models.SubfieldBase):
"""
EncryptedFieldMixin will use keyczar to encrypt/decrypt data that is being
marshalled in/out of the database into application Django model fields.
This is very helpful in ensuring that data at rest is encrypted and
minimizing the effects of SQL Injection or insider access to sensitive
databases containing sensitive information.
The most basic use of this mixin is to have a single encryption key for all
data in your database. This lives in a Keyczar key directory specified by:
the setting - settings.ENCRYPTED_FIELDS_KEYDIR -
Optionally, you can name specific encryption keys for data-specific purposes
in your model such as:
    special_data = EncryptedCharField( ..., keyname='special_data' )
The Mixin will handle the encryption/decryption seamlessly, but native
SQL queries may need a way to filter data that is encrypted. Using the
optional 'prefix' kwarg will prepend a static identifier to your encrypted
data before it is written to the database.
There are other use cases where you may not wish to encrypt all of the data
in a database. For example, if you have a survey application that allows
users to enter arbitrary questions and answers, users may request sensitive
information to be stored such as SSN, Driver License #, Credit Card, etc.
Your application can detect these sensitive fields, manually encrypt the
data and store that in the database mixed with other cleartext data.
The model should then only decrypt the specific fields needed. Use the
kwarg 'decrypt_only' to specify this behavior and the model will not
encrypt the data inbound and only attempt to decrypt outbound.
Encrypting data will significantly change the size of the data being stored
and this may cause issues with your database column size. Before storing
any encrypted data in your database, ensure that you have the proper
column width otherwise you may experience truncation of your data depending
on the database engine in use.
To have the mixin enforce max field length, either:
a) set ENFORCE_MAX_LENGTH = True in your settings files
b) set 'enforce_max_length' to True in the kwargs of your model.
A ValueError will be raised if the encrypted length of the data (including
prefix if specified) is greater than the max_length of the field.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the EncryptedFieldMixin with the following
optional settings:
* keyname: The name of the keyczar key
* crypter_klass: A custom class that is extended from Keyczar.
* prefix: A static string prepended to all encrypted data
* decrypt_only: Boolean whether to only attempt to decrypt data coming
from the database and not attempt to encrypt the data
being written to the database.
"""
# Allow for custom class extensions of Keyczar.
self._crypter_klass = kwargs.pop('crypter_klass', KeyczarWrapper)
self.keyname = kwargs.pop('keyname', None)
# If settings.DEFAULT_KEY_DIRECTORY, then the key
# is located in DEFAULT_KEY_DIRECTORY/keyname
if self.keyname:
if hasattr(settings, 'DEFAULT_KEY_DIRECTORY'):
self.keydir = os.path.join(
settings.DEFAULT_KEY_DIRECTORY,
self.keyname
)
else:
raise ImproperlyConfigured(
'You must set settings.DEFAULT_KEY_DIRECTORY'
'when using the keyname kwarg'
)
# If the keyname is not defined on a per-field
# basis, then check for the global data encryption key.
if not self.keyname and hasattr(settings, 'ENCRYPTED_FIELDS_KEYDIR'):
self.keydir = settings.ENCRYPTED_FIELDS_KEYDIR
# If we still do not have a keydir, then raise an exception
if not self.keydir:
raise ImproperlyConfigured(
'You must set settings.ENCRYPTED_FIELDS_KEYDIR '
'or name a key with kwarg `keyname`'
)
# The name of the keyczar key without path for logging purposes.
self.keyname = os.path.dirname(self.keydir)
# Prefix encrypted data with a static string to allow filtering
# of encrypted data vs. non-encrypted data using vanilla MySQL queries.
self.prefix = kwargs.pop('prefix', '')
# Allow for model decryption-only, bypassing encryption of data.
# Useful for models that have a sparse amount of data that is required
# to be encrypted.
self.decrypt_only = kwargs.pop('decrypt_only', False)
self._crypter = self._crypter_klass(self.keydir)
# Ensure the encrypted data does not exceed the max_length
# of the database. Data truncation is a possibility otherwise.
self.enforce_max_length = getattr(settings, 'ENFORCE_MAX_LENGTH', False)
if not self.enforce_max_length:
self.enforce_max_length = kwargs.pop('enforce_max_length', False)
super(EncryptedFieldMixin, self).__init__(*args, **kwargs)
def crypter(self):
return self._crypter
def get_internal_type(self):
return 'TextField'
def to_python(self, value):
if value is None or not isinstance(value, str):
return value
if self.prefix and value.startswith(self.prefix):
value = value[len(self.prefix):]
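        # Failures in the try block below are deliberately swallowed: per the
        # class docstring the column may hold a mix of encrypted and plain
        # values, so anything keyczar cannot decrypt is passed through
        # unchanged to the parent to_python().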
try:
value = self.crypter().decrypt(value)
# value = value.decode('unicode_escape')
except keyczar.errors.KeyczarError:
pass
except UnicodeEncodeError:
pass
except binascii.Error:
pass
return super(EncryptedFieldMixin, self).to_python(value)
def get_prep_value(self, value):
value = super(EncryptedFieldMixin, self).get_prep_value(value)
if value is None or value == '' or self.decrypt_only:
return value
if isinstance(value, str):
value = value.encode('unicode_escape')
# value = value.encode('ascii')
else:
value = str(value)
return self.prefix + self.crypter().encrypt(value)
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
if self.enforce_max_length:
if (
value
and hasattr(self, 'max_length')
and self.max_length
and len(value) > self.max_length
):
raise ValueError(
'Field {0} max_length={1} encrypted_len={2}'.format(
self.name,
self.max_length,
len(value),
)
)
return value
class EncryptedCharField(EncryptedFieldMixin, models.CharField):
pass
class EncryptedTextField(EncryptedFieldMixin, models.TextField):
pass
class EncryptedDateTimeField(EncryptedFieldMixin, models.DateTimeField):
pass
class EncryptedIntegerField(EncryptedFieldMixin, models.IntegerField):
@cached_property
def validators(self):
"""
See issue https://github.com/defrex/django-encrypted-fields/issues/7
Need to keep all field validators, but need to change `get_internal_type` on the fly
to prevent fail in django 1.7.
"""
self.get_internal_type = lambda: 'IntegerField'
return models.IntegerField.validators.__get__(self)
class EncryptedDateField(EncryptedFieldMixin, models.DateField):
pass
class EncryptedFloatField(EncryptedFieldMixin, models.FloatField):
pass
class EncryptedEmailField(EncryptedFieldMixin, models.EmailField):
pass
class EncryptedBooleanField(EncryptedFieldMixin, models.BooleanField):
pass
try:
from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], [r'^encrypted_fields\.fields\.\w+Field'])
except ImportError:
pass
| gerhc/django-encrypted-fields | encrypted_fields/fields.py | Python | mit | 9,173 | 0.000654 |
# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking.
# Requires python 1.5.2 or better.
"""Cross-platform (posix/nt) API for flock-style file locking.
Synopsis:
import portalocker
file = open("somefile", "r+")
portalocker.lock(file, portalocker.LOCK_EX)
file.seek(12)
file.write("foo")
file.close()
If you know what you're doing, you may choose to
portalocker.unlock(file)
before closing the file, but why?
Methods:
lock( file, flags )
unlock( file )
Constants:
LOCK_EX
LOCK_SH
LOCK_NB
Exceptions:
LockException
Notes:
For the 'nt' platform, this module requires the Python Extensions for Windows.
Be aware that this may not work as expected on Windows 95/98/ME.
History:
I learned the win32 technique for locking files from sample code
provided by John Nielsen <nielsenjf@my-deja.com> in the documentation
that accompanies the win32 modules.
Author: Jonathan Feinberg <jdf@pobox.com>,
Lowell Alleman <lalleman@mfps.com>,
Rick van Hattem <Rick.van.Hattem@Fawo.nl>
Version: 0.3
URL: https://github.com/WoLpH/portalocker
"""
__all__ = [
"lock",
"unlock",
"LOCK_EX",
"LOCK_SH",
"LOCK_NB",
"LockException",
]
import os
class LockException(Exception):
# Error codes:
LOCK_FAILED = 1
if os.name == 'nt':
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0 # the default
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
# is there any reason not to reuse the following structure?
__overlapped = pywintypes.OVERLAPPED()
elif os.name == 'posix':
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
else:
raise RuntimeError("PortaLocker only defined for nt and posix platforms")
if os.name == 'nt':
def lock(file, flags):
hfile = win32file._get_osfhandle(file.fileno())
try:
win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
except pywintypes.error, exc_value:
# error: (33, 'LockFileEx', 'The process cannot access the file because another process has locked a portion of the file.')
if exc_value[0] == 33:
raise LockException(LockException.LOCK_FAILED, exc_value[2])
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
def unlock(file):
hfile = win32file._get_osfhandle(file.fileno())
try:
win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
except pywintypes.error, exc_value:
if exc_value[0] == 158:
# error: (158, 'UnlockFileEx', 'The segment is already unlocked.')
# To match the 'posix' implementation, silently ignore this error
pass
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
elif os.name == 'posix':
def lock(file, flags):
try:
fcntl.flock(file.fileno(), flags)
except IOError, exc_value:
# The exception code varies on different systems so we'll catch
# every IO error
raise LockException(*exc_value)
def unlock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
if __name__ == '__main__':
from time import time, strftime, localtime
import sys
import portalocker
log = open('log.txt', "a+")
portalocker.lock(log, portalocker.LOCK_EX)
timestamp = strftime("%m/%d/%Y %H:%M:%S\n", localtime(time()))
log.write( timestamp )
print "Wrote lines. Hit enter to release lock."
dummy = sys.stdin.readline()
log.close()
| dsuch/ConcurrentLogHandler | src/portalocker.py | Python | apache-2.0 | 3,780 | 0.003704 |
import os
import shutil
import logging
import csv
from dateutil import parser
from django.dispatch import receiver
from hs_core.signals import pre_create_resource, pre_add_files_to_resource, \
pre_delete_file_from_resource, post_add_files_to_resource, post_create_resource, \
pre_metadata_element_create, pre_metadata_element_update
from hs_core.hydroshare import utils, delete_resource_file_only, resource_modified
from hs_app_timeseries.models import TimeSeriesResource, TimeSeriesMetaData
from .forms import SiteValidationForm, VariableValidationForm, MethodValidationForm, \
ProcessingLevelValidationForm, TimeSeriesResultValidationForm, UTCOffSetValidationForm
from hs_file_types.models.timeseries import extract_metadata, validate_odm2_db_file, \
extract_cv_metadata_from_blank_sqlite_file, validate_csv_file, add_blank_sqlite_file
FILE_UPLOAD_ERROR_MESSAGE = "(Uploaded file was not added to the resource)"
@receiver(pre_create_resource, sender=TimeSeriesResource)
def resource_pre_create_handler(sender, **kwargs):
# if needed more actions can be taken here before the TimeSeries resource is created
pass
@receiver(pre_add_files_to_resource, sender=TimeSeriesResource)
def pre_add_files_to_resource_handler(sender, **kwargs):
# file upload is not allowed if the resource already
# has either a sqlite file or a csv file
resource = kwargs['resource']
files = kwargs['files']
validate_files_dict = kwargs['validate_files']
source_names = kwargs['source_names']
if __debug__:
assert(isinstance(source_names, list))
if files or source_names:
if resource.has_sqlite_file or resource.has_csv_file:
validate_files_dict['are_files_valid'] = False
validate_files_dict['message'] = 'Resource already has the necessary content files.'
@receiver(pre_delete_file_from_resource, sender=TimeSeriesResource)
def pre_delete_file_from_resource_handler(sender, **kwargs):
# if any of the content files (sqlite or csv) is deleted then reset the 'is_dirty' attribute
# for all extracted metadata to False
resource = kwargs['resource']
def reset_metadata_elements_is_dirty(elements):
# filter out any non-dirty element
elements = [element for element in elements if element.is_dirty]
for element in elements:
element.is_dirty = False
element.save()
if resource.metadata.is_dirty:
TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(is_dirty=False)
# metadata object is_dirty attribute for some reason can't be set using the following
# 2 lines of code
# resource.metadata.is_dirty=False
# resource.metadata.save()
reset_metadata_elements_is_dirty(resource.metadata.sites.all())
reset_metadata_elements_is_dirty(resource.metadata.variables.all())
reset_metadata_elements_is_dirty(resource.metadata.methods.all())
reset_metadata_elements_is_dirty(resource.metadata.processing_levels.all())
reset_metadata_elements_is_dirty(resource.metadata.time_series_results.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_variable_types.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_variable_names.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_speciations.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_elevation_datums.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_site_types.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_method_types.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_units_types.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_statuses.all())
reset_metadata_elements_is_dirty(resource.metadata.cv_aggregation_statistics.all())
@receiver(post_add_files_to_resource, sender=TimeSeriesResource)
def post_add_files_to_resource_handler(sender, **kwargs):
resource = kwargs['resource']
files = kwargs['files']
validate_files_dict = kwargs['validate_files']
user = kwargs['user']
source_names = kwargs['source_names']
if __debug__:
assert(isinstance(source_names, list))
    if files:
        file_name = files[0].name
    elif source_names:
        file_name = os.path.basename(source_names[0])
    else:
        # nothing was uploaded, so there is no file to process
        return
# extract metadata from the just uploaded file
uploaded_file_to_process = None
uploaded_file_ext = ''
for res_file in resource.files.all():
_, res_file_name, uploaded_file_ext = utils.get_resource_file_name_and_extension(res_file)
if res_file_name == file_name:
uploaded_file_to_process = res_file
break
if uploaded_file_to_process:
if uploaded_file_ext == ".sqlite":
_process_uploaded_sqlite_file(user, resource, uploaded_file_to_process,
validate_files_dict,
delete_existing_metadata=True)
elif uploaded_file_ext == ".csv":
_process_uploaded_csv_file(resource, uploaded_file_to_process, validate_files_dict,
user, delete_existing_metadata=True)
@receiver(post_create_resource, sender=TimeSeriesResource)
def post_create_resource_handler(sender, **kwargs):
resource = kwargs['resource']
validate_files_dict = kwargs['validate_files']
user = kwargs['user']
# extract metadata from the just uploaded file
res_file = resource.files.all().first()
if res_file:
# check if the uploaded file is a sqlite file or csv file
file_ext = utils.get_resource_file_name_and_extension(res_file)[2]
if file_ext == '.sqlite':
# metadata can exist at this point if a timeseries resource is created
# using REST API since the API caller can pass metadata information. Before
# metadata can be extracted from the sqlite file and populated to database, existing
# metadata needs to be deleted.
_process_uploaded_sqlite_file(user, resource, res_file, validate_files_dict,
delete_existing_metadata=True)
elif file_ext == '.csv':
_process_uploaded_csv_file(resource, res_file, validate_files_dict, user,
delete_existing_metadata=False)
# since we are extracting metadata after resource creation
# metadata xml files need to be regenerated - so need to set the
# dirty bag flags
utils.set_dirty_bag_flag(resource)
def _process_uploaded_csv_file(resource, res_file, validate_files_dict, user,
delete_existing_metadata=True):
# get the csv file from iRODS to a temp directory
fl_obj_name = utils.get_file_from_irods(res_file)
validate_err_message = validate_csv_file(fl_obj_name)
if not validate_err_message:
# first delete relevant existing metadata elements
if delete_existing_metadata:
TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(is_dirty=False)
_delete_extracted_metadata(resource)
# delete the sqlite file if it exists
_delete_resource_file(resource, ".sqlite")
# add the blank sqlite file
add_blank_sqlite_file(resource, upload_folder='')
resource_modified(resource, user, overwrite_bag=False)
# populate CV metadata django models from the blank sqlite file
extract_cv_metadata_from_blank_sqlite_file(resource)
else: # file validation failed
# delete the invalid file just uploaded
delete_resource_file_only(resource, res_file)
validate_files_dict['are_files_valid'] = False
validate_err_message += "{}".format(FILE_UPLOAD_ERROR_MESSAGE)
validate_files_dict['message'] = validate_err_message
# cleanup the temp csv file
if os.path.exists(fl_obj_name):
shutil.rmtree(os.path.dirname(fl_obj_name))
def _process_uploaded_sqlite_file(user, resource, res_file, validate_files_dict,
delete_existing_metadata=True):
    # check if it is a sqlite file
fl_ext = utils.get_resource_file_name_and_extension(res_file)[2]
if fl_ext == '.sqlite':
# get the file from iRODS to a temp directory
fl_obj_name = utils.get_file_from_irods(res_file)
validate_err_message = validate_odm2_db_file(fl_obj_name)
if not validate_err_message:
# first delete relevant existing metadata elements
if delete_existing_metadata:
TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(is_dirty=False)
_delete_extracted_metadata(resource)
extract_err_message = extract_metadata(resource, fl_obj_name)
if extract_err_message:
# delete the invalid file
delete_resource_file_only(resource, res_file)
# cleanup any extracted metadata
_delete_extracted_metadata(resource)
validate_files_dict['are_files_valid'] = False
extract_err_message += "{}".format(FILE_UPLOAD_ERROR_MESSAGE)
validate_files_dict['message'] = extract_err_message
else:
# set metadata is_dirty to False
TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(is_dirty=False)
# delete the csv file if it exists
_delete_resource_file(resource, ".csv")
utils.resource_modified(resource, user, overwrite_bag=False)
else: # file validation failed
# delete the invalid file just uploaded
delete_resource_file_only(resource, res_file)
validate_files_dict['are_files_valid'] = False
validate_err_message += "{}".format(FILE_UPLOAD_ERROR_MESSAGE)
validate_files_dict['message'] = validate_err_message
# cleanup the temp file
if os.path.exists(fl_obj_name):
shutil.rmtree(os.path.dirname(fl_obj_name))
else:
# delete the invalid file
delete_resource_file_only(resource, res_file)
validate_files_dict['are_files_valid'] = False
err_message = "The uploaded file not a sqlite file. {}"
err_message += err_message.format(FILE_UPLOAD_ERROR_MESSAGE)
validate_files_dict['message'] = err_message
@receiver(pre_metadata_element_create, sender=TimeSeriesResource)
def metadata_element_pre_create_handler(sender, **kwargs):
element_name = kwargs['element_name'].lower()
request = kwargs['request']
return _validate_metadata(request, element_name)
@receiver(pre_metadata_element_update, sender=TimeSeriesResource)
def metadata_element_pre_update_handler(sender, **kwargs):
element_name = kwargs['element_name'].lower()
request = kwargs['request']
return _validate_metadata(request, element_name)
def _validate_metadata(request, element_name):
if element_name == "site":
element_form = SiteValidationForm(request.POST)
elif element_name == 'variable':
element_form = VariableValidationForm(request.POST)
elif element_name == 'method':
element_form = MethodValidationForm(request.POST)
elif element_name == 'processinglevel':
element_form = ProcessingLevelValidationForm(request.POST)
elif element_name == 'timeseriesresult':
element_form = TimeSeriesResultValidationForm(request.POST)
elif element_name == 'utcoffset':
element_form = UTCOffSetValidationForm(request.POST)
else:
raise Exception("Invalid metadata element name:{}".format(element_name))
if element_form.is_valid():
return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}
else:
return {'is_valid': False, 'element_data_dict': None, "errors": element_form.errors}
def _delete_extracted_metadata(resource):
resource.metadata.title.delete()
if resource.metadata.description:
resource.metadata.description.delete()
TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(value_counts={})
resource.metadata.creators.all().delete()
resource.metadata.contributors.all().delete()
resource.metadata.coverages.all().delete()
resource.metadata.subjects.all().delete()
resource.metadata.relations.all().delete()
resource.metadata.sites.delete()
resource.metadata.variables.delete()
resource.metadata.methods.delete()
resource.metadata.processing_levels.delete()
resource.metadata.time_series_results.delete()
if resource.metadata.utc_offset:
resource.metadata.utc_offset.delete()
# delete CV lookup django tables
resource.metadata.cv_variable_types.all().delete()
resource.metadata.cv_variable_names.all().delete()
resource.metadata.cv_speciations.all().delete()
resource.metadata.cv_elevation_datums.all().delete()
resource.metadata.cv_site_types.all().delete()
resource.metadata.cv_method_types.all().delete()
resource.metadata.cv_units_types.all().delete()
resource.metadata.cv_statuses.all().delete()
resource.metadata.cv_mediums.all().delete()
resource.metadata.cv_aggregation_statistics.all().delete()
# add the title element as "Untitled resource"
res_title = 'Untitled resource'
resource.metadata.create_element('title', value=res_title)
# add back the resource creator as the creator in metadata
if resource.creator.first_name:
first_creator_name = "{first_name} {last_name}".format(
first_name=resource.creator.first_name, last_name=resource.creator.last_name)
else:
first_creator_name = resource.creator.username
first_creator_email = resource.creator.email
resource.metadata.create_element('creator', name=first_creator_name, email=first_creator_email,
order=1)
def _validate_csv_file(resource, uploaded_csv_file_name):
err_message = "Uploaded file is not a valid timeseries csv file."
log = logging.getLogger()
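    # Example of a CSV layout that satisfies the checks below (values are
    # illustrative only): string headings, at least two columns, no duplicate
    # headings, a datetime first column and numeric data columns.
    #
    #     Date,Streamflow,Temperature
    #     2017-01-01 00:00:00,1.25,10.3
    #     2017-01-01 00:30:00,1.30,10.1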
with open(uploaded_csv_file_name, 'r') as fl_obj:
csv_reader = csv.reader(fl_obj, delimiter=',')
# read the first row
header = next(csv_reader)
header = [el.strip() for el in header]
if any(len(h) == 0 for h in header):
err_message += " Column heading is missing."
log.error(err_message)
return err_message
# check that there are at least 2 headings
if len(header) < 2:
err_message += " There needs to be at least 2 columns of data."
log.error(err_message)
return err_message
# check the header has only string values
for hdr in header:
try:
float(hdr)
err_message += " Column heading must be a string."
log.error(err_message)
return err_message
except ValueError:
pass
# check that there are no duplicate column headings
if len(header) != len(set(header)):
err_message += " There are duplicate column headings."
log.error(err_message)
return err_message
# process data rows
for row in csv_reader:
# check that data row has the same number of columns as the header
if len(row) != len(header):
err_message += " Number of columns in the header is not same as the data columns."
log.error(err_message)
return err_message
# check that the first column data is of type datetime
try:
parser.parse(row[0])
except Exception:
err_message += " Data for the first column must be a date value."
log.error(err_message)
return err_message
            # check that the data values (2nd column onwards) are numeric
for data_value in row[1:]:
try:
float(data_value)
except ValueError:
err_message += " Data values must be numeric."
log.error(err_message)
return err_message
return None
def _delete_resource_file(resource, file_ext):
for res_file in resource.files.all():
_, _, res_file_ext = utils.get_resource_file_name_and_extension(res_file)
if res_file_ext == file_ext:
delete_resource_file_only(resource, res_file)
| hydroshare/hydroshare | hs_app_timeseries/receivers.py | Python | bsd-3-clause | 16,631 | 0.002465 |
"""Test for Remote Execution
:Requirement: Remoteexecution
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: RemoteExecution
:Assignee: pondrejk
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from nailgun import client
from nailgun.entity_mixins import TaskFailedError
from robottelo.api.utils import wait_for_tasks
CAPSULE_TARGET_VERSION = '6.10.z'
@pytest.mark.tier4
def test_positive_run_capsule_upgrade_playbook(capsule_configured, default_sat):
"""Run Capsule Upgrade playbook against an External Capsule
:id: 9ec6903d-2bb7-46a5-8002-afc74f06d83b
:steps:
1. Create a Capsule VM, add REX key.
2. Run the Capsule Upgrade Playbook.
:expectedresults: Capsule is upgraded successfully
:CaseImportance: Medium
"""
template_id = (
default_sat.api.JobTemplate()
.search(query={'search': 'name="Capsule Upgrade Playbook"'})[0]
.id
)
capsule_configured.add_rex_key(satellite=default_sat)
job = default_sat.api.JobInvocation().run(
synchronous=False,
data={
'job_template_id': template_id,
'inputs': {
'target_version': CAPSULE_TARGET_VERSION,
'whitelist_options': 'repositories-validate,repositories-setup',
},
'targeting_type': 'static_query',
'search_query': f'name = {capsule_configured.hostname}',
},
)
wait_for_tasks(f'resource_type = JobInvocation and resource_id = {job["id"]}')
result = default_sat.api.JobInvocation(id=job['id']).read()
assert result.succeeded == 1
result = default_sat.execute('foreman-maintain health check')
assert result.status == 0
for line in result.stdout:
assert 'FAIL' not in line
result = default_sat.api.SmartProxy(
id=default_sat.api.SmartProxy(name=default_sat.hostname).search()[0].id
).refresh()
feature_list = [feat['name'] for feat in result['features']]
assert {'Discovery', 'Dynflow', 'Ansible', 'SSH', 'Logs', 'Pulp'}.issubset(feature_list)
@pytest.mark.destructive
def test_negative_run_capsule_upgrade_playbook_on_satellite(default_sat):
"""Run Capsule Upgrade playbook against the Satellite itself
:id: 99462a11-5133-415d-ba64-4354da539a34
:steps:
1. Add REX key to the Satellite server.
2. Run the Capsule Upgrade Playbook.
3. Check the job output for proper failure reason.
:expectedresults: Should fail
:CaseImportance: Medium
"""
sat = default_sat.nailgun_host
template_id = (
default_sat.api.JobTemplate()
.search(query={'search': 'name="Capsule Upgrade Playbook"'})[0]
.id
)
default_sat.add_rex_key(satellite=default_sat)
with pytest.raises(TaskFailedError) as error:
default_sat.api.JobInvocation().run(
data={
'job_template_id': template_id,
'inputs': {
'target_version': CAPSULE_TARGET_VERSION,
'whitelist_options': "repositories-validqqate,repositories-setup",
},
'targeting_type': "static_query",
'search_query': f"name = {sat.name}",
}
)
assert 'A sub task failed' in error.value.args[0]
job = default_sat.api.JobInvocation().search(
query={'search': f'host={sat.name},status=failed,description="Capsule Upgrade Playbook"'}
)[0]
response = client.get(
f'{default_sat.url}/api/job_invocations/{job.id}/hosts/{sat.id}',
auth=(default_sat.username, default_sat.password),
verify=False,
)
assert 'This playbook cannot be executed on a Satellite server.' in response.text
| rplevka/robottelo | tests/foreman/api/test_remoteexecution.py | Python | gpl-3.0 | 3,759 | 0.001862 |
#!/usr/bin/python
# Tool to compare MPlayer translation files against a base file. Reports
# conflicting definitions, mismatching arguments, extra definitions
# not present in the base file and (optionally) missing definitions.
# Written by Uoti Urpala
import sys
import re
def parse(filename):
r = {}
f = open(filename)
it = iter(f)
cur = ''
for line in it:
line = line.strip()
if not line.startswith('#define'):
while line and line[-1] == '\\':
line = it.next().strip()
continue
        try:
            _, name, value = line.split(None, 2)
        except ValueError:
            # "#define NAME" line with no value part - record it as empty
            name = line.split()[1]
            value = '""'
value = value.strip('"')
while line[-1] == '\\':
line = it.next().strip()
value += line.rstrip('\\').strip('"')
if name in r:
print 'Conflict: ', name
print r[name]
print value
print
r[name] = value
f.close()
return r
def compare(base, other, show_missing=False):
r = re.compile('%[^diouxXeEfFgGaAcspn%]*[diouxXeEfFgGaAcspn%]')
missing = []
for key in base:
if key not in other:
missing.append(key)
continue
if re.findall(r, base[key]) != re.findall(r, other[key]):
print 'Mismatch: ', key
print base[key]
print other[key]
print
del other[key]
if other:
extra = other.keys()
extra.sort()
print 'Extra: ', ' '.join(extra)
if show_missing and missing:
missing.sort()
print 'Missing: ', ' '.join(missing)
if len(sys.argv) < 3:
print 'Usage:\n'+sys.argv[0]+' [--missing] base_helpfile otherfile1 '\
'[otherfile2 ...]'
sys.exit(1)
i = 1
show_missing = False
if sys.argv[i] in ( '--missing', '-missing' ):
show_missing = True
i = 2
base = parse(sys.argv[i])
for filename in sys.argv[i+1:]:
print '*****', filename
compare(base, parse(filename), show_missing)
print '\n'
| philipl/mplayer | TOOLS/mphelp_check.py | Python | gpl-2.0 | 2,073 | 0.002412 |
# contextlib (decompiled)
import sys
from functools import wraps
from warnings import warn
__all__ = ['contextmanager', 'nested', 'closing']
class GeneratorContextManager(object):
def __init__(self, gen):
self.gen = gen
def __enter__(self):
try:
return self.gen.next()
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
self.gen.next()
except StopIteration:
return
raise RuntimeError("generator didn't stop")
else:
if value is None:
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
return exc is not value
except:
if sys.exc_info()[1] is not value:
raise
return
def contextmanager(func):
@wraps(func)
def helper(*args, **kwds):
return GeneratorContextManager(func(*args, **kwds))
return helper
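# Usage sketch for contextmanager (illustrative, not part of the original
# module): the decorated generator must yield exactly once; code before the
# yield runs on __enter__, code after it on __exit__.
#
#     @contextmanager
#     def opened(path):
#         f = open(path)
#         try:
#             yield f
#         finally:
#             f.close()
#
#     with opened('log.txt') as f:
#         data = f.read()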
@contextmanager
def nested(*managers):
warn('With-statements now directly support multiple context managers', DeprecationWarning, 3)
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
raise exc[0], exc[1], exc[2]
return
class closing(object):
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close() | DedMemez/ODS-August-2017 | contextlib.py | Python | apache-2.0 | 2,267 | 0.002647 |
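# Usage sketch for closing (illustrative): wrap an object that has a close()
# method but is not itself a context manager, e.g. a urllib response.
#
#     import urllib
#     with closing(urllib.urlopen('http://example.com')) as page:
#         data = page.read()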
""" User factory """
import factory
from smserver import models
from test.factories import base
from test.factories.room_factory import RoomFactory
class UserFactory(base.BaseFactory):
""" Classic user name """
class Meta(base.BaseMeta):
model = models.User
name = factory.Sequence(lambda n: "User %s" % (n+1))
rank = 1
stepmania_version = "123"
@classmethod
def _after_postgeneration(cls, obj, _create, _results):
obj._room_level = {}
class AdminFactory(UserFactory):
""" Create an Admin user """
rank = 10
class PrivilegeFactory(base.BaseFactory):
""" Classic user name """
class Meta(base.BaseMeta):
model = models.Privilege
level = 1
room = factory.SubFactory(RoomFactory)
user = factory.SubFactory(UserFactory)
class UserWithRoomFactory(UserFactory):
""" User with a new room """
room = factory.SubFactory(RoomFactory)
def user_with_room_privilege(level=1, **kwargs):
""" Return a User with privileges for a room """
user = UserWithRoomFactory(**kwargs)
PrivilegeFactory(user=user, room=user.room, level=level)
return user
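# Usage sketch (illustrative; the level value is arbitrary):
#
#     user = user_with_room_privilege(level=5)
#     # user.room is created automatically and the privilege links the two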
| Nickito12/stepmania-server | test/factories/user_factory.py | Python | mit | 1,150 | 0.00087 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
# 'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bandit'
copyright = u'2015, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme_options = {}
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
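# To build the HTML docs locally (sketch, assuming Sphinx is installed and the
# paths match this repository layout):
#     sphinx-build -b html docs/source docs/build/html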
| austin987/bandit | docs/source/conf.py | Python | apache-2.0 | 2,480 | 0 |
# pylint: disable=unused-import, unused-variable, missing-docstring
def _readline():
try:
import readline
except ImportError:
print("Module readline not available.")
else:
import rlcompleter
readline.parse_and_bind("tab: complete")
import os
histfile = os.path.join(os.environ["HOME"], 'python', '.history')
try:
readline.read_history_file(histfile)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)
del os, histfile
_readline()
del _readline
import sys
sys.ps1 = "\001\033[01;33m\002>>>\001\033[00m\002 "
sys.ps2 = "\001\033[01;33m\002...\001\033[00m\002 "
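# Note (sketch): this file only takes effect for interactive sessions when the
# interpreter is pointed at it, e.g. in a shell profile:
#     export PYTHONSTARTUP=~/python/startup.py
# (the exact path is an assumption; use wherever this file is installed)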
| rwstauner/run_control | python/startup.py | Python | mit | 665 | 0.010526 |
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
#root of project: ...../src
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# This is a non-secret key. A different key is used in the productions settings file.
SECRET_KEY = '8(@^b-s07o7a(*durcp#sx!-8=cnq2-shiq61!7nznn=h$az7n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# ALLOWED_HOSTS = ["www.hackerspace.sd72.bc.ca", "hackerspace.sd72.bc.ca"]
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'timberline.hackerspace@gmail.com'
EMAIL_HOST_PASSWORD = ""
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Static files (CSS, JavaScript, Images) ####################
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# The absolute path to the directory where collectstatic will collect static files for deployment.
# Set in production settings for deployment
STATIC_ROOT = "/home/couture/www/hackerspace/static"
# STATIC_ROOT = "/home/90158/www/hackerspace/static"
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_in_project", "static_root"),
# '/var/www/static/',
)
MEDIA_URL = "/media/"
# The absolute path to the directory where collectstatic will collect static files for deployment.
# Set properly in production settings for deployment
MEDIA_ROOT = "/home/couture/www/hackerspace/media"
# MEDIA_ROOT = "/home/90158/www/hackerspace/media"
# END STATIC #######################################
| kinglyduck/hackerspace | src/hackerspace_online/settings/production.py | Python | gpl-2.0 | 1,941 | 0.003606 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rocblas(CMakePackage):
"""Radeon Open Compute BLAS library"""
homepage = "https://github.com/ROCmSoftwarePlatform/rocBLAS/"
url = "https://github.com/ROCmSoftwarePlatform/rocBLAS/archive/rocm-3.5.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala']
version('3.10.0', sha256='9bfd0cf99662192b1ac105ab387531cfa9338ae615db80ed690c6a14d987e0e8')
version('3.9.0', sha256='3ecd2d9fd2be0e1697a191d143a2d447b53a91ae01afb50231d591136ad5e2fe')
version('3.8.0', sha256='568a9da0360349b1b134d74cc67cbb69b43c06eeca7c33b50072cd26cd3d8900')
version('3.7.0', sha256='9425db5f8e8b6f7fb172d09e2a360025b63a4e54414607709efc5acb28819642')
version('3.5.0', sha256='8560fabef7f13e8d67da997de2295399f6ec595edfd77e452978c140d5f936f0')
tensile_architecture = ('all', 'gfx803', 'gfx900', 'gfx906', 'gfx908')
variant('tensile_architecture', default='all', values=tensile_architecture, multi=False)
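    # Example (sketch): target a single GPU architecture instead of 'all', e.g.
    #     spack install rocblas tensile_architecture=gfx906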
depends_on('cmake@3:', type='build')
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0']:
depends_on('rocm-cmake@' + ver, type='build', when='@' + ver)
depends_on('rocm-device-libs@' + ver, type='build', when='@' + ver)
depends_on('hip@' + ver, when='@' + ver)
depends_on('comgr@' + ver, type='build', when='@' + ver)
# used in Tensile
depends_on('rocm-smi@' + ver, type='build', when='@' + ver)
depends_on('llvm-amdgpu@' + ver, type='build', when='@' + ver)
# This is the default library format since 3.7.0
depends_on('msgpack-c@3:', when='@3.7:')
depends_on('python', type='build')
depends_on('py-virtualenv', type='build')
depends_on('perl-file-which', type='build')
depends_on('py-pyyaml', type='build')
depends_on('py-wheel', type='build')
depends_on('py-msgpack', type='build')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='f842a1a4427624eff6cbddb2405c36dec9a210cd',
when='@3.5.0')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='af71ea890a893e647bf2cf4571a90297d65689ca',
when='@3.7.0')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='9123205f9b5f95c96ff955695e942d2c3b321cbf',
when='@3.8.0')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='b68edc65aaeed08c71b2b8622f69f83498b57d7a',
when='@3.9.0')
resource(name='Tensile',
git='https://github.com/ROCmSoftwarePlatform/Tensile.git',
commit='ab44bf46b609b5a40053f310bef2ab7511f726ae',
when='@3.10.0')
# Status: https://github.com/ROCmSoftwarePlatform/Tensile/commit/a488f7dadba34f84b9658ba92ce9ec5a0615a087
# Not yet landed in 3.7.0, nor 3.8.0.
patch('0001-Fix-compilation-error-with-StringRef-to-basic-string.patch', when='@:3.8')
def setup_build_environment(self, env):
env.set('CXX', self.spec['hip'].hipcc)
def cmake_args(self):
arch = self.spec.variants['tensile_architecture'].value
tensile = join_path(self.stage.source_path, 'Tensile')
args = [
'-Damd_comgr_DIR={0}'.format(self.spec['comgr'].prefix),
'-DBUILD_CLIENTS_TESTS=OFF',
'-DBUILD_CLIENTS_BENCHMARKS=OFF',
'-DBUILD_CLIENTS_SAMPLES=OFF',
'-DRUN_HEADER_TESTING=OFF',
'-DBUILD_WITH_TENSILE=ON',
'-DTensile_TEST_LOCAL_PATH={0}'.format(tensile),
'-DTensile_COMPILER=hipcc',
'-DTensile_ARCHITECTURE={0}'.format(arch),
'-DTensile_LOGIC=asm_full',
'-DTensile_CODE_OBJECT_VERSION=V3',
'-DBUILD_WITH_TENSILE_HOST={0}'.format(
'ON' if '@3.7.0:' in self.spec else 'OFF'
)
]
if '@3.7.0:' in self.spec:
args.append('-DTensile_LIBRARY_FORMAT=msgpack')
return args
| iulian787/spack | var/spack/repos/builtin/packages/rocblas/package.py | Python | lgpl-2.1 | 4,299 | 0.002326 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'oedit'."""
from primaires.interpreteur.commande.commande import Commande
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.objet.editeurs.oedit.presentation import EdtPresentation
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
class CmdOedit(Commande):
"""Commande 'oedit'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "oedit", "oedit")
self.groupe = "administrateur"
self.schema = "<ident>"
self.nom_categorie = "batisseur"
self.aide_courte = "ouvre l'éditeur d'objet"
self.aide_longue = \
"Cette commande permet d'accéder à l'éditeur d'objet. Elle " \
"prend en paramètre l'identifiant de l'objet (que des " \
"minuscules, des chiffres et le signe |ent|_|ff|). Si l'objet " \
"n'existe pas, il est créé."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
ident_objet = dic_masques["ident"].ident
if ident_objet in type(self).importeur.objet.prototypes:
prototype = type(self).importeur.objet.prototypes[ident_objet]
enveloppe = EnveloppeObjet(EdtPresentation, prototype, "")
contexte = enveloppe.construire(personnage)
personnage.contextes.ajouter(contexte)
contexte.actualiser()
else:
editeur = type(self).importeur.interpreteur.construire_editeur(
"oedit", personnage, ident_objet)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
| stormi/tsunami | src/primaires/objet/commandes/oedit/__init__.py | Python | bsd-3-clause | 3,339 | 0.002402 |
"""
WSGI config for SimpleNote project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SimpleNote.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
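# Example (sketch, assuming gunicorn is used as the WSGI server):
#     gunicorn SimpleNote.wsgi:application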
| MaximeRaynal/SimpleNote | src/SimpleNote/wsgi.py | Python | mit | 395 | 0.002532 |
import unittest
from gourmet.importers import importer
class TestImporter (unittest.TestCase):
def setUp (self):
self.i = importer.Importer()
def _get_last_rec_ (self):
return self.i.added_recs[-1]
def testRecImport (self):
self.i.start_rec()
attrs = [('title','Foo'),('cuisine','Bar'),('yields',3),('yield_unit','cups')]
for att,val in attrs:
self.i.rec[att] = val
self.i.commit_rec()
rec = self._get_last_rec_()
for att,val in attrs:
self.assertEqual(getattr(rec,att),val)
def testIngredientImport (self):
self.i.start_rec()
self.i.rec['title']='Ingredient Import Test'
self.i.start_ing()
self.i.add_amt(2)
self.i.add_unit('cups')
self.i.add_item('water')
self.i.commit_ing()
self.i.commit_rec()
ings = self.i.rd.get_ings(self._get_last_rec_())
self.assertEqual(len(ings),1)
ing = ings[0]
self.assertEqual(ing.amount,2)
self.assertEqual(ing.unit,'cups')
self.assertEqual(ing.item,'water')
if __name__ == '__main__':
unittest.main()
| kirienko/gourmet | tests/test_importer.py | Python | gpl-2.0 | 1,162 | 0.018933 |
import json
from dateutil import parser as datetime_parser
from occam.app import get_redis
from occam.runtime import OCCAM_SERVER_CONFIG_KEY
def get_servers():
redis = get_redis()
servers = json.loads(redis.get(OCCAM_SERVER_CONFIG_KEY))
return servers.items()
def iterate_servers():
redis = get_redis()
servers = json.loads(redis.get(OCCAM_SERVER_CONFIG_KEY))
for server_name, server_location in servers.iteritems():
yield server_name, server_location
def sorted_by_time_element(l, element_getter=None):
if not element_getter:
element_getter = lambda x: x
key_getter = lambda x: datetime_parser.parse(element_getter(x))
return sorted(l, key=key_getter)
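# Usage sketch (hypothetical data): order dicts by an ISO timestamp field.
#
#     events = [{"ts": "2020-01-02T00:00:00"}, {"ts": "2020-01-01T00:00:00"}]
#     ordered = sorted_by_time_element(events, element_getter=lambda e: e["ts"])
#     # ordered[0]["ts"] == "2020-01-01T00:00:00"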
| Yelp/occam | occam/util.py | Python | mit | 715 | 0.004196 |
### import ####################################################################
import os
import time
import pathlib
import shutil
import collections
import appdirs
import toml
import numpy as np
from PySide2 import QtWidgets
import WrightTools as wt
import attune
import pycmds.project.project_globals as g
import pycmds.project.widgets as pw
import pycmds.project.classes as pc
from pycmds.hardware import hardware as hw
from pycmds.hardware.opas.PoyntingCorrection.ZaberCorrectionDevice import ZaberCorrectionDevice
### driver ####################################################################
class Driver(hw.Driver):
def __init__(self, *args, **kwargs):
self.index = kwargs["index"]
self.motor_positions = collections.OrderedDict()
self.homeable = {} # TODO:
self.poynting_type = kwargs.pop("poynting_type", None)
self.poynting_correction = None
hw.Driver.__init__(self, *args, **kwargs)
if not hasattr(self, "motor_names"): # for virtual...
self.motor_names = ["Delay", "Crystal", "Mixer"]
if not hasattr(self, "curve_paths"): # for virtual...
self.curve_paths = collections.OrderedDict()
if not hasattr(self, "interaction_string_combo"): # for virtual...
self.interaction_string_combo = pc.Combo(allowed_values=["sig"])
if self.poynting_type is not None:
self.motor_names += ["Phi", "Theta"] # TODO: Generalize
self.curve = None
# poynting correction
if self.poynting_type == "zaber":
self.poynting_correction = ZaberCorrectionDevice(
kwargs.pop("poynting_port"), kwargs.pop("poynting_indexes")
)
else:
self.poynting_correction = None
self.poynting_type = None
if self.poynting_correction:
self.curve_paths["Poynting"] = pc.Filepath(initial_value=self.poynting_curve_path)
if self.model == "Virtual":
self.load_curve()
def _home_motors(self, motor_names):
raise NotImplementedError
def _load_curve(self, interaction):
if self.model == "Virtual":
colors = np.linspace(400, 10000, 17)
motors = []
motors.append(attune.Dependent(((colors - 500) / 1e4) ** 2, "Delay"))
motors.append(attune.Dependent(-((colors - 90) ** 0.25), "Crystal"))
motors.append(attune.Dependent((colors - 30) ** 0.25, "Mixer"))
name = "curve"
interaction = "sig"
kind = "Virtual"
colors = attune.Setpoints(colors, "Colors", units="nm")
self.curve = attune.Curve(colors, motors, name, interaction, kind)
self.curve.convert(self.native_units)
else:
raise NotImplementedError
def _set_motors(self, motor_destinations):
if self.model == "Virtual":
# Virtual hardware, just set the position directly
for k, v in motor_destinations.items():
self.motor_positions[k].write(v)
else:
raise NotImplementedError
def _update_api(self, interaction):
pass
def _wait_until_still(self, inputs=[]):
while self.is_busy():
            time.sleep(0.1)  # I've experienced hard crashes when wait set to 0.01 - Blaise 2015.12.30
self.get_motor_positions()
self.get_motor_positions()
def get_position(self):
position = self.hardware.destination.read()
self.position.write(position, self.native_units)
return position
def get_motor_positions(self):
pass
def home_all(self, inputs=[]):
names = [i for i in self.motor_names if self.homeable.get(i)]
if self.poynting_correction:
self.poynting_correction.home()
for n in self.poynting_correction.motor_names:
                if n in names:
                    names.remove(n)
self._home_motors(names)
def home_motor(self, inputs):
# TODO: clean up for new inputs behavior
motor_name = inputs[0]
if self.poynting_correction:
if motor_name in self.poynting_correction.motor_names:
self.poynting_correction.home(motor_name)
return
if self.homeable.get(motor_name):
self._home_motors([motor_name])
def initialize(self):
# virtual stuff
if self.model == "Virtual":
self.motor_positions["Delay"] = pc.Number(0.0, display=True)
self.motor_positions["Crystal"] = pc.Number(0.0, display=True)
self.motor_positions["Mixer"] = pc.Number(0.0, display=True)
if self.poynting_correction:
# initialize
self.poynting_correction.initialize(self)
for name in self.poynting_correction.motor_names:
self.homeable[name] = True
number = self.poynting_correction.motor_positions[name]
self.motor_positions[name] = number
self.recorded[self.name + "_" + name] = [number, None, 1.0, name]
# get position
self.load_curve()
self.get_motor_positions()
self.get_position()
hw.Driver.initialize(self)
def load_curve(self, name=None, path=None, update=True):
interaction = self.interaction_string_combo.read()
# update curve_paths
if name is not None:
old_directory = os.path.dirname(str(self.curve_paths[name]))
p = shutil.copy(path, old_directory)
self.curve_paths[name].write(os.path.abspath(p))
# remake own curve object/
curve = self._load_curve(interaction)
if self.poynting_correction:
p = self.curve_paths["Poynting"].read()
self.curve = attune.Curve.read(p, subcurve=curve)
self.curve.kind = "poynting"
self.save_status()
self.curve.convert(self.native_units)
# update limits
self.limits.write(*self.curve.get_limits(), self.native_units)
if update:
self._update_api(interaction)
def set_motor(self, motor_name, destination, wait=True):
if self.poynting_correction:
if motor_name in self.poynting_correction.motor_names:
self.poynting_correction.set_motor(motor_name, destination)
return
self._set_motors({motor_name: destination})
if wait:
self.wait_until_still()
def set_motors(self, motor_names, motor_positions, wait=True):
destinations = {n: p for n, p in zip(motor_names, motor_positions)}
if self.poynting_correction:
for name, pos in zip(motor_names, motor_positions):
if name in self.poynting_correction.motor_names:
self.poynting_correction.set_motor(name, pos)
destinations.pop(name)
self._set_motors(destinations)
if wait:
self.wait_until_still()
def set_position(self, destination):
# coerce destination to be within current tune range
destination = np.clip(destination, *self.curve.get_limits())
# get destinations from curve
motor_destinations = self.curve(destination, self.native_units)
# poynting
if self.poynting_correction:
for m in self.poynting_correction.motor_names:
self.poynting_correction.set_motor(m, motor_destinations.pop(m))
# OPA
self._set_motors(motor_destinations)
time.sleep(0.01)
# finish
self.wait_until_still()
self.get_position()
self.save_status()
def set_position_except(self, destination, exceptions):
"""
        Set the OPA position, skipping the motors named in `exceptions`.
        Does not wait until still.
"""
self.hardware.destination.write(destination, self.native_units)
self.position.write(destination, self.native_units)
motor_destinations = self.curve(destination, self.native_units)
for e in exceptions:
motor_destinations.pop(e, None)
if self.poynting_correction:
for m in self.poynting_correction.motor_names:
if m in motor_destinations:
self.poynting_correction.set_motor(m, motor_destinations.pop(m))
self._set_motors(motor_destinations)
def wait_until_still(self):
self._wait_until_still()
if self.poynting_correction:
self.poynting_correction.wait_until_still()
self.get_motor_positions()
def get_state(self):
state = super().get_state()
if self.poynting_correction:
state["poynting_curve_path"] = self.curve_paths["Poynting"].read()
return state
def load_state(self, state):
super().load_state(state)
self.poynting_curve_path = state.get("poynting_curve_path", "")
### gui #######################################################################
class GUI(hw.GUI):
def initialize(self):
# self.hardware.driver.initialize()
# container widget
display_container_widget = QtWidgets.QWidget()
display_container_widget.setLayout(QtWidgets.QVBoxLayout())
display_layout = display_container_widget.layout()
display_layout.setMargin(0)
self.layout.addWidget(display_container_widget)
# plot
self.plot_widget = pw.Plot1D()
self.plot_widget.plot_object.setMouseEnabled(False, False)
self.plot_curve = self.plot_widget.add_scatter()
self.plot_h_line = self.plot_widget.add_infinite_line(angle=0, hide=False)
self.plot_v_line = self.plot_widget.add_infinite_line(angle=90, hide=False)
display_layout.addWidget(self.plot_widget)
# vertical line
line = pw.line("V")
self.layout.addWidget(line)
# container widget / scroll area
settings_container_widget = QtWidgets.QWidget()
settings_scroll_area = pw.scroll_area()
settings_scroll_area.setWidget(settings_container_widget)
settings_scroll_area.setMinimumWidth(300)
settings_scroll_area.setMaximumWidth(300)
settings_container_widget.setLayout(QtWidgets.QVBoxLayout())
settings_layout = settings_container_widget.layout()
settings_layout.setMargin(5)
self.layout.addWidget(settings_scroll_area)
# opa properties
input_table = pw.InputTable()
settings_layout.addWidget(input_table)
# plot control
input_table = pw.InputTable()
input_table.add("Display", None)
self.plot_motor = pc.Combo(allowed_values=self.driver.curve.dependent_names)
self.plot_motor.updated.connect(self.update_plot)
input_table.add("Motor", self.plot_motor)
allowed_values = list(wt.units.energy.keys())
self.plot_units = pc.Combo(
initial_value=self.driver.native_units, allowed_values=allowed_values
)
self.plot_units.updated.connect(self.update_plot)
input_table.add("Units", self.plot_units)
settings_layout.addWidget(input_table)
# curves
input_table = pw.InputTable()
input_table.add("Curves", None)
for name, obj in self.driver.curve_paths.items():
input_table.add(name, obj)
obj.updated.connect(self.on_curve_paths_updated)
input_table.add("Interaction String", self.driver.interaction_string_combo)
# limits
limits = pc.NumberLimits() # units None
self.low_energy_limit_display = pc.Number(
units=self.driver.native_units, display=True, limits=limits
)
input_table.add("Low Energy Limit", self.low_energy_limit_display)
self.high_energy_limit_display = pc.Number(
units=self.driver.native_units, display=True, limits=limits
)
input_table.add("High Energy LImit", self.high_energy_limit_display)
settings_layout.addWidget(input_table)
self.driver.limits.updated.connect(self.on_limits_updated)
# motors
input_table = pw.InputTable()
input_table.add("Motors", None)
settings_layout.addWidget(input_table)
for motor_name, motor_mutex in self.driver.motor_positions.items():
settings_layout.addWidget(MotorControlGUI(motor_name, motor_mutex, self.driver))
self.home_all_button = pw.SetButton("HOME ALL", "advanced")
settings_layout.addWidget(self.home_all_button)
homeable = any(self.driver.homeable)
self.home_all_button.clicked.connect(self.on_home_all)
g.queue_control.disable_when_true(self.home_all_button)
# poynting manual mode
if self.driver.poynting_correction:
self.poynting_manual_control = pc.Bool()
input_table = pw.InputTable()
input_table.add("Poynting Control", self.poynting_manual_control)
self.poynting_manual_control.updated.connect(self.on_poynting_manual_control_updated)
settings_layout.addWidget(input_table)
# stretch
settings_layout.addStretch(1)
# signals and slots
self.driver.interaction_string_combo.updated.connect(self.update_plot)
self.driver.update_ui.connect(self.update)
# finish
self.update()
self.update_plot()
self.on_limits_updated()
def update(self):
# set button disable
if self.driver.busy.read():
self.home_all_button.setDisabled(True)
for motor_mutex in self.driver.motor_positions.values():
motor_mutex.set_disabled(True)
else:
self.home_all_button.setDisabled(False)
for motor_mutex in self.driver.motor_positions.values():
motor_mutex.set_disabled(False)
# update destination motor positions
# TODO:
# update plot lines
motor_name = self.plot_motor.read()
try:
motor_position = self.driver.motor_positions[motor_name].read()
self.plot_h_line.setValue(motor_position)
except:
pass
units = self.plot_units.read()
self.plot_v_line.setValue(self.driver.position.read(units))
def update_plot(self):
# units
units = self.plot_units.read()
# xi
colors = self.driver.curve.setpoints[:]
xi = wt.units.converter(colors, self.driver.curve.setpoints.units, units)
# yi
self.plot_motor.set_allowed_values(
self.driver.curve.dependent_names
) # can be done on initialization?
motor_name = self.plot_motor.read()
yi = self.driver.curve(xi, units)[motor_name]
self.plot_widget.set_labels(xlabel=units, ylabel=motor_name)
self.plot_curve.clear()
try:
self.plot_curve.setData(xi, yi)
except ValueError:
pass
self.plot_widget.graphics_layout.update()
self.update()
self.plot_motor.set_allowed_values(self.driver.curve.dependent_names)
def on_curve_paths_updated(self):
self.driver.load_curve() # TODO: better
self.update_plot()
def on_home_all(self):
self.hardware.q.push("home_all")
def on_limits_updated(self):
low_energy_limit, high_energy_limit = self.driver.limits.read("wn")
self.low_energy_limit_display.write(low_energy_limit, "wn")
self.high_energy_limit_display.write(high_energy_limit, "wn")
def on_poynting_manual_control_updated(self):
if self.poynting_manual_control.read():
self.driver.poynting_correction.port.setMode("manual")
else:
self.driver.poynting_correction.port.setMode("computer")
class MotorControlGUI(QtWidgets.QWidget):
def __init__(self, motor_name, motor_mutex, driver):
QtWidgets.QWidget.__init__(self)
self.motor_name = motor_name
self.driver = driver
self.hardware = driver.hardware
self.layout = QtWidgets.QVBoxLayout()
self.layout.setMargin(0)
# table
input_table = pw.InputTable()
input_table.add(motor_name, motor_mutex)
self.destination = motor_mutex.associate(display=False)
input_table.add("Dest. " + motor_name, self.destination)
self.layout.addWidget(input_table)
# buttons
home_button, set_button = self.add_buttons(self.layout, "HOME", "advanced", "SET", "set")
home_button.clicked.connect(self.on_home)
set_button.clicked.connect(self.on_set)
g.queue_control.disable_when_true(home_button)
g.queue_control.disable_when_true(set_button)
# finish
self.setLayout(self.layout)
def add_buttons(self, layout, button1_text, button1_color, button2_text, button2_color):
colors = g.colors_dict.read()
# layout
button_container = QtWidgets.QWidget()
button_container.setLayout(QtWidgets.QHBoxLayout())
button_container.layout().setMargin(0)
# button1
button1 = QtWidgets.QPushButton()
button1.setText(button1_text)
button1.setMinimumHeight(25)
StyleSheet = "QPushButton{background:custom_color; border-width:0px; border-radius: 0px; font: bold 14px}".replace(
"custom_color", colors[button1_color]
)
button1.setStyleSheet(StyleSheet)
button_container.layout().addWidget(button1)
g.queue_control.disable_when_true(button1)
# button2
button2 = QtWidgets.QPushButton()
button2.setText(button2_text)
button2.setMinimumHeight(25)
StyleSheet = "QPushButton{background:custom_color; border-width:0px; border-radius: 0px; font: bold 14px}".replace(
"custom_color", colors[button2_color]
)
button2.setStyleSheet(StyleSheet)
button_container.layout().addWidget(button2)
g.queue_control.disable_when_true(button2)
# finish
layout.addWidget(button_container)
return [button1, button2]
def on_home(self):
self.driver.hardware.q.push("home_motor", [self.motor_name])
def on_set(self):
destination = self.destination.read()
self.hardware.set_motor(self.motor_name, destination)
### hardware ##################################################################
class Hardware(hw.Hardware):
    def __init__(self, *args, **kwargs):
        self.kind = "OPA"
        hw.Hardware.__init__(self, *args, **kwargs)
@property
def curve(self):
# TODO: a more thread-safe operation (copy?)
return self.driver.curve
@property
def curve_paths(self):
"""
OrderedDict {name: path}
"""
# TODO: a more thread-safe operation
return collections.OrderedDict(
{key: value.read() for key, value in self.driver.curve_paths.items()}
)
def get_tune_points(self, units="native"):
if units == "native":
units = self.native_units
return wt.units.converter(self.curve.setpoints[:], self.curve.setpoints.units, units)
def home_motor(self, motor):
"""
motor list [name]
"""
self.q.push("home_motor", motor)
def load_curve(self, name, path):
self.q.push("load_curve", name, path)
@property
def motor_names(self):
# TODO: a more thread-safe operation
return self.driver.motor_names
def set_motor(self, motor, destination):
self.q.push("set_motor", motor, destination)
### initialize ################################################################
conf = pathlib.Path(appdirs.user_config_dir("pycmds", "pycmds")) / "config.toml"
conf = toml.load(conf)
hardwares, gui, advanced_gui = hw.import_hardwares(
conf.get("hardware", {}).get("opas", {}),
name="OPAs",
Driver=Driver,
GUI=GUI,
Hardware=Hardware,
)
| wright-group/PyCMDS | pycmds/hardware/opas/opas.py | Python | mit | 19,921 | 0.001456 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, unneeded-not
import pytest
import six
from ..match import Match, Matches
from ..pattern import StringPattern, RePattern
from ..formatters import formatters
class TestMatchClass(object):
def test_repr(self):
match1 = Match(1, 3, value="es")
assert repr(match1) == '<es:(1, 3)>'
match2 = Match(0, 4, value="test", private=True, name="abc", tags=['one', 'two'])
assert repr(match2) == '<test:(0, 4)+private+name=abc+tags=[\'one\', \'two\']>'
def test_names(self):
parent = Match(0, 10, name="test")
parent.children.append(Match(0, 10, name="child1", parent=parent))
parent.children.append(Match(0, 10, name="child2", parent=parent))
assert set(parent.names) == set(["child1", "child2"])
def test_equality(self):
match1 = Match(1, 3, value="es")
match2 = Match(1, 3, value="es")
other = object()
assert hash(match1) == hash(match2)
assert hash(match1) != hash(other)
assert match1 == match2
assert not match1 == other
def test_inequality(self):
match1 = Match(0, 2, value="te")
match2 = Match(2, 4, value="st")
match3 = Match(0, 2, value="other")
other = object()
assert hash(match1) != hash(match2)
assert hash(match1) != hash(match3)
assert match1 != other
assert match1 != match2
assert match1 != match3
def test_length(self):
match1 = Match(0, 4, value="test")
match2 = Match(0, 2, value="spanIsUsed")
assert len(match1) == 4
assert len(match2) == 2
def test_compare(self):
match1 = Match(0, 2, value="te")
match2 = Match(2, 4, value="st")
other = object()
assert match1 < match2
assert match1 <= match2
assert match2 > match1
assert match2 >= match1
if six.PY3:
with pytest.raises(TypeError):
match1 < other
with pytest.raises(TypeError):
match1 <= other
with pytest.raises(TypeError):
match1 > other
with pytest.raises(TypeError):
match1 >= other
else:
assert match1 < other
assert match1 <= other
assert not match1 > other
assert not match1 >= other
def test_value(self):
match1 = Match(1, 3)
match1.value = "test"
assert match1.value == "test"
class TestMatchesClass(object):
match1 = Match(0, 2, value="te", name="start")
match2 = Match(2, 3, value="s", tags="tag1")
match3 = Match(3, 4, value="t", tags=["tag1", "tag2"])
match4 = Match(2, 4, value="st", name="end")
def test_tag(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
assert "start" in matches.names
assert "end" in matches.names
assert "tag1" in matches.tags
assert "tag2" in matches.tags
tag1 = matches.tagged("tag1")
assert len(tag1) == 2
assert tag1[0] == self.match2
assert tag1[1] == self.match3
tag2 = matches.tagged("tag2")
assert len(tag2) == 1
assert tag2[0] == self.match3
start = matches.named("start")
assert len(start) == 1
assert start[0] == self.match1
end = matches.named("end")
assert len(end) == 1
assert end[0] == self.match4
def test_base(self):
matches = Matches()
matches.append(self.match1)
assert len(matches) == 1
assert repr(matches) == repr([self.match1])
assert list(matches.starting(0)) == [self.match1]
assert list(matches.ending(2)) == [self.match1]
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
assert len(matches) == 4
assert list(matches.starting(2)) == [self.match2, self.match4]
assert list(matches.starting(3)) == [self.match3]
assert list(matches.ending(3)) == [self.match2]
assert list(matches.ending(4)) == [self.match3, self.match4]
assert list(matches.range()) == [self.match1, self.match2, self.match4, self.match3]
assert list(matches.range(0)) == [self.match1, self.match2, self.match4, self.match3]
assert list(matches.range(0, 3)) == [self.match1, self.match2, self.match4]
assert list(matches.range(2, 3)) == [self.match2, self.match4]
assert list(matches.range(3, 4)) == [self.match4, self.match3]
matches.remove(self.match1)
assert len(matches) == 3
assert len(matches.starting(0)) == 0
assert len(matches.ending(2)) == 0
matches.clear()
assert len(matches) == 0
assert len(matches.starting(0)) == 0
assert len(matches.starting(2)) == 0
assert len(matches.starting(3)) == 0
assert len(matches.ending(2)) == 0
assert len(matches.ending(3)) == 0
assert len(matches.ending(4)) == 0
def test_get_slices(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
slice_matches = matches[1:3]
assert isinstance(slice_matches, Matches)
assert len(slice_matches) == 2
assert slice_matches[0] == self.match2
assert slice_matches[1] == self.match3
def test_remove_slices(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
del matches[1:3]
assert len(matches) == 2
assert matches[0] == self.match1
assert matches[1] == self.match4
def test_set_slices(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches.append(self.match4)
matches[1:3] = self.match1, self.match4
assert len(matches) == 4
assert matches[0] == self.match1
assert matches[1] == self.match1
assert matches[2] == self.match4
assert matches[3] == self.match4
def test_set_index(self):
matches = Matches()
matches.append(self.match1)
matches.append(self.match2)
matches.append(self.match3)
matches[1] = self.match4
assert len(matches) == 3
assert matches[0] == self.match1
assert matches[1] == self.match4
assert matches[2] == self.match3
def test_constructor(self):
matches = Matches([self.match1, self.match2, self.match3, self.match4])
assert len(matches) == 4
assert list(matches.starting(0)) == [self.match1]
assert list(matches.ending(2)) == [self.match1]
assert list(matches.starting(2)) == [self.match2, self.match4]
assert list(matches.starting(3)) == [self.match3]
assert list(matches.ending(3)) == [self.match2]
assert list(matches.ending(4)) == [self.match3, self.match4]
def test_constructor_kwargs(self):
matches = Matches([self.match1, self.match2, self.match3, self.match4], input_string="test")
assert len(matches) == 4
assert matches.input_string == "test"
assert list(matches.starting(0)) == [self.match1]
assert list(matches.ending(2)) == [self.match1]
assert list(matches.starting(2)) == [self.match2, self.match4]
assert list(matches.starting(3)) == [self.match3]
assert list(matches.ending(3)) == [self.match2]
assert list(matches.ending(4)) == [self.match3, self.match4]
def test_crop(self):
input_string = "abcdefghijklmnopqrstuvwxyz"
match1 = Match(1, 10, input_string=input_string)
match2 = Match(0, 2, input_string=input_string)
match3 = Match(8, 15, input_string=input_string)
ret = match1.crop([match2, match3.span])
assert len(ret) == 1
assert ret[0].span == (2, 8)
assert ret[0].value == "cdefgh"
ret = match1.crop((1, 10))
assert len(ret) == 0
ret = match1.crop((1, 3))
assert len(ret) == 1
assert ret[0].span == (3, 10)
ret = match1.crop((7, 10))
assert len(ret) == 1
assert ret[0].span == (1, 7)
ret = match1.crop((0, 12))
assert len(ret) == 0
ret = match1.crop((4, 6))
assert len(ret) == 2
assert ret[0].span == (1, 4)
assert ret[1].span == (6, 10)
ret = match1.crop([(3, 5), (7, 9)])
assert len(ret) == 3
assert ret[0].span == (1, 3)
assert ret[1].span == (5, 7)
assert ret[2].span == (9, 10)
def test_split(self):
input_string = "123 +word1 - word2 + word3 456"
match = Match(3, len(input_string) - 3, input_string=input_string)
splitted = match.split(" -+")
assert len(splitted) == 3
assert [split.value for split in splitted] == ["word1", "word2", "word3"]
class TestMatches(object):
def test_names(self):
input_string = "One Two Three"
matches = Matches()
matches.extend(StringPattern("One", name="1-str", tags=["One", "str"]).matches(input_string))
matches.extend(RePattern("One", name="1-re", tags=["One", "re"]).matches(input_string))
matches.extend(StringPattern("Two", name="2-str", tags=["Two", "str"]).matches(input_string))
matches.extend(RePattern("Two", name="2-re", tags=["Two", "re"]).matches(input_string))
matches.extend(StringPattern("Three", name="3-str", tags=["Three", "str"]).matches(input_string))
matches.extend(RePattern("Three", name="3-re", tags=["Three", "re"]).matches(input_string))
assert set(matches.names) == set(["1-str", "1-re", "2-str", "2-re", "3-str", "3-re"])
def test_filters(self):
input_string = "One Two Three"
matches = Matches()
matches.extend(StringPattern("One", name="1-str", tags=["One", "str"]).matches(input_string))
matches.extend(RePattern("One", name="1-re", tags=["One", "re"]).matches(input_string))
matches.extend(StringPattern("Two", name="2-str", tags=["Two", "str"]).matches(input_string))
matches.extend(RePattern("Two", name="2-re", tags=["Two", "re"]).matches(input_string))
matches.extend(StringPattern("Three", name="3-str", tags=["Three", "str"]).matches(input_string))
matches.extend(RePattern("Three", name="3-re", tags=["Three", "re"]).matches(input_string))
selection = matches.starting(0)
assert len(selection) == 2
selection = matches.starting(0, lambda m: "str" in m.tags)
assert len(selection) == 1
assert selection[0].pattern.name == "1-str"
selection = matches.ending(7, predicate=lambda m: "str" in m.tags)
assert len(selection) == 1
assert selection[0].pattern.name == "2-str"
selection = matches.previous(matches.named("2-str")[0])
assert len(selection) == 2
assert selection[0].pattern.name == "1-str"
assert selection[1].pattern.name == "1-re"
selection = matches.previous(matches.named("2-str", 0), lambda m: "str" in m.tags)
assert len(selection) == 1
assert selection[0].pattern.name == "1-str"
selection = matches.next(matches.named("2-str", 0))
assert len(selection) == 2
assert selection[0].pattern.name == "3-str"
assert selection[1].pattern.name == "3-re"
selection = matches.next(matches.named("2-str", 0), index=0, predicate=lambda m: "re" in m.tags)
assert selection is not None
assert selection.pattern.name == "3-re"
selection = matches.next(matches.named("2-str", index=0), lambda m: "re" in m.tags)
assert len(selection) == 1
assert selection[0].pattern.name == "3-re"
selection = matches.named("2-str", lambda m: "re" in m.tags)
assert len(selection) == 0
selection = matches.named("2-re", lambda m: "re" in m.tags, 0)
assert selection is not None
assert selection.name == "2-re" # pylint:disable=no-member
selection = matches.named("2-re", lambda m: "re" in m.tags)
assert len(selection) == 1
assert selection[0].name == "2-re"
selection = matches.named("2-re", lambda m: "re" in m.tags, index=1000)
assert selection is None
def test_raw(self):
input_string = "0123456789"
match = Match(0, 10, input_string=input_string, formatter=lambda s: s*2)
assert match.value == match.raw * 2
assert match.raw == input_string
match.raw_end = 9
match.raw_start = 1
assert match.value == match.raw * 2
assert match.raw == input_string[1:9]
match.raw_end = None
match.raw_start = None
assert match.value == match.raw * 2
assert match.raw == input_string
def test_formatter_chain(self):
input_string = "100"
match = Match(0, 3, input_string=input_string, formatter=formatters(int, lambda s: s*2, lambda s: s+10))
assert match.raw == input_string
assert match.value == 100 * 2 + 10
def test_to_dict(self):
input_string = "One Two Two Three"
matches = Matches()
matches.extend(StringPattern("One", name="1", tags=["One", "str"]).matches(input_string))
matches.extend(RePattern("One", name="1", tags=["One", "re"]).matches(input_string))
matches.extend(StringPattern("Two", name="2", tags=["Two", "str"]).matches(input_string))
matches.extend(RePattern("Two", name="2", tags=["Two", "re"]).matches(input_string))
matches.extend(RePattern("Two", name="2", tags=["Two", "reBis"]).matches(input_string))
matches.extend(StringPattern("Three", name="3", tags=["Three", "str"]).matches(input_string))
matches.extend(RePattern("Three", name="3bis", tags=["Three", "re"]).matches(input_string))
matches.extend(RePattern(r"(\w+)", name="words").matches(input_string))
kvalues = matches.to_dict()
assert kvalues == {"1": "One",
"2": "Two",
"3": "Three",
"3bis": "Three",
"words": "One"}
assert kvalues.values_list["words"] == ["One", "Two", "Three"]
kvalues = matches.to_dict(details=True, implicit=True)
assert kvalues["1"].value == "One"
assert len(kvalues["2"]) == 2
assert kvalues["2"][0].value == "Two"
assert kvalues["2"][1].value == "Two"
assert kvalues["3"].value == "Three"
assert kvalues["3bis"].value == "Three"
assert len(kvalues["words"]) == 4
assert kvalues["words"][0].value == "One"
assert kvalues["words"][1].value == "Two"
assert kvalues["words"][2].value == "Two"
assert kvalues["words"][3].value == "Three"
kvalues = matches.to_dict(details=True)
assert kvalues["1"].value == "One"
assert len(kvalues.values_list["2"]) == 2
assert kvalues.values_list["2"][0].value == "Two"
assert kvalues.values_list["2"][1].value == "Two"
assert kvalues["3"].value == "Three"
assert kvalues["3bis"].value == "Three"
assert len(kvalues.values_list["words"]) == 4
assert kvalues.values_list["words"][0].value == "One"
assert kvalues.values_list["words"][1].value == "Two"
assert kvalues.values_list["words"][2].value == "Two"
assert kvalues.values_list["words"][3].value == "Three"
def test_chains(self):
input_string = "wordX 10 20 30 40 wordA, wordB, wordC 70 80 wordX"
matches = Matches(input_string=input_string)
matches.extend(RePattern(r"\d+", name="digit").matches(input_string))
matches.extend(RePattern("[a-zA-Z]+", name="word").matches(input_string))
assert len(matches) == 11
a_start = input_string.find('wordA')
b_start = input_string.find('wordB')
b_end = b_start + len('wordB')
c_start = input_string.find('wordC')
c_end = c_start + len('wordC')
chain_before = matches.chain_before(b_start, " ,", predicate=lambda match: match.name == "word")
assert len(chain_before) == 1
assert chain_before[0].value == 'wordA'
chain_before = matches.chain_before(Match(b_start, b_start), " ,", predicate=lambda match: match.name == "word")
assert len(chain_before) == 1
assert chain_before[0].value == 'wordA'
chain_before = matches.chain_before(b_start, " ,", predicate=lambda match: match.name == "digit")
assert len(chain_before) == 0
chain_before = matches.chain_before(a_start, " ,", predicate=lambda match: match.name == "digit")
assert len(chain_before) == 4
assert [match.value for match in chain_before] == ["40", "30", "20", "10"]
chain_after = matches.chain_after(b_end, " ,", predicate=lambda match: match.name == "word")
assert len(chain_after) == 1
assert chain_after[0].value == 'wordC'
chain_after = matches.chain_after(Match(b_end, b_end), " ,", predicate=lambda match: match.name == "word")
assert len(chain_after) == 1
assert chain_after[0].value == 'wordC'
chain_after = matches.chain_after(b_end, " ,", predicate=lambda match: match.name == "digit")
assert len(chain_after) == 0
chain_after = matches.chain_after(c_end, " ,", predicate=lambda match: match.name == "digit")
assert len(chain_after) == 2
assert [match.value for match in chain_after] == ["70", "80"]
chain_after = matches.chain_after(c_end, " ,", end=10000, predicate=lambda match: match.name == "digit")
assert len(chain_after) == 2
assert [match.value for match in chain_after] == ["70", "80"]
def test_holes(self):
input_string = '1'*10+'2'*10+'3'*10+'4'*10+'5'*10+'6'*10+'7'*10
hole1 = Match(0, 10, input_string=input_string)
hole2 = Match(20, 30, input_string=input_string)
hole3 = Match(30, 40, input_string=input_string)
hole4 = Match(60, 70, input_string=input_string)
matches = Matches([hole1, hole2], input_string=input_string)
matches.append(hole3)
matches.append(hole4)
holes = list(matches.holes())
assert len(holes) == 2
assert holes[0].span == (10, 20)
assert holes[0].value == '2'*10
assert holes[1].span == (40, 60)
assert holes[1].value == '5' * 10 + '6' * 10
holes = list(matches.holes(5, 15))
assert len(holes) == 1
assert holes[0].span == (10, 15)
assert holes[0].value == '2'*5
holes = list(matches.holes(5, 15, formatter=lambda value: "formatted"))
assert len(holes) == 1
assert holes[0].span == (10, 15)
assert holes[0].value == "formatted"
holes = list(matches.holes(5, 15, predicate=lambda hole: False))
assert len(holes) == 0
def test_holes_empty(self):
input_string = "Test hole on empty matches"
matches = Matches(input_string=input_string)
holes = matches.holes()
assert len(holes) == 1
assert holes[0].value == input_string
def test_holes_seps(self):
input_string = "Test hole - with many separators + included"
match = StringPattern("many").matches(input_string)
matches = Matches(match, input_string)
holes = matches.holes()
assert len(holes) == 2
holes = matches.holes(seps="-+")
assert len(holes) == 4
assert [hole.value for hole in holes] == ["Test hole ", " with ", " separators ", " included"]
| bbsan2k/nzbToMedia | libs/rebulk/test/test_match.py | Python | gpl-3.0 | 20,081 | 0.00239 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from superdesk.tests import TestCase
from superdesk.io.iptc7901 import Iptc7901FileParser
def fixture(filename):
dirname = os.path.dirname(os.path.realpath(__file__))
return os.path.join(dirname, 'fixtures', filename)
class IptcTestCase(TestCase):
parser = Iptc7901FileParser()
def open(self, filename):
provider = {'name': 'Test'}
return self.parser.parse_file(fixture(filename), provider)
def test_open_iptc7901_file(self):
with self.app.app_context():
item = self.open('IPTC7901.txt')
self.assertEqual('preformatted', item['type'])
self.assertEqual('062', item['ingest_provider_sequence'])
self.assertEqual('i', item['anpa_category'][0]['qcode'])
self.assertEqual(211, item['word_count'])
self.assertEqual('Germany Social Democrats: Coalition talks with Merkel could fail =', item['headline'])
self.assertRegex(item['body_html'], '^\n Berlin')
self.assertEqual('Germany-politics', item['slugline'])
self.assertEqual(4, item['priority'])
self.assertEqual([{'qcode': 'i'}], item['anpa_category'])
            self.assertIn('## Editorial contacts', item['ednote'])
def test_open_iptc7901_file_odd_charset(self):
with self.app.app_context():
item = self.open('IPTC7901_odd_charset.txt')
            self.assertIn('Müller', item['body_html'])
            self.assertIn('## Editorial contacts', item['ednote'])
def test_map_priority(self):
self.assertEqual(1, self.parser.map_priority("1"))
self.assertEqual(2, self.parser.map_priority("2"))
self.assertEqual(3, self.parser.map_priority("3"))
self.assertEqual(5, self.parser.map_priority("5"))
self.assertEqual(6, self.parser.map_priority("eee"))
self.assertEqual(6, self.parser.map_priority(None))
| plamut/superdesk-core | tests/io/iptc7901_tests.py | Python | agpl-3.0 | 2,239 | 0.000447 |
#!/usr/bin/env python
"""
Assorted utilities for manipulating latitude and longitude values
"""
from __future__ import unicode_literals
__version__ = "1.4"
import math, struct
def signbit(value):
"""
Test whether the sign bit of the given floating-point value is
set. If it is set, this generally means the given value is
negative. However, this is not the same as comparing the value
to C{0.0}. For example:
>>> NEGATIVE_ZERO < 0.0
False
since negative zero is numerically equal to positive zero. But
the sign bit of negative zero is indeed set:
>>> signbit(NEGATIVE_ZERO)
True
>>> signbit(0.0)
False
@type value: float
@param value: a Python (double-precision) float value
@rtype: bool
@return: C{True} if the sign bit of C{value} is set;
C{False} if it is not set.
signbit and doubleToRawLongBits
are from Martin Jansche:
http://symptotic.com/mj/code.html (MIT license).
This is required to capture the difference between -0.0 and 0.0, which is
useful if someone wants to convert a latitude or longitude like:
-0.0degrees, 34minutes to 0d34'00"S
"""
return (doubleToRawLongBits(value) >> 63) == 1
def doubleToRawLongBits(value):
"""
@type value: float
@param value: a Python (double-precision) float value
@rtype: long
@return: the IEEE 754 bit representation (64 bits as a long integer)
of the given double-precision floating-point value.
"""
# pack double into 64 bits, then unpack as long int
return struct.unpack(b'Q', struct.pack(b'd', value))[0]
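# Illustrative values (a sketch for reference, not part of the original module):
# doubleToRawLongBits(0.0) == 0, while doubleToRawLongBits(-0.0) == 2**63
# (0x8000000000000000), which is how signbit() tells -0.0 apart from 0.0.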
class LatLongConverter:
@classmethod
def ToDecDeg(self, d=0, m=0, s=0, ustring = False, max=180):
"""
DecDegrees = ToDecDeg(d=0, m=0, s=0)
converts degrees, minutes, seconds to decimal degrees (returned as a Float).
"""
if m < 0 or s < 0:
raise ValueError("Minutes and Seconds have to be positive")
if m > 60.0 or s > 60.0:
raise ValueError("Minutes and Seconds have to be between -180 and 180")
if abs(d) > max:
raise ValueError("Degrees have to be between -180 and 180")
if signbit(d):
Sign = -1
d = abs(d)
else:
Sign = 1
deg_has_fract = bool(math.modf(d)[0])
min_has_fract = bool(math.modf(m)[0])
if deg_has_fract and (m != 0.0 or s != 0.0):
raise ValueError("degrees cannot have fraction unless both minutes"
"and seconds are zero")
if min_has_fract and s != 0.0:
raise ValueError("minutes cannot have fraction unless seconds are zero")
DecDegrees = Sign * (d + m/60.0 + s/3600.0)
if ustring:
return u"%.6f\xb0"%(DecDegrees)
else:
return DecDegrees
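    # Illustrative example (a sketch, not part of the original module):
    # LatLongConverter.ToDecDeg(120, 45, 45) == 120 + 45/60. + 45/3600. == 120.7625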
@classmethod
def ToDegMin(self, DecDegrees, ustring = False):
"""
Converts from decimal (binary float) degrees to:
Degrees, Minutes
If the optional parameter: "ustring" is True,
a Unicode string is returned
"""
if signbit(DecDegrees):
Sign = -1
DecDegrees = abs(DecDegrees)
else:
Sign = 1
Degrees = int(DecDegrees)
DecMinutes = round((DecDegrees - Degrees + 1e-14) * 60, 10)# add a tiny bit then round to avoid binary rounding issues
if ustring:
if Sign == 1:
return u"%i\xb0 %.3f'"%(Degrees, DecMinutes)
else:
return u"-%i\xb0 %.3f'"%(Degrees, DecMinutes)
else:
return (Sign*float(Degrees), DecMinutes) # float to preserve -0.0
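    # Illustrative example (a sketch, not part of the original module):
    # LatLongConverter.ToDegMin(-120.7625) -> (-120.0, 45.75)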
@classmethod
def ToDegMinSec(self, DecDegrees, ustring = False):
"""
Converts from decimal (binary float) degrees to:
Degrees, Minutes, Seconds
If the optional parameter: "ustring" is True,
a unicode string is returned
"""
if signbit(DecDegrees):
Sign = -1
DecDegrees = abs(DecDegrees)
else:
Sign = 1
Degrees = int(DecDegrees)
DecMinutes = (DecDegrees - Degrees + 1e-14) * 60 # add a tiny bit to avoid rounding issues
Minutes = int(DecMinutes)
Seconds = round(((DecMinutes - Minutes) * 60), 10 )
if ustring:
if Sign == 1:
return u"%i\xb0 %i' %.2f\""%(Degrees, Minutes, Seconds)
else:
return u"-%i\xb0 %i' %.2f\""%(Degrees, Minutes, Seconds)
else:
return (Sign * float(Degrees), Minutes, Seconds)
## These are classes used in our web apps: ResponseLink, etc.
## They provide a different interface to lat-long format conversion
class Latitude:
"""An object that can interpret a latitude in various formats.
Constructor:
Latitude(deg, min=0.0, sec=0.0, direction=None)
- 'deg' may be between -90.0 and 90.0.
- if 'min' is nonzero, 'deg' cannot have a fractional part.
(This means 5 and 5.0 are acceptable but 5.1 is not.)
- if 'sec' is nonzero, 'deg' and 'min' cannot have fractional parts.
- 'direction' may be a string beginning with 'N' or 'S' (case
insensitive), or None.
- if 'direction' is not None, 'deg' cannot be negative.
Attributes:
.value : a float in decimal degrees. Positive is North; negative is
South. (These apply to zero too; positive zero is North.)
Methods:
.degrees() -> (float, str)
.degrees_minutes() -> (int, float, str)
.degrees_minutes_seconds() -> (int, int, float, str)
The 'str' argument is the direction: "North" or "South".
Example:
>>> lat1 = Latitude(-120.7625)
>>> lat2 = Latitude(-120, 45.7500)
>>> lat3 = Latitude(-120, 45, 45)
>>> lat4 = Latitude(120.7625, direction='South')
>>> lat5 = Latitude(120, 45.7500, direction='S')
>>> lat6 = Latitude(120, 45, 45, direction='south')
>>> (lat1.value == lat2.value == lat3.value == lat4.value ==
... lat5.value == lat6.value)
True
>>> lat1.value
-120.7625
>>> lat1.degrees()
(120.7625, 'South')
>>> lat1.degrees_minutes()
(120, 45.750000000000171, 'South')
>>> lat1.degrees_minutes_seconds()
(120, 45, 45.000000000010232, 'South')
>>> print str(lat1)
Latitude(-120.762500)
"""
negative_direction = "South"
positive_direction = "North"
min = -90.0
max = 90.0
def __init__(self, deg, min=0.0, sec=0.0, direction=None):
ndir = self.negative_direction[0].upper()
pdir = self.positive_direction[0].upper()
if direction:
if deg < 0.0:
msg = "degrees cannot be negative if direction is specified"
raise ValueError(msg)
if direction[0].upper() == pdir:
pass
elif direction[0].upper() == ndir:
deg = -deg
else:
msg = "direction must start with %r or %r" % (pdir, ndir)
raise ValueError(msg)
self.value = LatLongConverter.ToDecDeg(deg, min, sec, max=self.max)
def direction(self):
if self.value < 0.0:
return self.negative_direction
else:
return self.positive_direction
def degrees(self):
deg = abs(self.value)
return deg, self.direction()
def degrees_minutes(self):
deg, min = LatLongConverter.ToDegMin(abs(self.value))
return deg, min, self.direction()
def degrees_minutes_seconds(self):
deg, min, sec = LatLongConverter.ToDegMinSec(abs(self.value))
return deg, min, sec, self.direction()
def __repr__(self):
try:
return "%s(%f)" % (self.__class__.__name__, self.value)
except AttributeError:
return "%s(uninitialized)" % self.__class__.__name__
def format(self, style):
"""
format(style)
returns formatted value as Unicode string with u'\xb0' (degree symbol).
style is one of:
1: decimal degrees
2: degrees, decimal minutes
3: degrees, minutes, seconds
"""
if style == 1:
return u'''%0.2f\xb0 %s''' % self.degrees()
elif style == 2:
return u'''%d\xb0 %0.2f' %s''' % self.degrees_minutes()
elif style == 3:
return u'''%d\xb0 %d' %0.2f" %s''' % self.degrees_minutes_seconds()
else:
raise ValueError("style must be 1, 2, or 3")
def format_html(self, style):
"""
format_html(style)
Backward compatibility for Quixote rlink and Pylons inews.
"""
        return self.format(style).replace(u"\xb0", u"&deg;").encode("ascii")
class Longitude(Latitude):
"""See Latitude docstring.
Positive is East; negative is West. Degrees must be between -180.0 and
180.0
"""
negative_direction = "West"
positive_direction = "East"
min = -180.0
max = 180.0
class DummyLatitude:
"""A pseudo-Latitude whose components are None.
Useful in building HTML forms where the value is not required.
Note: this class may be deleted if it doesn't turn out to be useful.
"""
value = None
def direction(self): return None
def degrees(self): return None, None
def degrees_minutes(self): return None, None, None
def degrees_minutes_seconds(self): return None, None, None, None
class DummyLongitude(DummyLatitude):
"""
Note: this class may be deleted if it doesn't turn out to be useful.
"""
pass
## The new simple API -- just methods that do what we need for ResponseLink, etc.
DEGREES = "\xb0" # "DEGREE SIGN"
MINUTES = "\u2032" # "PRIME"
SECONDS = "\u2033" # "DOUBLE PRIME"
LAT_POSITIVE_DIRECTION = "North"
LAT_NEGATIVE_DIRECTION = "South"
LON_POSITIVE_DIRECTION = "East"
LON_NEGATIVE_DIRECTION = "West"
FORMAT1 = "{:.2f}\N{DEGREE SIGN} {}"
FORMAT2 = "{:.0f}\N{DEGREE SIGN} {:.2f}\N{PRIME} {}"
FORMAT3 = "{:.0f}\N{DEGREE SIGN} {:.0f}\N{PRIME} {:.2f}\N{DOUBLE PRIME} {}"
def reduce_base_60(f):
"""extract the base 60 fractional portion of a floating point number.
i.e. minutes from degrees, seconds from minutes.
"""
fract, whole = math.modf(f)
# Add a tiny bit before rounding to avoid binary rounding errors.
fract = abs(fract)
fract = (fract + 1e-14) * 60
fract = round(fract, 10)
return whole, fract
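# Illustrative example (a sketch, not part of the original module):
# reduce_base_60(12.5) -> (12.0, 30.0), i.e. 12.5 degrees is 12 degrees 30 minutes.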
def format_latlon2(f, positive_direction, negative_direction):
direction = positive_direction if f >= 0.0 else negative_direction
degrees, minutes = reduce_base_60(f)
degrees = abs(degrees)
return FORMAT2.format(degrees, minutes, direction)
def format_latlon3(f, positive_direction, negative_direction):
direction = positive_direction if f >= 0.0 else negative_direction
degrees, minutes = reduce_base_60(f)
minutes, seconds = reduce_base_60(minutes)
degrees = abs(degrees)
return FORMAT3.format(degrees, minutes, seconds, direction)
def format_lat(f):
return format_latlon2(f, LAT_POSITIVE_DIRECTION, LAT_NEGATIVE_DIRECTION)
def format_lon(f):
return format_latlon2(f, LON_POSITIVE_DIRECTION, LON_NEGATIVE_DIRECTION)
def format_lat_dms(f):
return format_latlon3(f, LAT_POSITIVE_DIRECTION, LAT_NEGATIVE_DIRECTION)
def format_lon_dms(f):
return format_latlon3(f, LON_POSITIVE_DIRECTION, LON_NEGATIVE_DIRECTION)
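# Illustrative outputs (a sketch, not part of the original module):
#   format_lat(45.5)     -> "45\N{DEGREE SIGN} 30.00\N{PRIME} North"
#   format_lat_dms(45.5) -> "45\N{DEGREE SIGN} 30\N{PRIME} 0.00\N{DOUBLE PRIME} North"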
| NOAA-ORR-ERD/hazpy.unit_conversion | hazpy/unit_conversion/lat_long.py | Python | unlicense | 11,710 | 0.004953 |
import getpass
import statsd
import logging
LOG = logging.getLogger(__name__)
def increment_as_user(*label_components):
try:
statsd.increment(assemble_label(label_components, getpass.getuser()))
statsd.increment(assemble_label(label_components, 'total'))
except:
LOG.exception('failed to increment as user %s', label_components)
def increment(*args, **kwargs):
try:
statsd.increment(*args, **kwargs)
except:
LOG.exception('failed to increment args=%s, kwargs=%s', args, kwargs)
def create_timer(name):
return statsd.StatsdTimer(name)
def assemble_label(rest, tail):
lc = list(rest) + [tail]
return '.'.join(lc)
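# Illustrative usage sketch (label names are assumptions, not part of the original module):
#   assemble_label(('flow', 'commands'), 'total')   -> 'flow.commands.total'
#   increment_as_user('flow', 'commands', 'submit') # bumps the per-user and 'total' counters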
| genome/flow-core | flow/util/stats.py | Python | agpl-3.0 | 685 | 0.008759 |
# Django settings for wagtaildemo project.
import os
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '..', '..')
BASE_DIR = PROJECT_ROOT
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
# Default to dummy email backend. Configure dev/production/local backend
# as per https://docs.djangoproject.com/en/dev/topics/email/#email-backends
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'wagtaildemo',
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
# Note that with this set to True, Wagtail will fall back on using numeric dates
# in date fields, as opposed to 'friendly' dates like "24 Sep 2013", because
# Python's strptime doesn't support localised month names: https://code.djangoproject.com/ticket/13339
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DATE_FORMAT = 'j F Y'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder',
)
# ** You would never normally put the SECRET_KEY in a public repository,
# ** however this is a demo app so we're using the default settings.
# ** Don't use this key in any non-demo usage!
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wq21wtjo3@d_qfjvd-#td!%7gfy2updj2z+nev^k$iy%=m4_tr'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
from django.conf import global_settings
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
)
ROOT_URLCONF = 'wagtaildemo.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wagtaildemo.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.sites', # Wagtail uses its own site management logic
'django.contrib.messages',
'django.contrib.staticfiles',
'compressor',
'taggit',
'modelcluster',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailimages',
'wagtail.wagtailembeds',
'wagtail.wagtailsearch',
'wagtail.wagtailredirects',
'wagtail.wagtailforms',
'wagtail.wagtailsites',
'demo',
)
EMAIL_SUBJECT_PREFIX = '[wagtaildemo] '
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2')
# django-compressor settings
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Auth settings
LOGIN_URL = 'django.contrib.auth.views.login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# WAGTAIL SETTINGS
WAGTAIL_SITE_NAME = 'wagtaildemo'
| gasman/wagtaildemo | wagtaildemo/settings/base.py | Python | bsd-3-clause | 6,927 | 0.000866 |
"""TweetyNet model"""
import torch
from torch import nn
from torch.nn import functional as F
class Conv2dTF(nn.Conv2d):
PADDING_METHODS = ('VALID', 'SAME')
"""Conv2d with padding behavior from Tensorflow
adapted from
https://github.com/mlperf/inference/blob/16a5661eea8f0545e04c86029362e22113c2ec09/others/edge/object_detection/ssd_mobilenet/pytorch/utils.py#L40
as referenced in this issue:
https://github.com/pytorch/pytorch/issues/3867#issuecomment-507025011
used to maintain behavior of original implementation of TweetyNet that used Tensorflow 1.0 low-level API
"""
def __init__(self, *args, **kwargs):
super(Conv2dTF, self).__init__(*args, **kwargs)
padding = kwargs.get("padding", "SAME")
if not isinstance(padding, str):
raise TypeError(f"value for 'padding' argument should be a string, one of: {self.PADDING_METHODS}")
padding = padding.upper()
if padding not in self.PADDING_METHODS:
raise ValueError(
f"value for 'padding' argument must be one of '{self.PADDING_METHODS}' but was: {padding}"
)
self.padding = padding
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(
0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size
)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == "VALID":
return F.conv2d(
input,
self.weight,
self.bias,
self.stride,
padding=0,
dilation=self.dilation,
groups=self.groups,
)
elif self.padding == "SAME":
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(
input,
self.weight,
self.bias,
self.stride,
padding=(padding_rows // 2, padding_cols // 2),
dilation=self.dilation,
groups=self.groups,
)
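# Illustrative note (a sketch, not part of the original module): with the default
# padding="SAME" and stride 1, Conv2dTF preserves spatial size, e.g. an assumed
# input of shape (1, 1, 513, 88) run through Conv2dTF(1, 32, kernel_size=(5, 5))
# keeps its (513, 88) spatial dimensions, matching Tensorflow's "SAME" behavior.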
class TweetyNet(nn.Module):
def __init__(self,
num_classes,
input_shape=(1, 513, 88),
padding='SAME',
conv1_filters=32,
conv1_kernel_size=(5, 5),
conv2_filters=64,
conv2_kernel_size=(5, 5),
pool1_size=(8, 1),
pool1_stride=(8, 1),
pool2_size=(8, 1),
pool2_stride=(8, 1),
hidden_size=None,
rnn_dropout=0.,
num_layers=1,
bidirectional=True,
):
"""initialize TweetyNet model
Parameters
----------
num_classes : int
number of classes to predict, e.g., number of syllable classes in an individual bird's song
input_shape : tuple
with 3 elements corresponding to dimensions of spectrogram windows: (channels, frequency bins, time bins).
i.e. we assume input is a spectrogram and treat it like an image, typically with one channel,
the rows are frequency bins, and the columns are time bins. Default is (1, 513, 88).
padding : str
type of padding to use, one of {"VALID", "SAME"}. Default is "SAME".
conv1_filters : int
Number of filters in first convolutional layer. Default is 32.
conv1_kernel_size : tuple
Size of kernels, i.e. filters, in first convolutional layer. Default is (5, 5).
conv2_filters : int
Number of filters in second convolutional layer. Default is 64.
conv2_kernel_size : tuple
Size of kernels, i.e. filters, in second convolutional layer. Default is (5, 5).
        pool1_size : two element tuple of ints
            Size of sliding window for first max pooling layer. Default is (8, 1).
        pool1_stride : two element tuple of ints
            Step size for sliding window of first max pooling layer. Default is (8, 1).
        pool2_size : two element tuple of ints
            Size of sliding window for second max pooling layer. Default is (8, 1).
        pool2_stride : two element tuple of ints
            Step size for sliding window of second max pooling layer. Default is (8, 1).
hidden_size : int
number of features in the hidden state ``h``. Default is None,
in which case ``hidden_size`` is set to the dimensionality of the
output of the convolutional neural network. This default maintains
the original behavior of the network.
rnn_dropout : float
If non-zero, introduces a Dropout layer on the outputs of each LSTM layer except the last layer,
with dropout probability equal to dropout. Default: 0
num_layers : int
Number of recurrent layers. Default is 1.
bidirectional : bool
If True, make LSTM bidirectional. Default is True.
"""
super().__init__()
self.num_classes = num_classes
self.input_shape = input_shape
self.cnn = nn.Sequential(
Conv2dTF(in_channels=self.input_shape[0],
out_channels=conv1_filters,
kernel_size=conv1_kernel_size,
padding=padding
),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=pool1_size,
stride=pool1_stride),
Conv2dTF(in_channels=conv1_filters,
out_channels=conv2_filters,
kernel_size=conv2_kernel_size,
padding=padding,
),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=pool2_size,
stride=pool2_stride),
)
# determine number of features in output after stacking channels
# we use the same number of features for hidden states
# note self.num_hidden is also used to reshape output of cnn in self.forward method
batch_shape = tuple((1,) + input_shape)
tmp_tensor = torch.rand(batch_shape)
tmp_out = self.cnn(tmp_tensor)
channels_out, freqbins_out = tmp_out.shape[1], tmp_out.shape[2]
self.rnn_input_size = channels_out * freqbins_out
if hidden_size is None:
self.hidden_size = self.rnn_input_size
else:
self.hidden_size = hidden_size
self.rnn = nn.LSTM(input_size=self.rnn_input_size,
hidden_size=self.hidden_size,
num_layers=num_layers,
dropout=rnn_dropout,
bidirectional=bidirectional)
# for self.fc, in_features = hidden_size * 2 because LSTM is bidirectional
# so we get hidden forward + hidden backward as output
self.fc = nn.Linear(in_features=self.hidden_size * 2, out_features=num_classes)
def forward(self, x):
features = self.cnn(x)
# stack channels, to give tensor shape (batch, rnn_input_size, num time bins)
features = features.view(features.shape[0], self.rnn_input_size, -1)
# switch dimensions for feeding to rnn, to (num time bins, batch size, input size)
features = features.permute(2, 0, 1)
rnn_output, _ = self.rnn(features)
# permute back to (batch, time bins, hidden size) to project features down onto number of classes
rnn_output = rnn_output.permute(1, 0, 2)
logits = self.fc(rnn_output)
# permute yet again so that dimension order is (batch, classes, time steps)
# because this is order that loss function expects
return logits.permute(0, 2, 1)
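# Illustrative usage sketch (shapes are assumptions based on the defaults above,
# not part of the original module):
#   net = TweetyNet(num_classes=10)        # expects (1, 513, 88) spectrogram windows
#   dummy = torch.rand(4, 1, 513, 88)      # (batch, channels, freq bins, time bins)
#   logits = net(dummy)                    # -> torch.Size([4, 10, 88]): (batch, classes, time bins)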
| yardencsGitHub/tf_syllable_segmentation_annotation | src/tweetynet/network.py | Python | bsd-3-clause | 8,302 | 0.00277 |
"""import portalocker
with portalocker.Lock('text.txt', timeout=5) as fh:
fh.write("Sono in testLoxk2.py")
"""
from lockfile import LockFile
lock = LockFile('text.txt')
with lock:
print lock.path, 'is locked.'
with open('text.txt', "a") as file:
file.write("Sono in testLock2.py")
| giulioribe/car-pooling | testLock2.py | Python | gpl-3.0 | 303 | 0 |
# Copyright (c) 2013 Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for more details
"""
.. module:: decorators
:platform: Unix, Windows
:synopsis: Decorators for SublimePython plugin
.. moduleauthor:: Oscar Campos <oscar.campos@member.fsf.org>
"""
import os
import functools
def debug(f):
    @functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
import traceback
with open(os.path.expanduser("~/trace"), "w") as fl:
traceback.print_exc(file=fl)
return wrapped
| leonth/private-configs | sublime-text-3/Packages/SublimePythonIDE/server/decorators.py | Python | mit | 598 | 0.001672 |
import os
import urlparse
import requests
import splinter
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from tests.common.utils import urljoin
class Browser(object):
'''Drives the browser in the functional test'''
def __init__(self, start_url):
        imp = os.environ.get('WEBDRIVER', 'firefox')
if imp in ('firefox', 'ff'):
driver = 'firefox'
else:
driver = 'phantomjs'
self.b = splinter.Browser(driver)
self.d = self.b.driver
self.d.set_window_size(1400, 1000)
self.start_url = start_url
def _el(self, selector):
return self.b.find_by_css(selector).first
@property
def title(self):
return self.b.title
@property
def path(self):
return urlparse.urlparse(self.b.url).path
def visit(self, url):
if not url.startswith('http'):
url = urljoin(self.start_url, url)
self.b.visit(url)
def gohome(self):
self.b.visit(self.start_url)
def click_link_by_text(self, text):
self.b.find_link_by_text(text).first.click()
def click_link_by_title(self, title):
self.b.find_by_xpath('//a[@title="%s"]' % title).first.click()
def find_link_by_text(self, text):
return self.b.find_link_by_text(text).first
def element_text(self, selector):
return self._el(selector).text
def element_attr(self, selector, name):
return self._el(selector)._element.get_attribute(name)
def click(self, selector):
self._el(selector).click()
def fill_form(self, form_kvs):
self.b.fill_form(form_kvs)
def find_by_name(self, name):
return self.b.find_by_name(name)
def submit(self, form_sel):
self._el(form_sel)._element.submit()
def submit_by_input_name(self, name):
self.b.find_by_name(name).first._element.submit()
def fill(self, name, value):
self.b.fill(name, value)
def fill_input_by_label(self, label, value):
# TODO: implement this, and use it to locate inputs in tests, instead
# of locating inputs by css selector. This is better for blackbox testing.
pass
def click_btn_with_text(self, text):
# TODO: same as fill_input_by_label
pass
def quit(self):
self.b.quit()
def wait_for_element(self, selector, timeout):
wait = WebDriverWait(self.d, timeout)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
def get_file_content(self, url):
sessionid = self.d.get_cookie('sessionid')['value']
return requests.get(url, cookies={'sessionid': sessionid}).text
| saukrIppl/seahub | tests/ui/driver.py | Python | apache-2.0 | 2,783 | 0.001078 |
# -*- coding: utf-8 -*-
# Mathmaker Lib offers lualatex-printable mathematical objects.
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker Lib.
# Mathmaker Lib is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker Lib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker Lib; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
from mathmakerlib.calculus import Number
from mathmakerlib.exceptions import MathmakerLibError, StopCalculation
from mathmakerlib.exceptions import ZeroBipoint, ZeroVector
from mathmakerlib.exceptions import ZeroLengthLineSegment
def test_MathmakerLibError():
"""Check the main mathmakerlib exception."""
with pytest.raises(MathmakerLibError) as excinfo:
raise MathmakerLibError
assert str(excinfo.value) == 'An error occured in Mathmaker Lib'
def test_StopCalculation():
"""Check StopCalculation exception."""
with pytest.raises(StopCalculation) as excinfo:
raise StopCalculation(Number('7.6'))
assert str(excinfo.value) == 'No further calculation can be done on ' \
'Number(\'7.6\').'
def test_ZeroBipoint():
"""Check ZeroBipoint exception."""
with pytest.raises(ZeroBipoint) as excinfo:
raise ZeroBipoint
assert str(excinfo.value) == 'Abusive use of a zero Bipoint.'
def test_ZeroVector():
"""Check ZeroVector exception."""
with pytest.raises(ZeroVector) as excinfo:
raise ZeroVector
assert str(excinfo.value) == 'Abusive use of a zero Vector.'
def test_ZeroLengthLineSegment():
"""Check ZeroLengthLineSegment exception."""
with pytest.raises(ZeroLengthLineSegment) as excinfo:
raise ZeroLengthLineSegment
assert str(excinfo.value) == 'Abusive use of a zero-length LineSegment.'
| nicolashainaux/mathmakerlib | tests/00_main/01_exceptions_test.py | Python | gpl-3.0 | 2,289 | 0 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScraperItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| cprakashagr/PythonClass | src/scraper/scraper/items.py | Python | mit | 286 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .virtual_machine_image_resource import VirtualMachineImageResource
class VirtualMachineImage(VirtualMachineImageResource):
"""Describes a Virtual Machine Image.
:param id: Resource Id
:type id: str
:param name: The name of the resource.
:type name: str
:param location: The supported Azure location of the resource.
:type location: str
:param tags: The tags attached to the resource.
:type tags: dict
:param plan:
:type plan: :class:`PurchasePlan
<azure.mgmt.compute.compute.v2015_06_15.models.PurchasePlan>`
:param os_disk_image:
:type os_disk_image: :class:`OSDiskImage
<azure.mgmt.compute.compute.v2015_06_15.models.OSDiskImage>`
:param data_disk_images:
:type data_disk_images: list of :class:`DataDiskImage
<azure.mgmt.compute.compute.v2015_06_15.models.DataDiskImage>`
"""
_validation = {
'name': {'required': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'properties.plan', 'type': 'PurchasePlan'},
'os_disk_image': {'key': 'properties.osDiskImage', 'type': 'OSDiskImage'},
'data_disk_images': {'key': 'properties.dataDiskImages', 'type': '[DataDiskImage]'},
}
def __init__(self, name, location, id=None, tags=None, plan=None, os_disk_image=None, data_disk_images=None):
super(VirtualMachineImage, self).__init__(id=id, name=name, location=location, tags=tags)
self.plan = plan
self.os_disk_image = os_disk_image
self.data_disk_images = data_disk_images
| SUSE/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute/v2015_06_15/models/virtual_machine_image.py | Python | mit | 2,245 | 0.001782 |
# This file is part of VoltDB.
# Copyright (C) 2008-2018 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# All the commands supported by the Voter application.
import os
@VOLT.Command(description = 'Build the Voter application and catalog.',
options = VOLT.BooleanOption('-C', '--conditional', 'conditional',
'only build when the catalog file is missing'))
def build(runner):
if not runner.opts.conditional or not os.path.exists('voter.jar'):
runner.java.compile('obj', 'src/voter/*.java', 'src/voter/procedures/*.java')
runner.call('volt.compile', '-c', 'obj', '-o', 'voter.jar', 'ddl.sql')
@VOLT.Command(description = 'Clean the Voter build output.')
def clean(runner):
runner.shell('rm', '-rfv', 'obj', 'debugoutput', 'voter.jar', 'voltdbroot')
@VOLT.Server('create',
description = 'Start the Voter VoltDB server.',
command_arguments = 'voter.jar',
classpath = 'obj')
def server(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.AsyncBenchmark', classpath = 'obj',
description = 'Run the Voter asynchronous benchmark.')
def async(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.SyncBenchmark', classpath = 'obj',
description = 'Run the Voter synchronous benchmark.')
def sync(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.JDBCBenchmark', classpath = 'obj',
description = 'Run the Voter JDBC benchmark.')
def jdbc(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.SimpleBenchmark', classpath = 'obj',
description = 'Run the Voter simple benchmark.')
def simple(runner):
runner.call('build', '-C')
runner.go()
| simonzhangsm/voltdb | tools/voter.d/voter.py | Python | agpl-3.0 | 2,792 | 0.013968 |
#
# AggHelp.py -- help classes for the Agg drawing
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import aggdraw as agg
from ginga import colors
class AggContext(object):
def __init__(self, canvas):
self.canvas = canvas
def set_canvas(self, canvas):
self.canvas = canvas
def get_color(self, color):
if isinstance(color, str):
r, g, b = colors.lookup_color(color)
elif isinstance(color, tuple):
# color is assumed to be a 3-tuple of RGB values as floats
# between 0 and 1
r, g, b = color
else:
r, g, b = 1.0, 1.0, 1.0
return (int(r*255), int(g*255), int(b*255))
def get_pen(self, color, linewidth=1):
# if hasattr(self, 'linestyle'):
# if self.linestyle == 'dash':
# cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
p = agg.Pen(self.get_color(color), width=linewidth)
return p
def get_brush(self, color):
p = agg.Brush(self.get_color(color))
return p
def get_font(self, name, size, color):
color = self.get_color(color)
# TODO: what kind of lookup can we use for this?
filename = '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf'
f = agg.Font(color, filename, size=size)
return f
def text_extents(self, text, font):
wd, ht = self.canvas.textsize(text, font)
return wd, ht
#END
| bsipocz/ginga | ginga/aggw/AggHelp.py | Python | bsd-3-clause | 1,635 | 0.005505 |
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function, division
from rplibs.six.moves import range # pylint: disable=import-error
from panda3d.core import LVecBase4i, LVecBase4
class ShadowAtlas(object):
""" Please refer to the native C++ implementation for docstrings and comments.
This is just the python implementation, which does not contain documentation! """
def __init__(self, size, tile_size=32):
self._size = size
self._tile_size = tile_size
self._num_used_tiles = 0
self.init_tiles()
def init_tiles(self):
self._num_tiles = self._size // self._tile_size
def row():
return [False for i in range(self._num_tiles)] # pylint: disable=unused-variable
self._flags = [row() for j in range(self._num_tiles)] # pylint: disable=unused-variable
def get_num_used_tiles(self):
return self._num_used_tiles
num_used_tiles = property(get_num_used_tiles)
def get_coverage(self):
return self._num_used_tiles / float(self._num_tiles ** 2)
coverage = property(get_coverage)
def reserve_region(self, x, y, w, h):
self._num_used_tiles += w * h
for x_offset in range(w):
for y_offset in range(h):
self._flags[x + x_offset][y + y_offset] = True
def find_and_reserve_region(self, tile_width, tile_height):
for x in range(self._num_tiles - tile_height + 1):
for y in range(self._num_tiles - tile_width + 1):
if self.region_is_free(x, y, tile_width, tile_height):
self.reserve_region(x, y, tile_width, tile_height)
return LVecBase4i(x, y, tile_width, tile_height)
print("Failed to find a free region of size", tile_width, "x", tile_height)
return LVecBase4i(-1)
def free_region(self, region):
self._num_used_tiles -= region.z * region.w
for x in range(region.z):
for y in range(region.w):
self._flags[region.x + x][region.y + y] = False
def get_tile_size(self):
return self._tile_size
def region_is_free(self, x, y, w, h):
for x_offset in range(w):
for y_offset in range(h):
if self._flags[x + x_offset][y + y_offset]:
return False
return True
def get_required_tiles(self, resolution):
if resolution % self._tile_size != 0:
print("ShadowAtlas: Invalid atlas resolution!")
return
return resolution // self._tile_size
def region_to_uv(self, region):
flt = LVecBase4(region.x, region.y, region.z, region.w)
return flt * (self._tile_size / self._size)
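# Illustrative usage sketch (sizes are assumptions, not part of the original module):
#   atlas = ShadowAtlas(size=4096, tile_size=32)
#   tiles = atlas.get_required_tiles(512)             # -> 16 tiles per side
#   region = atlas.find_and_reserve_region(tiles, tiles)
#   uv = atlas.region_to_uv(region)                   # tile rectangle scaled into [0, 1] UV space
#   atlas.free_region(region)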
| eswartz/RenderPipeline | rpcore/pynative/shadow_atlas.py | Python | mit | 3,799 | 0.001316 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from onnx import load, checker, NodeProto
def check_model(): # type: () -> None
parser = argparse.ArgumentParser('check-model')
parser.add_argument('model_pb', type=argparse.FileType('rb'))
args = parser.parse_args()
model = load(args.model_pb)
checker.check_model(model)
def check_node(): # type: () -> None
parser = argparse.ArgumentParser('check-node')
parser.add_argument('node_pb', type=argparse.FileType('rb'))
args = parser.parse_args()
node = NodeProto()
node.ParseFromString(args.node_pb.read())
checker.check_node(node)
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/bin/checker.py | Python | apache-2.0 | 749 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ElectionSimulation.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| CSC301H-Fall2013/ElectionSimulation | Code/ElectionSimulationInstaller/ElectionSimulation/manage.py | Python | mit | 261 | 0.003831 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Cppunit(AutotoolsPackage):
"""Obsolete Unit testing framework for C++"""
homepage = "https://wiki.freedesktop.org/www/Software/cppunit/"
url = "http://dev-www.libreoffice.org/src/cppunit-1.13.2.tar.gz"
version('1.13.2', '0eaf8bb1dcf4d16b12bec30d0732370390d35e6f')
| skosukhin/spack | var/spack/repos/builtin/packages/cppunit/package.py | Python | lgpl-2.1 | 1,545 | 0 |
# Copyright (c) 2015 bitlinker@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from Sensor import Sensor
from SensorValue import SensorValue
# Oregon weather station sensor class
class OregonSensor(Sensor):
SENSOR_TYPE_THN132N = 'THN132N'
SENSOR_TYPE_THGN132N = 'THGN132N'
VALUE_BATTERY = 'B'
__type = None
__battery = None
__id = None
__channel = None
def __init__(self, type, id, channel, batteryHigh):
Sensor.__init__(self, type)
self.__type = type
self.__id = id
self.__channel = channel
self.__battery = batteryHigh
def getType(self):
return self.__type
def getBatteryHigh(self):
return self.__battery
def getId(self):
return self.__id
def getChannel(self):
return self.__channel
def getUUID(self):
return self.getName() + self.__id + str(self.__channel)
def getValuesList(self):
result = Sensor.getValuesList(self)
if (self.__battery):
result.append(SensorValue(self.getUUID(), self.VALUE_BATTERY, self.__battery))
return result
| bitlinker/ArduWeather | Software/NarodmonDaemon/OregonSensor.py | Python | mit | 2,140 | 0.001402 |
#coding=utf8
def hello(instr):
bufstr = " helloWorld!"
return (instr + bufstr), 123
if __name__ == "__main__":
k = "yzh"
print hello(k)
| gdefias/StudyC | VS/test_CcallPY/Test_ccallpy/helloWorld.py | Python | gpl-2.0 | 153 | 0.045752 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
VERSION = (1, 5, 0)
__version__ = '.'.join([str(n) for n in VERSION])
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/external/org_mozilla_bleach/bleach/version.py | Python | bsd-2-clause | 136 | 0 |
import re
from django import forms
from sell.models import ShippingData
from django.contrib.localflavor.us.forms import USStateSelect, USZipCodeField
class ShippingDataForm(forms.ModelForm):
state = forms.CharField(widget=USStateSelect)
save_shipping_info = forms.BooleanField(label="Save Shipping Information", widget=forms.CheckboxInput(), required=False)
class Meta:
model = ShippingData
def clean_zip(self):
zip = self.cleaned_data.get("zip", "")
if zip.strip() == "": raise forms.ValidationError("Zip is a required field.")
if not (re.match("[0-9]{5}(-[0-9]{4})?$", zip)): raise forms.ValidationError("Invalid Zip code. Valid formats are XXXXX or XXXXX-XXXX")
return zip
def clean(self):
first_name = self.cleaned_data.get("first_name", "")
last_name = self.cleaned_data.get("last_name", "")
country = self.cleaned_data.get("country", "")
street = self.cleaned_data.get("street_address", "")
city = self.cleaned_data.get("city", "")
if first_name.strip() == "": raise forms.ValidationError("First name is a required field.")
        if last_name.strip() == "": raise forms.ValidationError("Last name is a required field.")
if street.strip() == "": raise forms.ValidationError("Street is a required field.")
if city.strip() == "": raise forms.ValidationError("City is a required field.")
if country.strip() == "": raise forms.ValidationError("Country is a required field.")
return self.cleaned_data
def save_shipping(self):
return self.cleaned_data.get("save_shipping_info", False)
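# Standalone sketch (not part of the original form): the ZIP pattern used in
# clean_zip() accepts five digits with an optional four-digit extension.
if __name__ == "__main__":
    for candidate in ("77005", "77005-1234", "7700", "77005-12"):
        print("%s -> %s" % (candidate, bool(re.match("[0-9]{5}(-[0-9]{4})?$", candidate))))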
| codepython/CollectorCity-Market-Place | stores/apps/sell/forms.py | Python | apache-2.0 | 1,696 | 0.014741 |
#!/usr/bin/env python3
from math import pi, atan
class ScanSetting(object):
"""docstring for ScanSetting"""
def __init__(self):
super(ScanSetting, self).__init__()
# for scan
self.scan_step = 400 # steps
        self.theta_a = pi / 6  # angle between the camera axis and the laser
self.img_width = 640
self.img_height = 480
self.sensorWidth = 3.67
self.sensorHeight = 2.74 + 0.08
self.focalLength = 3.6
# ######### mockup 2, measure by solidwork###
self.cab_m = self.img_width / 2
self.cab_l = self.img_width / 2
self.cab_r = self.img_width / 2
self.cameraX = 0.0
self.cameraY = 22.28 + 8
self.cameraZ = -174.70
self.laserX_L = -53.61
self.laserY_L = 31.62
self.laserZ_L = -76.47
self.laserX_R = 53.61
self.laserY_R = 31.62
self.laserZ_R = -76.47
self.theta_a = atan(self.laserX_L / self.laserZ_L)
self.MAXLaserRange = 65
self.LaserRangeMergeDistance = 65
self.MINLaserRange = 3
self.MagnitudeThreshold = 3
self.LLaserAdjustment = 0
self.RLaserAdjustment = 0
# for modeling
self.NoiseNeighbors = 50
self.NeighborhoodDistance = 10
self.SegmentationDistance = 2
self.CloseBottom = -1000
self.CloseTop = 1000
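# Quick illustration (not part of the original module): instantiate the default
# settings and report the laser angle derived from the laser/camera geometry above.
if __name__ == "__main__":
    from math import degrees
    settings = ScanSetting()
    print("laser angle theta_a: %.2f degrees" % degrees(settings.theta_a))
    print("camera resolution: %dx%d" % (settings.img_width, settings.img_height))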
| blesscat/flux_line_bot | fluxclient/scanner/scan_settings.py | Python | agpl-3.0 | 1,386 | 0 |
import logging
import hashlib
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to, etag_cache
from pylons.decorators import jsonify
from pylons.i18n.translation import _
from wurdig.lib.base import BaseController, render
log = logging.getLogger(__name__)
class JsController(BaseController):
@jsonify
def _json(self):
translations = {
'Are you positive you want to do that?': _('Are you positive '
'you want to do that?'),
'The item has successfully been deleted.': _('The item has '
'successfully been deleted.'),
'Disapprove': _('Disapprove'),
'The item has successfully been approved.': _('The item has '
'successfully been approved.'),
'Approve': _('Approve'),
'The item has successfully been disapproved.': _('The item has successfully '
'been disapproved.'),
'Your+request+has+been+completed+successfully': _('Your+request+has+been+'
'completed+successfully'),
'An unexpected error has occurred.': _('An unexpected error has occurred.'),
'Enter key word(s)': _('Enter key word(s)')
}
return translations
def translations(self):
json_string = "if(!this.WURDIG) {var WURDIG = {};}WURDIG.translate = %s" % self._json()
etag_cache(key=hashlib.md5(json_string).hexdigest())
response.content_type = 'application/x-javascript; charset=utf-8'
response.cache_control = 'max-age=2592000'
response.pragma = ''
        return json_string
| leveille/blog.v1 | wurdig/controllers/js.py | Python | mit | 1,892 | 0.006342 |
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# Author: ChenLiang
import channel02
obj = channel02.RedisHelper()
while True:
inp = input('>> ')
if inp == '':
print("当前输入为空, 请重新输入...")
continue
else:
        obj.public(inp, 'fm103.7')
| smartczm/python-learn | Old-day01-10/s13-day12/pub-sub/publish02.py | Python | gpl-2.0 | 289 | 0.003745 |
"""
We have two special characters. The first character can be represented by one bit 0. The second character can be represented by two bits (10 or 11).
Now given a string represented by several bits. Return whether the last character must be a one-bit character or not. The given string will always end with a zero.
Example 1:
Input:
bits = [1, 0, 0]
Output: True
Explanation:
The only way to decode it is two-bit character and one-bit character. So the last character is one-bit character.
Example 2:
Input:
bits = [1, 1, 1, 0]
Output: False
Explanation:
The only way to decode it is two-bit character and two-bit character. So the last character is NOT one-bit character.
Note:
1 <= len(bits) <= 1000.
bits[i] is always 0 or 1.
"""
class Solution(object):
def isOneBitCharacter(self, bits):
"""
:type bits: List[int]
:rtype: bool
"""
        # Scan left to right: a 1 always opens a two-bit character (10 or 11),
        # while a 0 not consumed by a preceding 1 is a one-bit character.
        skip_next, curr = False, None
        for i in bits:
            if skip_next:
                # Second bit of a two-bit character; just consume it.
                skip_next = False
                curr = 2
                continue
            if i == 1:
                skip_next = True
                curr = 2
            else:
                skip_next = False
                curr = 1
        # True iff the final decoded character was the one-bit character.
        return curr == 1
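# Quick sanity check (not part of the original solution): the two examples from
# the problem statement above.
if __name__ == "__main__":
    solution = Solution()
    print(solution.isOneBitCharacter([1, 0, 0]))     # expected: True
    print(solution.isOneBitCharacter([1, 1, 1, 0]))  # expected: False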
| franklingu/leetcode-solutions | questions/1-bit-and-2-bit-characters/Solution.py | Python | mit | 1,231 | 0.004874 |
#!/usr/bin/env python
# Shine.Configuration.FileSystem class
# Copyright (C) 2009-2017 CEA
"""Unit test for Shine.Configuration.FileSystem"""
import unittest
import textwrap
import time
from Utils import makeTempFile, setup_tempdirs, clean_tempdirs
from Shine.Configuration.FileSystem import FileSystem, ModelFileIOError, ConfigDeviceNotFoundError
from Shine.Configuration.Exceptions import ConfigException, ConfigInvalidFileSystem
from Shine.Configuration.TargetDevice import TargetDevice
from Shine.Configuration.Backend.Backend import Backend
class FileSystemTest(unittest.TestCase):
def setUp(self):
self._fs = None
self._testfile = None
setup_tempdirs()
def tearDown(self):
# Remove file from cache
if self._fs:
self._fs.unregister()
# Delete the temp cache directory
clean_tempdirs()
def makeConfFileSystem(self, text):
"""
Create a temporary file instance and returns a FileSystem with it.
"""
self._testfile = makeTempFile(text)
fsconf = FileSystem.create_from_model(self._testfile.name)
return fsconf
def testLoadFile(self):
"""create a FileSystem from model example.lmf"""
fs = FileSystem(filename="../conf/models/example.lmf")
self.assertEqual(len(fs.model), 15)
def test_missing_config_file(self):
"""test missing config file detection"""
self.assertRaises(ModelFileIOError, FileSystem, filename="/bad/file")
def testMGSOnly(self):
"""filesystem with only a MGS"""
self._fs = self.makeConfFileSystem("""
fs_name: mgs
nid_map: nodes=foo1 nids=foo1@tcp
mgt: node=foo1 dev=/dev/dummy
""")
self.assertEqual(len(self._fs.model), 3)
def testRouterOnly(self):
"""filesystem with only routers"""
self._fs = self.makeConfFileSystem("""
fs_name: router
nid_map: nodes=foo1 nids=foo1@tcp
router: node=foo1
""")
self.assertEqual(len(self._fs.model), 3)
def testClientOnly(self):
"""filesystem with only clients"""
self._fs = self.makeConfFileSystem("""
fs_name: clients
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp
mgt: node=foo1 dev=/dev/dummy
client: node=foo[2-3]
""")
self.assertEqual(len(self._fs.model), 4)
def testMDTnoMGT(self):
"""filesystem with a MDT and no MGT"""
self.assertRaises(ConfigInvalidFileSystem, self.makeConfFileSystem, """
fs_name: mdtnomgt
nid_map: nodes=foo1 nids=foo1@tcp
mdt: node=foo1 dev=/dev/dummy
""")
def testOSTnoMGT(self):
"""filesystem with OSTs and no MGT"""
self.assertRaises(ConfigInvalidFileSystem, self.makeConfFileSystem, """
fs_name: ostnomgt
nid_map: nodes=foo[1,2] nids=foo[1,2]@tcp
ost: node=foo1 dev=/dev/dummy
ost: node=foo2 dev=/dev/dummy
""")
def testMGTandMDTnoOST(self):
"""filesystem with both MGT and MDT and no OST"""
self.assertRaises(ConfigInvalidFileSystem, self.makeConfFileSystem, """
fs_name: example
nid_map: nodes=foo1 nids=foo1@tcp
mgt: node=foo1 dev=/dev/dummy2
mdt: node=foo1 dev=/dev/dummy1
""")
def testMultipleNidMap(self):
"""filesystem with complex nid setup"""
self._fs = self.makeConfFileSystem("""
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
nid_map: nodes=foo[1-2] nids=foo[1-2]-bone@tcp1
mgt: node=foo1 ha_node=foo2
""")
self.assertEqual(len(self._fs.model), 3)
self.assertEqual(self._fs.get_nid('foo1'), ['foo1@tcp0', 'foo1-bone@tcp1'])
self.assertEqual(self._fs.get_nid('foo2'), ['foo2@tcp0', 'foo2-bone@tcp1'])
def test_unbalanced_nid_map(self):
"""filesystem with nids with several ranges."""
self._fs = self.makeConfFileSystem("""
fs_name: nids
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp
nid_map: nodes=bar[1-3] nids=bar[1-3]@tcp
""")
self.assertEqual(self._fs.get_nid('foo1'), ['foo1@tcp'])
self.assertEqual(self._fs.get_nid('foo2'), ['foo2@tcp'])
self.assertEqual(self._fs.get_nid('bar1'), ['bar1@tcp'])
self.assertEqual(self._fs.get_nid('bar2'), ['bar2@tcp'])
self.assertEqual(self._fs.get_nid('bar3'), ['bar3@tcp'])
def test_big_nid_map_scalable(self):
"""filesystem with nids with several ranges."""
before = time.time()
self._fs = self.makeConfFileSystem("""
fs_name: nids
nid_map: nodes=foo[1-9999] nids=bar[1-9999]@tcp
""")
elapsed = time.time() - before
self.assertTrue(elapsed < 2, "%.2fs exceeds 2s threshold" % elapsed)
self.assertEqual(len(self._fs.nid_map), 9999)
def testNoIndexDefined(self):
"""filesystem with no index set"""
self._fs = self.makeConfFileSystem("""
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
mgt: node=foo1
mdt: node=foo2
ost: node=foo2
ost: node=foo1
""")
self.assertEqual(len(self._fs.get('ost')), 2)
self.assertEqual(self._fs.get('ost')[0].get('node'), 'foo2')
self.assertEqual(self._fs.get('ost')[0].get('index'), 0)
self.assertEqual(self._fs.get('ost')[1].get('node'), 'foo1')
self.assertEqual(self._fs.get('ost')[1].get('index'), 1)
def testSomeIndexedDefined(self):
"""filesystem with not all indexes set"""
self._fs = self.makeConfFileSystem("""
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
mgt: node=foo1
mdt: node=foo2
ost: node=foo2
ost: node=foo1 index=0
""")
self.assertEqual(len(self._fs.get('ost')), 2)
self.assertEqual(self._fs.get('ost')[0].get('node'), 'foo2')
self.assertEqual(self._fs.get('ost')[0].get('index'), 1)
self.assertEqual(self._fs.get('ost')[1].get('node'), 'foo1')
self.assertEqual(self._fs.get('ost')[1].get('index'), 0)
def testSameIndexedDefined(self):
"""filesystem with same index used twice"""
self.assertRaises(ConfigInvalidFileSystem, self.makeConfFileSystem, """
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
mgt: node=foo1
mdt: node=foo2
ost: node=foo2 index=0
ost: node=foo1 index=0
""")
def make_fs_with_backend(self, backend, text):
"""
Create a FileSystem instance from text with a specific backend
instance.
"""
self._testfile = makeTempFile(text)
fs = FileSystem(self._testfile.name)
fs.backend = backend
fs.setup_target_devices()
return fs
def test_match_device_simple_ha_node(self):
"""test target.match_device() with a simple ha_node"""
# Dummy backend
class DummyBackend(Backend):
def start(self):
pass
def get_target_devices(self, target, fs_name=None, update_mode=None):
return [TargetDevice('mgt', {'node': 'foo1', 'ha_node': ['foo2']}),
TargetDevice('mgt', {'node': 'foo1', 'ha_node': ['foo3']})]
# Test with 1 matching ha_node
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp0
mgt: node=foo1 ha_node=foo2
""")
self.assertEqual(len(fs.get('mgt')), 1)
# Test with 1 matching ha_node (bis)
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp0
mgt: node=foo1 ha_node=foo3
""")
self.assertEqual(len(fs.get('mgt')), 1)
# Test without ha_node
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp0
mgt: node=foo1
""")
fs.setup_target_devices()
# Test with no matching ha_node
self.assertRaises(ConfigDeviceNotFoundError, self.make_fs_with_backend,
DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-4] nids=foo[1-4]@tcp0
mgt: node=foo1 ha_node=foo4
""")
def test_match_device_multiple_ha_node(self):
"""test target.match_device() with a several ha_node"""
# Dummy backend
class DummyBackend(Backend):
def start(self):
pass
def get_target_devices(self, target, fs_name=None, update_mode=None):
return [TargetDevice('mgt', {'node': 'foo1', 'ha_node': ['foo2', 'foo3']}),
TargetDevice('mgt', {'node': 'foo1', 'ha_node': ['foo2', 'foo4']})]
# Test with 2 matching ha_nodes
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-4] nids=foo[1-4]@tcp0
mgt: node=foo1 ha_node=foo2 ha_node=foo3
""")
self.assertEqual(len(fs.get('mgt')), 1)
# Test with 1 matching ha_node
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp0
mgt: node=foo1 ha_node=foo2
""")
self.assertEqual(len(fs.get('mgt')), 2)
# Test without ha_node
fs = self.make_fs_with_backend(DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp0
mgt: node=foo1
""")
self.assertEqual(len(fs.get('mgt')), 2)
def test_backend_same_indexed_defined(self):
"""filesystem with backend and same index used twice"""
# Dummy backend
class DummyBackend(Backend):
def start(self):
pass
def get_target_devices(self, target, fs_name=None, update_mode=None):
return [TargetDevice('mgt', {'node': 'foo1', 'ha_node': ['foo2', 'foo3']}),
TargetDevice('mdt', {'node': 'foo2', 'ha_node': ['foo1', 'foo3']}),
TargetDevice('ost', {'node': 'foo1', 'ha_node': ['foo2', 'foo3']}),
TargetDevice('ost', {'node': 'foo2', 'ha_node': ['foo3', 'foo1']})]
self.assertRaises(ConfigInvalidFileSystem, self.make_fs_with_backend, DummyBackend(), """
fs_name: example
nid_map: nodes=foo[1-2] nids=foo[1-2]@tcp0
mgt: node=foo1
mdt: node=foo2
ost: node=foo2 index=0
ost: node=foo1 index=0
""")
class FileSystemCompareTest(unittest.TestCase):
def setUp(self):
setup_tempdirs()
def tearDown(self):
clean_tempdirs()
def _compare(self, orig, new):
tmpfile = makeTempFile(textwrap.dedent(orig))
origconf = FileSystem(tmpfile.name)
newfile = makeTempFile(textwrap.dedent(new))
newconf = FileSystem(newfile.name)
return origconf.compare(newconf)
def test_forbidden(self):
self.assertRaises(ConfigException, self._compare,
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
""",
"""fs_name: compar2
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
""")
def test_forbidden_target(self):
self.assertRaises(ConfigException, self._compare,
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
ost: node=foo2 dev=/dev/sda jdev=/dev/sdb
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
ost: node=foo2 dev=/dev/sda jdev=/dev/sdc
""")
def test_only_description(self):
actions = self._compare(
"""fs_name: compare
description: foo
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
""",
"""fs_name: compare
description: bar
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
""")
self.assertEqual(len(actions), 1)
self.assertTrue(actions.get('copyconf', False))
def test_no_difference(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
""")
self.assertEqual(len(actions), 0)
def test_clients_path(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 mode=external
client: node=foo[2,3] mount_path=/mypath
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 mode=external
client: node=foo2 mount_path=/mypath
client: node=foo3 mount_path=/mypath2
""")
self.assertEqual(len(actions), 3)
self.assertTrue(actions.get('copyconf', False))
self.assertTrue('unmount' in actions)
self.assertTrue('mount' in actions)
def test_nid_change(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@o2ib
mgt: node=foo1 dev=/dev/sda
""")
self.assertEqual(len(actions), 2)
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('writeconf', False))
def test_add_ost(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
ost: node=foo4 dev=/dev/sda
""")
self.assertEqual(len(actions), 3)
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('format', False))
self.assertTrue(actions.get('start', False))
def test_remove_target(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
ost: node=foo4 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
""")
self.assertEqual(len(actions), 4)
self.assertEqual(actions.get('copyconf'), True)
self.assertEqual(actions.get('writeconf'), True)
self.assertEqual(len(actions.get('stop', [])), 1)
self.assertEqual(len(actions.get('remove', [])), 1)
def test_remove_router(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
router: node=foo1
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
router: node=foo2
""")
self.assertEqual(len(actions), 3)
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('stop', False))
self.assertTrue(actions.get('start', False))
def test_mkfs_options(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt_mkfs_options: -m0
mgt: node=foo1 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
""")
self.assertEqual(len(actions), 2)
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('reformat', False))
def test_quota_options(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
quota: no
mgt: node=foo1 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
quota: yes
mgt: node=foo1 dev=/dev/sda
""")
self.assertEqual(len(actions), 2)
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('tunefs', False))
def test_stripping_options(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
stripe_count: 1
mgt: node=foo1 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
stripe_count: 2
mgt: node=foo1 dev=/dev/sda
""")
self.assertEqual(len(actions), 2)
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('tunefs', False))
def test_target_mount_options(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt_mount_options: ro
mgt: node=foo1 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
""")
self.assertEqual(len(actions), 2)
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('restart', False))
def test_client_mount_options_no_clients(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mount_path: /foo
mgt: node=foo1 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mount_path: /bar
mgt: node=foo1 dev=/dev/sda
""")
self.assertEqual(actions.keys(), ['copyconf'])
self.assertTrue(actions.get('copyconf', False))
def test_client_mount_options(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mount_path: /foo
mgt: node=foo1 dev=/dev/sda
client: node=foo2
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mount_path: /bar
mgt: node=foo1 dev=/dev/sda
client: node=foo2
""")
self.assertEqual(len(actions), 3)
self.assertTrue(actions.get('copyconf', False))
self.assertEqual(len(actions.get('mount', [])), 1)
self.assertEqual(len(actions.get('unmount', [])), 1)
def test_client_mount_path_and_remove(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mount_path: /foo
mgt: node=foo1 dev=/dev/sda
client: node=foo[2-3]
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mount_path: /bar
mgt: node=foo1 dev=/dev/sda
client: node=foo2
""")
self.assertEqual(sorted(actions.keys()),
['copyconf', 'mount', 'unmount'])
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('unmount', False))
self.assertTrue(actions.get('mount', False))
def test_per_client_mount_options_update(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mount_path: /foo
mgt: node=foo1 dev=/dev/sda
client: node=foo2
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mount_path: /foo
mgt: node=foo1 dev=/dev/sda
client: node=foo2 mount_options=ro
""")
self.assertEqual(sorted(actions.keys()),
['copyconf', 'mount', 'unmount'])
self.assertTrue(actions.get('copyconf', False))
self.assertTrue(actions.get('unmount', False))
self.assertTrue(actions.get('mount', False))
def test_update_target_ha_node(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda ha_node=foo1
ost: node=foo3 dev=/dev/sda
""")
self.assertEqual(len(actions), 4)
self.assertEqual(actions.get('copyconf'), True)
self.assertEqual(actions.get('writeconf'), True)
self.assertEqual(len(actions.get('stop', [])), 1)
self.assertEqual(actions.get('stop')[0].dic,
{'node':'foo2', 'dev':'/dev/sda'})
self.assertEqual(len(actions.get('start', [])), 1)
self.assertEqual(actions.get('start')[0].dic,
{'node':'foo2', 'dev':'/dev/sda', 'ha_node':['foo1']})
def test_update_target_tag(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda tag=ost_foo3_dev_sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda tag=ost_fooE_dev_sda
""")
self.assertEqual(len(actions), 1)
self.assertEqual(actions.get('copyconf'), True)
def test_update_nid_map_targets(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
""",
"""fs_name: compare
nid_map: nodes=foo[1-3] nids=foo[1-3]@o2ib
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
""")
self.assertEqual(len(actions), 2)
self.assertEqual(actions.get('copyconf'), True)
self.assertEqual(actions.get('writeconf'), True)
def test_update_nid_map_clients(self):
actions = self._compare(
"""fs_name: compare
nid_map: nodes=foo[1-3] nids=foo[1-3]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
mount_path: /foo
""",
"""fs_name: compare
nid_map: nodes=foo[1-4] nids=foo[1-4]@tcp
mgt: node=foo1 dev=/dev/sda
mdt: node=foo2 dev=/dev/sda
ost: node=foo3 dev=/dev/sda
client: node=foo4
""")
self.assertEqual(len(actions), 2)
self.assertTrue(actions.get('mount', False))
self.assertEqual(actions.get('copyconf'), True)
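# Conventional entry point added for convenience (not in the original file); it
# assumes the Shine and Utils test dependencies imported above are available.
if __name__ == '__main__':
    unittest.main()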
| bullxpfs/lustre-shine | tests/Configuration/ConfigFileSystemTest.py | Python | gpl-2.0 | 21,146 | 0.003168 |
# Copyright (c) 2017 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from networking_cisco import backwards_compatibility as bc
from networking_cisco.ml2_drivers.nexus import trunk
from neutron.tests.unit.db import test_db_base_plugin_v2
PORT_ID = 'fake_port_id'
TRUNK_ID = 'fake_trunk_id'
DNS_NAME = 'test_dns_name'
VM_NAME = 'test_vm_name'
SEGMENTATION_VLAN = 'vlan'
SEGMENTATION_ID1 = 101
SEGMENTATION_ID2 = 102
SUBPORTS = [
{'segmentation_type': SEGMENTATION_VLAN, 'port_id': PORT_ID,
'segmentation_id': SEGMENTATION_ID1},
{'segmentation_type': SEGMENTATION_VLAN, 'port_id': PORT_ID,
'segmentation_id': SEGMENTATION_ID2}]
TRUNK = {
'status': bc.constants.PORT_STATUS_ACTIVE,
'sub_ports': SUBPORTS,
'name': 'trunk0',
'admin_state_up': 'true',
'tenant_id': 'fake_tenant_id',
'project_id': 'fake_project_id',
'port_id': PORT_ID,
'id': TRUNK_ID,
'description': 'fake trunk port'}
PROFILE_BAREMETAL = [{"switch_info": "test_value"}]
SUBPORT = {
'status': bc.constants.PORT_STATUS_ACTIVE,
'port_id': PORT_ID,
'segmentation_id': SEGMENTATION_ID1}
PORT_BAREMETAL = {
'status': bc.constants.PORT_STATUS_ACTIVE,
'id': PORT_ID,
bc.portbindings.VNIC_TYPE: bc.portbindings.VNIC_BAREMETAL,
bc.dns.DNSNAME: DNS_NAME,
bc.portbindings.PROFILE: {"local_link_information": PROFILE_BAREMETAL},
'trunk_details': {'trunk_id': TRUNK_ID, 'sub_ports': SUBPORTS}}
PORT_VM = {
'status': bc.constants.PORT_STATUS_ACTIVE,
'id': PORT_ID,
bc.portbindings.VNIC_TYPE: bc.portbindings.VNIC_NORMAL,
bc.portbindings.HOST_ID: VM_NAME,
bc.portbindings.PROFILE: {},
'trunk_details': {'trunk_id': TRUNK_ID, 'sub_ports': SUBPORTS}}
class TestSubPort(object):
port_id = PORT_ID
trunk_id = TRUNK_ID
segmentation_type = SEGMENTATION_VLAN
segmentation_id = SEGMENTATION_ID1
class TestTrunk(object):
admin_state_up = 'test_admin_state'
id = TRUNK_ID
tenant_id = 'test_tenant_id'
name = 'test_trunk_name'
port_id = PORT_ID
status = bc.constants.PORT_STATUS_ACTIVE
sub_ports = SUBPORTS
update = mock.Mock()
@testtools.skipIf(bc.NEUTRON_VERSION < bc.NEUTRON_OCATA_VERSION,
"Test not applicable prior to stable/ocata.")
class TestNexusTrunkHandler(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
super(TestNexusTrunkHandler, self).setUp()
self.handler = trunk.NexusMDTrunkHandler()
self.plugin = bc.get_plugin()
self.plugin.get_port = mock.Mock()
self.plugin.update_port = mock.Mock()
self.mock_subport_get_object = mock.patch.object(
bc.trunk_objects.SubPort, 'get_object',
return_value=TestSubPort).start()
self.mock_trunk_get_object = mock.patch.object(
bc.trunk_objects.Trunk, 'get_object',
return_value=TestTrunk).start()
self.mock_trunk_get_object = mock.patch.object(
bc.trunk_objects.Trunk, 'get_object').start()
def _test_update_subports(self, port, host_id):
self.handler.update_subports(port)
self.assertEqual(2, self.plugin.update_port.call_count)
self.plugin.update_port.assert_called_with(mock.ANY, PORT_ID,
{'port':
{bc.portbindings.HOST_ID: host_id,
'device_owner': bc.trunk_consts.TRUNK_SUBPORT_OWNER}})
self.mock_trunk_get_object.called_once_with(mock.ANY, id=TRUNK_ID)
TestTrunk.update.called_once_with(
status=bc.trunk_consts.ACTIVE_STATUS)
self.mock_trunk_get_object.assert_called_once_with(
mock.ANY, id=TRUNK_ID)
def test_is_trunk_parentport(self):
return_value = self.handler.is_trunk_parentport(PORT_VM)
self.assertTrue(return_value)
def test_is_trunk_parentport_no_trunk(self):
PORT_VM_NO_TRUNK = PORT_VM.copy()
del PORT_VM_NO_TRUNK['trunk_details']
return_value = self.handler.is_trunk_parentport(PORT_VM_NO_TRUNK)
self.assertFalse(return_value)
def test_is_trunk_subport(self):
PORT_VM['device_owner'] = bc.trunk_consts.TRUNK_SUBPORT_OWNER
return_value = self.handler.is_trunk_subport(PORT_VM)
self.assertTrue(return_value)
def test_is_trunk_subport_invalid_deviceowner(self):
PORT_VM['device_owner'] = 'fake_owner'
return_value = self.handler.is_trunk_subport(PORT_VM)
self.assertFalse(return_value)
def test_update_subports_baremetal(self):
self._test_update_subports(PORT_BAREMETAL, DNS_NAME)
def test_is_trunk_subport_baremetal(self):
self.plugin.get_port.return_value = PORT_BAREMETAL
return_value = self.handler.is_trunk_subport_baremetal(PORT_BAREMETAL)
self.assertTrue(return_value)
self.mock_subport_get_object.assert_called_once_with(
mock.ANY, port_id=PORT_BAREMETAL['id'])
self.mock_trunk_get_object.assert_called_once_with(
mock.ANY, id=TestSubPort().trunk_id)
def test_is_trunk_subport_baremetal_no_subport(self):
self.mock_subport_get_object.return_value = None
return_value = self.handler.is_trunk_subport_baremetal(PORT_BAREMETAL)
self.assertFalse(return_value)
self.mock_subport_get_object.assert_called_once_with(
mock.ANY, port_id=PORT_BAREMETAL['id'])
self.assertFalse(self.mock_trunk_get_object.call_count)
def test_is_trunk_subport_baremetal_vm_port(self):
self.plugin.get_port.return_value = PORT_VM
return_value = self.handler.is_trunk_subport_baremetal(PORT_VM)
self.assertFalse(return_value)
| noironetworks/networking-cisco | networking_cisco/tests/unit/ml2_drivers/nexus/test_trunk.py | Python | apache-2.0 | 6,254 | 0.00016 |
from test import test_support
import time
import unittest
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_missing_module_attribute(self):
self.assertEqual(time.clock.__module__, 'time')
self.assertEqual(time.time.__module__, 'time')
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_clock(self):
time.clock()
def test_conversions(self):
self.assertTrue(time.ctime(self.t)
== time.asctime(time.localtime(self.t)))
self.assertTrue(long(time.mktime(time.localtime(self.t)))
== long(self.t))
def test_sleep(self):
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
def test_strftime_bounds_checking(self):
# Make sure that strftime() checks the bounds of the various parts
        # of the time tuple (0 is valid for *all* values).
# XXX: Jython supports more dates than CPython
if not test_support.is_jython:
# Check year [1900, max(int)]
self.assertRaises(ValueError, time.strftime, '',
(1899, 1, 1, 0, 0, 0, 0, 1, -1))
if time.accept2dyear:
self.assertRaises(ValueError, time.strftime, '',
(-1, 1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(100, 1, 1, 0, 0, 0, 0, 1, -1))
# Check month [1, 12] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default values.
# No test for daylight savings since strftime() does not change output
# based on its value.
if not test_support.is_jython:
expected = "2000 01 01 00 00 00 1 001"
else:
# XXX: Jython doesn't support the "two digits years" hack (turned
# on/off by time.accept2dyears), so year 0 means exactly that
# and it is not converted to 2000.
expected = "0000 01 01 00 00 00 1 001"
result = time.strftime("%Y %m %d %H %M %S %w %j", (0,)*9)
self.assertEqual(expected, result)
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# throwing an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_empty(self):
try:
time.strptime('', '')
except ValueError:
self.fail('strptime failed on empty args.')
def test_asctime(self):
time.asctime(time.gmtime(self.t))
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
        # XXX: POSIX compliant asctime should refuse to convert
# year > 9999, but Linux implementation does not.
# self.assertRaises(ValueError, time.asctime,
# (12345, 1, 0, 0, 0, 0, 0, 0, 0))
# XXX: For now, just make sure we don't have a crash:
try:
time.asctime((12345, 1, 1, 0, 0, 0, 0, 1, 0))
except ValueError:
pass
@unittest.skipIf(not hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertTrue(time.tzname[0] == 'AEST', str(time.tzname[0]))
self.assertTrue(time.tzname[1] == 'AEDT', str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
# Repair TZ environment variable in case any other tests
# rely on it.
if org_TZ is not None:
environ['TZ'] = org_TZ
elif environ.has_key('TZ'):
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(ValueError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertTrue(0 <= (t1-t0) < 0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertTrue(0 <= (t1-t0) < 0.2)
def test_mktime(self):
# Issue #1726687
for t in (-2, -1, 0, 1):
try:
tt = time.localtime(t)
except (OverflowError, ValueError):
pass
else:
self.assertEqual(time.mktime(tt), t)
def test_main():
test_support.run_unittest(TimeTestCase)
if __name__ == "__main__":
test_main()
| EnviroCentre/jython-upgrade | jython/lib/test/test_time.py | Python | mit | 10,491 | 0.002288 |
import warnings
from pprint import pprint
import datajoint as dj
import pandas as pd
from djaddon import hdf5
try:
from pupil_tracking import PupilTracker
except ImportError:
warnings.warn("Failed to import pupil_tacking library. You won't be able to populate trk.EyeFrame")
schema = dj.schema('pipeline_pupiltracking', locals())
from . import rf
import numpy as np
import matplotlib.pyplot as plt
from IPython import embed
import glob
@schema
class Roi(dj.Manual):
definition = """
# table that stores the correct ROI of the Eye in the video
->rf.Eye
---
x_roi_min : int # x coordinate of roi
y_roi_min : int # y coordinate of roi
x_roi_max : int # x coordinate of roi
y_roi_max : int # y coordinate of roi
"""
# embed()
@schema
class ParamEyeFrame(dj.Lookup):
definition = """
# table that stores the paths for the params for pupil_tracker
pupil_tracker_param_id : int # id for param collection
---
convex_weight_high = Null : float # parameter for tracking
convex_weight_low = Null : float # parameter for tracking
thres_perc_high = Null : float # parameter for tracking
thres_perc_low = Null : float # parameter for tracking
pupil_left_limit = Null : float # parameter for tracking
pupil_right_limit = Null : float # parameter for tracking
min_radius = Null : float # parameter for tracking
max_radius = Null : float # parameter for tracking
centre_dislocation_penalty : float # parameter for tracking
distance_sq_pow : float # parameter for tracking
"""
contents = [
{'pupil_tracker_param_id': 0, 'convex_weight_high': 0.5, 'convex_weight_low': 0.5, 'thres_perc_high': 99, 'distance_sq_pow': 1,
'thres_perc_low': 1, 'pupil_left_limit': 0.2, 'pupil_right_limit': 0.8, 'min_radius': 5, 'max_radius': 180,
'centre_dislocation_penalty': 0.001},
{'pupil_tracker_param_id': 1, 'convex_weight_high': 0.5, 'convex_weight_low': 0.5, 'thres_perc_high': 98, 'distance_sq_pow': 0.5,
'thres_perc_low': 2, 'pupil_left_limit': 0.2, 'pupil_right_limit': 0.8, 'min_radius': 5, 'max_radius': 180,
'centre_dislocation_penalty': 0.05}
]
@schema
class EyeFrame(dj.Computed):
definition = """
# eye tracking info for each frame of a movie
-> Roi
-> ParamEyeFrame
frame : int # frame number in movie
---
eye_frame_ts=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def populated_from(self):
return Roi()
def _make_tuples(self, key):
print("Populating: ")
# embed()
param = (ParamEyeFrame() & 'pupil_tracker_param_id=1').fetch.as_dict()[0]
# key.update(param)
key['pupil_tracker_param_id'] = param['pupil_tracker_param_id']
pprint(key)
eye_roi = (Roi() & key).fetch1['x_roi_min', 'y_roi_min', 'x_roi_max', 'y_roi_max']
print("Populating for trk.Roi and roi = ", eye_roi)
p, f = (rf.Session() & key).fetch1['hd5_path', 'file_base']
n = (rf.Scan() & key).fetch1['file_num']
avi_path = glob.glob(r"{p}/{f}{n}*.avi".format(f=f, p=p, n=n))
# print(avi_path)
# embed()
assert len(avi_path) == 1, "Found 0 or more than 1 videos: {videos}".format(videos=str(avi_path))
tr = PupilTracker(param)
trace = tr.track_without_svm(avi_path[0], eye_roi)
# CODE to insert data after tracking
print("Tracking complete... Now inserting data to datajoint")
# embed()
efd = EyeFrame.Detection()
for index, data in trace.iterrows():
key['frame'] = index
self.insert1(key)
if pd.notnull(data['pupil_x']):
values = data.to_dict()
values.update(key)
efd.insert1(values)
class Detection(dj.Part):
definition = """
# eye frames with detected eye
->EyeFrame
---
pupil_x : float # pupil x position
pupil_y : float # pupil y position
pupil_r_minor : float # pupil radius minor axis
pupil_r_major : float # pupil radius major axis
pupil_angle : float # angle of major axis vs. horizontal axis in radians
pupil_x_std : float # pupil x position std
pupil_y_std : float # pupil y position std
pupil_r_minor_std : float # pupil radius minor axis std
pupil_r_major_std : float # pupil radius major axis std
pupil_angle_std : float # angle of major axis vs. horizontal axis in radians
intensity_std : float # standard deviation of the ROI pixel values
"""
@schema
class SelectionProtocol(dj.Lookup):
definition = """
# groups of filtering steps to reject bad frames
filter_protocol_id : int # id of the filtering protocol
---
protocol_name : char(50) # descriptive name of the protocol
"""
contents = [
{'filter_protocol_id': 0, 'protocol_name': 'frame_intensity'},
{'filter_protocol_id': 1, 'protocol_name': 'int_and_ran_pupil_x_50_2'},
{'filter_protocol_id': 2, 'protocol_name': 'int_and_ran_pupil_x_75_2'},
{'filter_protocol_id': 3, 'protocol_name': 'int_and_ran_pupil_x_25_2'},
{'filter_protocol_id': 4, 'protocol_name': 'int_and_ran_pupil_pos'},
{'filter_protocol_id': 5, 'protocol_name': 'int_and_ran_pupil_pos_spikes_removed'},
{'filter_protocol_id': 6, 'protocol_name': 'int_and_ran_pupil_pos_spike_filter2'}
]
def apply(self, frames, key):
print("Applying filter with protocol id :", key['filter_protocol_id'])
for step in (ProtocolStep() & key).fetch.order_by('priority').as_dict():
# embed()
print("....for protocol id:", key['filter_protocol_id'], "applying filter with filter_id = ",
step['filter_id'])
frames = FrameSelector().apply(frames, step, param=step['filter_param'])
return frames
@schema
class FrameSelector(dj.Lookup):
definition = """
# single filters to reject frames
filter_id : tinyint # id of the filter
---
filter_name : char(50) # descriptive name of the filter
"""
contents = [
{'filter_id': 0, 'filter_name': 'intensity_filter'},
{'filter_id': 1, 'filter_name': 'ran_pupil_x_th'},
{'filter_id': 2, 'filter_name': 'ran_pupil_pos'},
{'filter_id': 3, 'filter_name': 'spike_filter'},
{'filter_id': 4, 'filter_name': 'spike_filter2'}
]
def apply(self, frames, key, param):
"""
Apply takes a restriction of EyeFrame.Detection() and returns an even more restricted set of frames
:param frames: restriction of EyeFrame.Detection()
:param key: key that singles out a single filter
:param param: parameters to the filter
:return: an even more restricted set of frames
"""
which = (self & key).fetch1['filter_name']
if which == 'intensity_filter':
i = frames.fetch['intensity_std']
th = np.percentile(i, param[0]) / param[1]
return frames & 'intensity_std>{threshold}'.format(threshold=th)
if which == 'ran_pupil_x_th':
i = frames.fetch['pupil_x_std']
th = np.percentile(i, param[0])
return frames & 'pupil_x_std<{threshold}*{param}'.format(threshold=th, param=param[1])
if which == 'ran_pupil_pos':
i = frames.fetch['pupil_x_std']
j = frames.fetch['pupil_y_std']
pos = i*i + j*j
th = np.percentile(pos, param[0])
return frames & '(pupil_x_std*pupil_x_std + pupil_y_std*pupil_y_std)<{threshold}*{param}'.format(threshold=th, param=param[1])
if which == 'spike_filter':
ra = frames.fetch.order_by('frame')['pupil_r_minor']
fr = frames.fetch.order_by('frame')['frame']
slope_coll = []
for i in range(1,ra.size):
slope_coll.append((ra[i] - ra[i-1])/ (fr[i] - fr[i-1]))
slope_coll1 = abs(np.asarray(slope_coll))
frames_rej = [dict(frame=k) for k in fr[np.where(slope_coll1 > param)]]
return frames - frames_rej
if which == 'spike_filter2':
ra = frames.fetch.order_by('frame')['pupil_r_minor']
fr = frames.fetch.order_by('frame')['frame']
fr_rej=[]
for i in range(2, ra.size-2):
avg = (ra[i-2] + ra[i-1] + ra[i+1] + ra[i+2]) / 4
if abs(ra[i] - avg) > param:
fr_rej.append(fr[i])
frames_rej = [dict(frame=k) for k in fr_rej]
return frames - frames_rej
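# Illustrative helper (not used by the pipeline): the same neighbour-average spike
# rejection that 'spike_filter2' applies above, sketched on a plain radius trace.
def _demo_spike_filter2(radii, threshold=35):
    """Return indices whose value deviates from the mean of its four neighbours by more than threshold."""
    rejected = []
    for i in range(2, len(radii) - 2):
        neighbour_mean = (radii[i - 2] + radii[i - 1] + radii[i + 1] + radii[i + 2]) / 4
        if abs(radii[i] - neighbour_mean) > threshold:
            rejected.append(i)
    return rejected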
@schema
class ProtocolStep(dj.Lookup):
definition = """
# single filter in a protocol to accept frames
-> SelectionProtocol
-> FrameSelector
priority : int # priority of the filter step, the low the higher the priority
---
filter_param=null : longblob # parameters that are passed to the filter
"""
# define the protocols. Each protocol has one id, but can have several filters
contents = [ # parameter needs to be an array
# protocol 0 contains only one filter and is based on intensity
{'filter_protocol_id': 0, 'filter_id': 0, 'priority': 50, 'filter_param': np.array([50,2])},
# protocol 1 = intensity filter + ransac(50,2)
{'filter_protocol_id': 1, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])},
{'filter_protocol_id': 1, 'filter_id': 1, 'priority': 40, 'filter_param': np.array([50,2])},
# protocol 2 = intensity filter + ransac(75,2)
{'filter_protocol_id': 2, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])},
{'filter_protocol_id': 2, 'filter_id': 1, 'priority': 40, 'filter_param': np.array([75,2])},
# protocol 3 = intensity filter + ransac(25,2)
{'filter_protocol_id': 3, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])},
{'filter_protocol_id': 3, 'filter_id': 1, 'priority': 40, 'filter_param': np.array([25,2])},
# protocol 4 = intensity filter + ransac x2+y2
{'filter_protocol_id': 4, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])},
{'filter_protocol_id': 4, 'filter_id': 2, 'priority': 40, 'filter_param': np.array([97,2])},
# protocol 5 = intensity filter + ransac x2+y2 + spike filter
{'filter_protocol_id': 5, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])},
{'filter_protocol_id': 5, 'filter_id': 2, 'priority': 40, 'filter_param': np.array([97,2])},
{'filter_protocol_id': 5, 'filter_id': 3, 'priority': 50, 'filter_param': np.array(50)},
# protocol 6 = intensity filter + ransac x2+y2 + spike filter2
{'filter_protocol_id': 6, 'filter_id': 0, 'priority': 10, 'filter_param': np.array([50,2])},
{'filter_protocol_id': 6, 'filter_id': 2, 'priority': 40, 'filter_param': np.array([97,2])},
{'filter_protocol_id': 6, 'filter_id': 4, 'priority': 50, 'filter_param': np.array(35)}
]
@schema
class SelectedFrame(dj.Computed):
definition = """
# This schema only contains detected frames that meet a particular quality criterion
-> EyeFrame.Detection
-> SelectionProtocol
---
"""
@property
def populated_from(self):
return rf.Eye() * SelectionProtocol() & EyeFrame()
def _make_tuples(self, key):
print("Key = ", key)
# embed()
frames = EyeFrame.Detection() & key
print('\tLength before filtering: {l}'.format(l=len(frames)))
# & key can be removed from the line below
frames = (SelectionProtocol() & key).apply(frames, key)
print('\tLength after filtering: {l}'.format(l=len(frames)))
for frame_key in frames.project().fetch.as_dict:
key.update(frame_key)
self.insert1(key)
@schema
class Quality(dj.Computed):
definition = """
# quality assessment of tracking using Jake's tracked frames as ground truth
-> rf.Eye
-> SelectionProtocol
---
pos_err : float # mean Euclidean distance between pupil positions
r_corr : float # correlation of radii
excess_frames : int # number of frames detected by tracking but not in Jake's data
missed_frames : int # number of frames detected by Jake but no by tracking
total_frames : int # total number of frames in the video
    nan_in_rf : int # number of frames with a detected (non-NaN) pupil in rf.EyeFrame
"""
@property
def populated_from(self):
return rf.Eye().project() * SelectionProtocol() & EyeFrame().project() & rf.EyeFrame().project() & SelectedFrame().project()
def _make_tuples(self, key):
# TODO: This function needs cleanup. Only keep relevant stuff for computing the comparisons
# TODO: Don't plot in _make_tuples. Make plotting an extra function.
roi_rf = (rf.Eye() & key).fetch['eye_roi']
# print("Debug 1")
# embed()
print("Populating for key= ", key)
pos_errors = np.zeros(len(rf.EyeFrame() & key))
r_errors = np.zeros(len(rf.EyeFrame() & key))
excess_frames = 0
missed_frames = 0
r_rf = []
r_trk = []
p_err = []
total_frames = len(rf.EyeFrame() & key)
miss = []
indexes = []
efi = []
for frame_key in (rf.EyeFrame() & key).project().fetch.as_dict:
# from IPython import embed
# print("Debug 2")
# embed()
if np.isnan((rf.EyeFrame() & frame_key).fetch['pupil_x']):
                # can't use fetch1 here as it might error out !!
if (EyeFrame.Detection() & (SelectedFrame() & key) & frame_key).fetch['pupil_x'].shape[0] != 0:
excess_frames += 1
efi.append(frame_key['frame'])
else:
if (EyeFrame.Detection() & frame_key & (SelectedFrame() & key)).fetch['pupil_x'].shape[0] == 0:
missed_frames += 1
# embed()
miss.append(frame_key['frame'])
else:
rx, ry = (rf.EyeFrame() & frame_key).fetch1['pupil_x','pupil_y']
tx, ty = (EyeFrame.Detection() & frame_key).fetch1['pupil_x','pupil_y']
d_x = rx - tx + roi_rf[0][0][0] - 1
d_y = ry - ty + roi_rf[0][0][2] - 1
r_rf.append((rf.EyeFrame() & frame_key).fetch1['pupil_r'])
r_trk.append((EyeFrame.Detection() & frame_key).fetch1['pupil_r_major'])
pos_errors[frame_key['frame']] = pow(d_x, 2) + pow(d_y, 2)
indexes.append(frame_key['frame'])
p_err.append(pow(d_x, 2) + pow(d_y, 2))
            if frame_key['frame'] % 1000 == 0:
print("Frame Computing = ", frame_key['frame'], " / ", total_frames)
#embed()
frames_computed = np.sum(~np.isnan((rf.EyeFrame() & key).fetch['pupil_x'])) - missed_frames
# frames_computed = len(np.where(np.isnan((rf.EyeFrame() & key).fetch['pupil_x']) == False)[0]) - missed_frames
key['pos_err'] = pow(np.sum(pos_errors) / frames_computed, 0.5)
key['r_corr'] = np.corrcoef(r_rf, r_trk)[0][1]
key['excess_frames'] = excess_frames
key['missed_frames'] = missed_frames
key['total_frames'] = total_frames
key['nan_in_rf'] = np.sum(~np.isnan((rf.EyeFrame() & key).fetch['pupil_x']))
self.insert1(key)
def plot_comparison(self):
#embed()
# TODO: Make this a proper plotting function
N = 5
fig, ax = plt.subplots(1, 2)
ind = np.arange(N)
# width = 0.35
x0 = (self & 'filter_protocol_id=0').fetch['pos_err']
x1 = (self & 'filter_protocol_id=1').fetch['pos_err']
x2 = (self & 'filter_protocol_id=2').fetch['pos_err']
x3 = (self & 'filter_protocol_id=3').fetch['pos_err']
x4 = (self & 'filter_protocol_id=4').fetch['pos_err']
# means = [np.mean(x0), np.mean(x1), np.mean(x2), np.mean(x3)]
# std = [np.std(x0), np.std(x1), np.std(x2), np.std(x3)]
rects0 = ax[0].bar(0, np.mean(x0), color='r', ecolor='k', align='center', yerr=np.std(x0))
rects1 = ax[0].bar(1, np.mean(x1), color='b', ecolor='k', align='center', yerr=np.std(x1))
rects2 = ax[0].bar(2, np.mean(x2), color='g', ecolor='k', align='center', yerr=np.std(x2))
rects3 = ax[0].bar(3, np.mean(x3), color='y', ecolor='k', align='center', yerr=np.std(x3))
rects4 = ax[0].bar(4, np.mean(x4), color='m', ecolor='k', align='center', yerr=np.std(x4))
ax[0].plot(ind, [x0,x1,x2,x3,x4], '-o')
ax[0].set_xticks(ind)
label0 = r'$\mu =%.2f\pm\sigma =%.2f$' % (np.mean(x0), np.std(x0))
label1 = r'$\mu =%.2f\pm\sigma =%.2f$' % (np.mean(x1), np.std(x1))
label2 = r'$\mu =%.2f\pm\sigma =%.2f$' % (np.mean(x2), np.std(x2))
label3 = r'$\mu =%.2f\pm\sigma =%.2f$' % (np.mean(x3), np.std(x3))
label4 = r'$\mu =%.2f\pm\sigma =%.2f$' % (np.mean(x4), np.std(x4))
lbls = SelectionProtocol().fetch['protocol_name']
ax[0].set_xticklabels(lbls, rotation=45, ha='right')
ax[0].set_ylabel('RMSE pupil centre position')
ax[0].set_xlabel('Filter Protocol ID')
ax[0].legend((rects0[0], rects1[0], rects2[0], rects3[0], rects4[0]), (label0, label1, label2, label3, label4))
nan = (self & 'filter_protocol_id=0').fetch['nan_in_rf']
mf = (self & 'filter_protocol_id=0').fetch['missed_frames']
ef = (self & 'filter_protocol_id=0').fetch['excess_frames']
p = (nan-mf)/(nan-mf+ef)
r = (nan-mf)/nan
pts0 = ax[1].plot(r, p, 'ok', color='r')
nan = (self & 'filter_protocol_id=1').fetch['nan_in_rf']
mf = (self & 'filter_protocol_id=1').fetch['missed_frames']
ef = (self & 'filter_protocol_id=1').fetch['excess_frames']
p = (nan-mf)/(nan-mf+ef)
r = (nan-mf)/nan
pts1 = ax[1].plot(r, p, 'ok', color='b')
nan = (self & 'filter_protocol_id=2').fetch['nan_in_rf']
mf = (self & 'filter_protocol_id=2').fetch['missed_frames']
ef = (self & 'filter_protocol_id=2').fetch['excess_frames']
p = (nan-mf)/(nan-mf+ef)
r = (nan-mf)/nan
pts2 = ax[1].plot(r, p, 'ok', color='g')
nan = (self & 'filter_protocol_id=3').fetch['nan_in_rf']
mf = (self & 'filter_protocol_id=3').fetch['missed_frames']
ef = (self & 'filter_protocol_id=3').fetch['excess_frames']
p = (nan-mf)/(nan-mf+ef)
r = (nan-mf)/nan
pts3 = ax[1].plot(r, p, 'ok', color='y')
nan = (self & 'filter_protocol_id=4').fetch['nan_in_rf']
mf = (self & 'filter_protocol_id=4').fetch['missed_frames']
ef = (self & 'filter_protocol_id=4').fetch['excess_frames']
p = (nan-mf)/(nan-mf+ef)
r = (nan-mf)/nan
pts4 = ax[1].plot(r, p, 'ok', color='m')
ax[1].legend((pts0[0], pts1[0], pts2[0], pts3[0], pts4[0]), tuple(lbls), loc=5)
ax[1].set_ylabel('Precision values')
ax[1].set_xlabel('Recall values')
ax[1].set_ylim((0, 1.05))
ax[1].set_xlim((0, 1.05))
fig.tight_layout()
fig.savefig('err_pup_x_with_fil_pr.png')
| fabiansinz/pipeline | python/pipeline/legacy/trk.py | Python | lgpl-3.0 | 20,366 | 0.00545 |
# vim: tabstop=4 shiftwidth=4 expandtab
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
try:
import tracemalloc
except ImportError:
tracemalloc = None
from libqtile.log_utils import init_log
from libqtile.dgroups import DGroups
from xcffib.xproto import EventMask, WindowError, AccessError, DrawableError
import logging
import os
import pickle
import shlex
import signal
import sys
import traceback
import xcffib
import xcffib.xinerama
import xcffib.xproto
import six
from six.moves import asyncio
from .config import Drag, Click, Screen, Match, Rule
from .group import _Group
from .state import QtileState
from .utils import QtileError, get_cache_dir
from .widget.base import _Widget
from . import command
from . import hook
from . import utils
from . import window
from . import xcbq
if sys.version_info >= (3, 3):
def _import_module(module_name, dir_path):
import importlib
file_name = os.path.join(dir_path, module_name) + '.py'
f = importlib.machinery.SourceFileLoader(module_name, file_name)
module = f.load_module()
return module
else:
def _import_module(module_name, dir_path):
import imp
try:
fp, pathname, description = imp.find_module(module_name, [dir_path])
module = imp.load_module(module_name, fp, pathname, description)
finally:
if fp:
fp.close()
return module
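# A minimal usage sketch for the helper above (the path and module name are
# hypothetical, not part of qtile): given /tmp/ext/myhook.py defining a
# main(qtile) function, it could be loaded at runtime with
#
#     module = _import_module("myhook", "/tmp/ext")
#     module.main(qtile_instance)
#
# which is essentially what Qtile.cmd_run_external() further below does for
# user-supplied scripts.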
class Qtile(command.CommandObject):
"""
This object is the __root__ of the command graph.
"""
def __init__(self, config,
displayName=None, fname=None, no_spawn=False, log=None,
state=None):
logkwargs = {}
if hasattr(config, "log_level"):
logkwargs["log_level"] = config.log_level
if hasattr(config, "log_path"):
logkwargs["log_path"] = config.log_path
self.log = log or init_log(**logkwargs)
self.no_spawn = no_spawn
self._eventloop = None
self._finalize = False
if not displayName:
displayName = os.environ.get("DISPLAY")
if not displayName:
raise QtileError("No DISPLAY set.")
if not fname:
# Dots might appear in the host part of the display name
# during remote X sessions. Let's strip the host part first.
displayNum = displayName.partition(":")[2]
if "." not in displayNum:
displayName = displayName + ".0"
fname = command.find_sockfile(displayName)
self.conn = xcbq.Connection(displayName)
self.config = config
self.fname = fname
hook.init(self)
self.windowMap = {}
self.widgetMap = {}
self.groupMap = {}
self.groups = []
self.keyMap = {}
# Find the modifier mask for the numlock key, if there is one:
nc = self.conn.keysym_to_keycode(xcbq.keysyms["Num_Lock"])
self.numlockMask = xcbq.ModMasks[self.conn.get_modifier(nc)]
self.validMask = ~(self.numlockMask | xcbq.ModMasks["lock"])
# Because we only do Xinerama multi-screening,
# we can assume that the first
# screen's root is _the_ root.
self.root = self.conn.default_screen.root
self.root.set_attribute(
eventmask=(
EventMask.StructureNotify |
EventMask.SubstructureNotify |
EventMask.SubstructureRedirect |
EventMask.EnterWindow |
EventMask.LeaveWindow
)
)
self.root.set_property(
'_NET_SUPPORTED',
[self.conn.atoms[x] for x in xcbq.SUPPORTED_ATOMS]
)
self.supporting_wm_check_window = self.conn.create_window(-1, -1, 1, 1)
self.root.set_property(
'_NET_SUPPORTING_WM_CHECK',
self.supporting_wm_check_window.wid
)
# setup the default cursor
self.root.set_cursor('left_ptr')
wmname = getattr(self.config, "wmname", "qtile")
self.supporting_wm_check_window.set_property('_NET_WM_NAME', wmname)
self.supporting_wm_check_window.set_property(
'_NET_SUPPORTING_WM_CHECK',
self.supporting_wm_check_window.wid
)
if config.main:
config.main(self)
self.dgroups = None
if self.config.groups:
key_binder = None
if hasattr(self.config, 'dgroups_key_binder'):
key_binder = self.config.dgroups_key_binder
self.dgroups = DGroups(self, self.config.groups, key_binder)
if hasattr(config, "widget_defaults") and config.widget_defaults:
_Widget.global_defaults = config.widget_defaults
else:
_Widget.global_defaults = {}
for i in self.groups:
self.groupMap[i.name] = i
self.setup_eventloop()
self.server = command._Server(self.fname, self, config, self._eventloop)
self.currentScreen = None
self.screens = []
self._process_screens()
self.currentScreen = self.screens[0]
self._drag = None
self.ignoreEvents = set([
xcffib.xproto.KeyReleaseEvent,
xcffib.xproto.ReparentNotifyEvent,
xcffib.xproto.CreateNotifyEvent,
# DWM handles this to help "broken focusing windows".
xcffib.xproto.MapNotifyEvent,
xcffib.xproto.LeaveNotifyEvent,
xcffib.xproto.FocusOutEvent,
xcffib.xproto.FocusInEvent,
xcffib.xproto.NoExposureEvent
])
self.conn.flush()
self.conn.xsync()
self._xpoll()
# Map and Grab keys
for key in self.config.keys:
self.mapKey(key)
# It fixes problems with focus when clicking windows of some specific clients like xterm
def noop(qtile):
pass
self.config.mouse += (Click([], "Button1", command.lazy.function(noop), focus="after"),)
self.mouseMap = {}
for i in self.config.mouse:
if self.mouseMap.get(i.button_code) is None:
self.mouseMap[i.button_code] = []
self.mouseMap[i.button_code].append(i)
self.grabMouse()
# no_spawn is set when we are restarting; we only want to run the
# startup hook once.
if not no_spawn:
hook.fire("startup_once")
hook.fire("startup")
self.scan()
self.update_net_desktops()
hook.subscribe.setgroup(self.update_net_desktops)
if state:
st = pickle.load(six.BytesIO(state.encode()))
try:
st.apply(self)
except:
log.exception("failed restoring state")
self.selection = {
"PRIMARY": {"owner": None, "selection": ""},
"CLIPBOARD": {"owner": None, "selection": ""}
}
self.setup_selection()
def setup_selection(self):
PRIMARY = self.conn.atoms["PRIMARY"]
CLIPBOARD = self.conn.atoms["CLIPBOARD"]
self.selection_window = self.conn.create_window(-1, -1, 1, 1)
self.selection_window.set_attribute(eventmask=EventMask.PropertyChange)
self.conn.xfixes.select_selection_input(self.selection_window,
"PRIMARY")
self.conn.xfixes.select_selection_input(self.selection_window,
"CLIPBOARD")
r = self.conn.conn.core.GetSelectionOwner(PRIMARY).reply()
self.selection["PRIMARY"]["owner"] = r.owner
r = self.conn.conn.core.GetSelectionOwner(CLIPBOARD).reply()
self.selection["CLIPBOARD"]["owner"] = r.owner
        # ask for selection on startup
self.convert_selection(PRIMARY)
self.convert_selection(CLIPBOARD)
def setup_eventloop(self):
self._eventloop = asyncio.new_event_loop()
self._eventloop.add_signal_handler(signal.SIGINT, self.stop)
self._eventloop.add_signal_handler(signal.SIGTERM, self.stop)
self._eventloop.set_exception_handler(
lambda x, y: self.log.exception("Got an exception in poll loop")
)
self.log.info('Adding io watch')
fd = self.conn.conn.get_file_descriptor()
self._eventloop.add_reader(fd, self._xpoll)
self.setup_python_dbus()
def setup_python_dbus(self):
# This is a little strange. python-dbus internally depends on gobject,
# so gobject's threads need to be running, and a gobject "main loop
# thread" needs to be spawned, but we try to let it only interact with
# us via calls to asyncio's call_soon_threadsafe.
try:
            # We import dbus here to throw an ImportError if it isn't
# available. Since the only reason we're running this thread is
# because of dbus, if dbus isn't around there's no need to run
# this thread.
import dbus # noqa
from gi.repository import GLib
def gobject_thread():
ctx = GLib.main_context_default()
while not self._finalize:
try:
ctx.iteration(True)
except Exception:
                        self.log.exception("got exception from gobject")
self._glib_loop = self.run_in_executor(gobject_thread)
except ImportError:
self.log.warning("importing dbus/gobject failed, dbus will not work.")
self._glib_loop = None
def finalize(self):
self._finalize = True
self._eventloop.remove_signal_handler(signal.SIGINT)
self._eventloop.remove_signal_handler(signal.SIGTERM)
self._eventloop.set_exception_handler(None)
try:
from gi.repository import GLib
GLib.idle_add(lambda: None)
self._eventloop.run_until_complete(self._glib_loop)
except ImportError:
pass
try:
for w in self.widgetMap.values():
w.finalize()
for l in self.config.layouts:
l.finalize()
for screen in self.screens:
for bar in [screen.top, screen.bottom, screen.left, screen.right]:
if bar is not None:
bar.finalize()
self.log.info('Removing io watch')
fd = self.conn.conn.get_file_descriptor()
self._eventloop.remove_reader(fd)
self.conn.finalize()
self.server.close()
except:
self.log.exception('exception during finalize')
finally:
self._eventloop.close()
self._eventloop = None
def _process_fake_screens(self):
"""
        Since Xephyr and Xnest don't really support offset screens,
        we'll fake it here for testing (or if you want to partition
        a physical monitor into separate screens).
"""
for i, s in enumerate(self.config.fake_screens):
# should have x,y, width and height set
s._configure(self, i, s.x, s.y, s.width, s.height, self.groups[i])
if not self.currentScreen:
self.currentScreen = s
self.screens.append(s)
def _process_screens(self):
if hasattr(self.config, 'fake_screens'):
self._process_fake_screens()
return
# What's going on here is a little funny. What we really want is only
# screens that don't overlap here; overlapping screens should see the
# same parts of the root window (i.e. for people doing xrandr
        # --same-as). However, the order that X gives us pseudoscreens in is
# important, because it indicates what people have chosen via xrandr
# --primary or whatever. So we need to alias screens that should be
# aliased, but preserve order as well. See #383.
xywh = {}
screenpos = []
for s in self.conn.pseudoscreens:
pos = (s.x, s.y)
(w, h) = xywh.get(pos, (0, 0))
if pos not in xywh:
screenpos.append(pos)
xywh[pos] = (max(w, s.width), max(h, s.height))
for i, (x, y) in enumerate(screenpos):
(w, h) = xywh[(x, y)]
if i + 1 > len(self.config.screens):
scr = Screen()
else:
scr = self.config.screens[i]
if not self.currentScreen:
self.currentScreen = scr
scr._configure(
self,
i,
x,
y,
w,
h,
self.groups[i],
)
self.screens.append(scr)
if not self.screens:
if self.config.screens:
s = self.config.screens[0]
else:
s = Screen()
self.currentScreen = s
s._configure(
self,
0, 0, 0,
self.conn.default_screen.width_in_pixels,
self.conn.default_screen.height_in_pixels,
self.groups[0],
)
self.screens.append(s)
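    # Illustrative walk-through of the de-duplication above (geometry is made
    # up, not taken from a real server): with pseudoscreens reported as
    #   (x=0,    y=0, w=1920, h=1080)   <- primary
    #   (x=0,    y=0, w=1280, h=1024)   <- mirrored via xrandr --same-as
    #   (x=1920, y=0, w=1920, h=1080)
    # the two entries at (0, 0) collapse into a single 1920x1080 screen (the
    # max of each dimension) while the position order is preserved, so the
    # primary output keeps index 0 and the right-hand monitor becomes index 1.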
def mapKey(self, key):
self.keyMap[(key.keysym, key.modmask & self.validMask)] = key
code = self.conn.keysym_to_keycode(key.keysym)
self.root.grab_key(
code,
key.modmask,
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
if self.numlockMask:
self.root.grab_key(
code,
key.modmask | self.numlockMask,
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
self.root.grab_key(
code,
key.modmask | self.numlockMask | xcbq.ModMasks["lock"],
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
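    # Note on the grabs above: every binding is registered for its plain
    # modifier mask and, when a Num_Lock mask is present, also for
    # mask|Num_Lock and mask|Num_Lock|Caps_Lock, so the binding still fires
    # with those lock combinations active; unmapKey() below releases exactly
    # the same combinations.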
def unmapKey(self, key):
key_index = (key.keysym, key.modmask & self.validMask)
if key_index not in self.keyMap:
return
code = self.conn.keysym_to_keycode(key.keysym)
self.root.ungrab_key(code, key.modmask)
if self.numlockMask:
self.root.ungrab_key(code, key.modmask | self.numlockMask)
self.root.ungrab_key(
code,
key.modmask | self.numlockMask | xcbq.ModMasks["lock"]
)
del(self.keyMap[key_index])
def update_net_desktops(self):
try:
index = self.groups.index(self.currentGroup)
# TODO: we should really only except ValueError here, AttributeError is
# an annoying chicken and egg because we're accessing currentScreen
# (via currentGroup), and when we set up the initial groups, there
# aren't any screens yet. This can probably be changed when #475 is
# fixed.
except (ValueError, AttributeError):
index = 0
self.root.set_property("_NET_NUMBER_OF_DESKTOPS", len(self.groups))
self.root.set_property(
"_NET_DESKTOP_NAMES", "\0".join([i.name for i in self.groups])
)
self.root.set_property("_NET_CURRENT_DESKTOP", index)
def addGroup(self, name, layout=None, layouts=None):
if name not in self.groupMap.keys():
g = _Group(name, layout)
self.groups.append(g)
if not layouts:
layouts = self.config.layouts
g._configure(layouts, self.config.floating_layout, self)
self.groupMap[name] = g
hook.fire("addgroup", self, name)
hook.fire("changegroup")
self.update_net_desktops()
return True
return False
def delGroup(self, name):
# one group per screen is needed
if len(self.groups) == len(self.screens):
raise ValueError("Can't delete all groups.")
if name in self.groupMap.keys():
group = self.groupMap[name]
if group.screen and group.screen.previous_group:
target = group.screen.previous_group
else:
target = group.prevGroup()
# Find a group that's not currently on a screen to bring to the
# front. This will terminate because of our check above.
while target.screen:
target = target.prevGroup()
for i in list(group.windows):
i.togroup(target.name)
if self.currentGroup.name == name:
self.currentScreen.setGroup(target, save_prev=False)
self.groups.remove(group)
del(self.groupMap[name])
hook.fire("delgroup", self, name)
hook.fire("changegroup")
self.update_net_desktops()
def registerWidget(self, w):
"""
        Register a bar widget. If a widget with the same name already
        exists, this will silently ignore that widget. However, this is
        not necessarily a bug. By default a widget's name is just
        self.__class__.__name__.lower(), so adding multiple widgets of the
        same class will alias them and all but one will be inaccessible.
        Since more than one groupbox widget is useful when you have more
        than one screen, this is not an uncommon occurrence. If you want to
        use the debug info for widgets with the same name, set the name
        yourself.
"""
if w.name:
if w.name in self.widgetMap:
return
self.widgetMap[w.name] = w
@utils.LRUCache(200)
def colorPixel(self, name):
return self.conn.screens[0].default_colormap.alloc_color(name).pixel
@property
def currentLayout(self):
return self.currentGroup.layout
@property
def currentGroup(self):
return self.currentScreen.group
@property
def currentWindow(self):
return self.currentScreen.group.currentWindow
def scan(self):
_, _, children = self.root.query_tree()
for item in children:
try:
attrs = item.get_attributes()
state = item.get_wm_state()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
continue
if attrs and attrs.map_state == xcffib.xproto.MapState.Unmapped:
continue
if state and state[0] == window.WithdrawnState:
continue
self.manage(item)
def unmanage(self, win):
c = self.windowMap.get(win)
if c:
hook.fire("client_killed", c)
self.reset_gaps(c)
if getattr(c, "group", None):
c.group.remove(c)
del self.windowMap[win]
self.update_client_list()
def reset_gaps(self, c):
if c.strut:
self.update_gaps((0, 0, 0, 0), c.strut)
def update_gaps(self, strut, old_strut=None):
from libqtile.bar import Gap
(left, right, top, bottom) = strut[:4]
if old_strut:
(old_left, old_right, old_top, old_bottom) = old_strut[:4]
if not left and old_left:
self.currentScreen.left = None
elif not right and old_right:
self.currentScreen.right = None
elif not top and old_top:
self.currentScreen.top = None
elif not bottom and old_bottom:
self.currentScreen.bottom = None
if top:
self.currentScreen.top = Gap(top)
elif bottom:
self.currentScreen.bottom = Gap(bottom)
elif left:
self.currentScreen.left = Gap(left)
elif right:
self.currentScreen.right = Gap(right)
self.currentScreen.resize()
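    # Hypothetical example of the strut handling above: a panel that reserves
    # 24 pixels at the top of the screen advertises a strut like (0, 0, 24, 0)
    # (left, right, top, bottom), which becomes
    #     self.currentScreen.top = Gap(24)
    # followed by a screen resize; when the panel disappears, reset_gaps()
    # replays the call with (0, 0, 0, 0) so the reserved space is released.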
def manage(self, w):
try:
attrs = w.get_attributes()
internal = w.get_property("QTILE_INTERNAL")
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
if attrs and attrs.override_redirect:
return
if w.wid not in self.windowMap:
if internal:
try:
c = window.Internal(w, self)
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
self.windowMap[w.wid] = c
else:
try:
c = window.Window(w, self)
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
if w.get_wm_type() == "dock" or c.strut:
c.static(self.currentScreen.index)
else:
hook.fire("client_new", c)
# Window may be defunct because
# it's been declared static in hook.
if c.defunct:
return
self.windowMap[w.wid] = c
# Window may have been bound to a group in the hook.
if not c.group:
self.currentScreen.group.add(c, focus=c.can_steal_focus())
self.update_client_list()
hook.fire("client_managed", c)
return c
else:
return self.windowMap[w.wid]
def update_client_list(self):
"""
        Updates the client stack list.
        This is needed for third-party tasklists
        and drag and drop of tabs in Chrome.
"""
windows = [wid for wid, c in self.windowMap.items() if c.group]
self.root.set_property("_NET_CLIENT_LIST", windows)
# TODO: check stack order
self.root.set_property("_NET_CLIENT_LIST_STACKING", windows)
def grabMouse(self):
self.root.ungrab_button(None, None)
for i in self.config.mouse:
if isinstance(i, Click) and i.focus:
# Make a freezing grab on mouse button to gain focus
# Event will propagate to target window
grabmode = xcffib.xproto.GrabMode.Sync
else:
grabmode = xcffib.xproto.GrabMode.Async
eventmask = EventMask.ButtonPress
if isinstance(i, Drag):
eventmask |= EventMask.ButtonRelease
self.root.grab_button(
i.button_code,
i.modmask,
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
if self.numlockMask:
self.root.grab_button(
i.button_code,
i.modmask | self.numlockMask,
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
self.root.grab_button(
i.button_code,
i.modmask | self.numlockMask | xcbq.ModMasks["lock"],
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
def grabKeys(self):
self.root.ungrab_key(None, None)
for key in self.keyMap.values():
self.mapKey(key)
def get_target_chain(self, ename, e):
"""
Returns a chain of targets that can handle this event. The event
will be passed to each target in turn for handling, until one of
the handlers returns False or the end of the chain is reached.
"""
chain = []
handler = "handle_%s" % ename
# Certain events expose the affected window id as an "event" attribute.
eventEvents = [
"EnterNotify",
"ButtonPress",
"ButtonRelease",
"KeyPress",
]
c = None
if hasattr(e, "window"):
c = self.windowMap.get(e.window)
elif hasattr(e, "drawable"):
c = self.windowMap.get(e.drawable)
elif ename in eventEvents:
c = self.windowMap.get(e.event)
if c and hasattr(c, handler):
chain.append(getattr(c, handler))
if hasattr(self, handler):
chain.append(getattr(self, handler))
if not chain:
self.log.info("Unknown event: %r" % ename)
return chain
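    # Sketch of the dispatch above (the event is invented): an EnterNotify
    # whose "event" field maps to a managed client yields a chain like
    #     [client.handle_EnterNotify, self.handle_EnterNotify]
    # and _xpoll() below calls each handler in order, stopping as soon as one
    # of them returns a falsy value.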
def _xpoll(self):
while True:
try:
e = self.conn.conn.poll_for_event()
if not e:
break
ename = e.__class__.__name__
if ename.endswith("Event"):
ename = ename[:-5]
if e.__class__ not in self.ignoreEvents:
self.log.debug(ename)
for h in self.get_target_chain(ename, e):
self.log.info("Handling: %s" % ename)
r = h(e)
if not r:
break
# Catch some bad X exceptions. Since X is event based, race
# conditions can occur almost anywhere in the code. For
# example, if a window is created and then immediately
            # destroyed (before the event handler is invoked), when the
# event handler tries to examine the window properties, it
# will throw a WindowError exception. We can essentially
# ignore it, since the window is already dead and we've got
# another event in the queue notifying us to clean it up.
except (WindowError, AccessError, DrawableError):
pass
except Exception as e:
error_code = self.conn.conn.has_error()
if error_code:
error_string = xcbq.XCB_CONN_ERRORS[error_code]
self.log.exception("Shutting down due to X connection error %s (%s)" %
(error_string, error_code))
self.stop()
break
self.log.exception("Got an exception in poll loop")
self.conn.flush()
def stop(self):
self.log.info('Stopping eventloop')
self._eventloop.stop()
def loop(self):
self.server.start()
try:
self._eventloop.run_forever()
finally:
self.finalize()
def find_screen(self, x, y):
"""
Find a screen based on the x and y offset.
"""
result = []
for i in self.screens:
if x >= i.x and x <= i.x + i.width and \
y >= i.y and y <= i.y + i.height:
result.append(i)
if len(result) == 1:
return result[0]
return None
def find_closest_screen(self, x, y):
"""
        If find_screen returns None, then this basically extends each
        screen vertically and horizontally and sees if x, y lies in the
        band.
        Only works if it can find a SINGLE closest screen; otherwise we
        fall back to _find_closest_closest.
        Useful when dragging a window out of a screen onto another, but
        with its top-left corner outside the viewport.
"""
normal = self.find_screen(x, y)
if normal is not None:
return normal
x_match = []
y_match = []
for i in self.screens:
if x >= i.x and x <= i.x + i.width:
x_match.append(i)
if y >= i.y and y <= i.y + i.height:
y_match.append(i)
if len(x_match) == 1:
return x_match[0]
if len(y_match) == 1:
return y_match[0]
return self._find_closest_closest(x, y, x_match + y_match)
def _find_closest_closest(self, x, y, candidate_screens):
"""
        If find_closest_screen can't determine one, we've got multiple
        screens, so figure out which is closer.  We'll calculate using
        the square of the distance from the center of each screen.
        Note that this could return None if x, y is right of/below all
        screens (shouldn't happen, but we don't do anything about it
        here other than returning None).
"""
closest_distance = None
closest_screen = None
if not candidate_screens:
# try all screens
candidate_screens = self.screens
# if left corner is below and right of screen
# it can't really be a candidate
candidate_screens = [
s for s in candidate_screens
            if x < s.x + s.width and y < s.y + s.height
]
for s in candidate_screens:
middle_x = s.x + s.width / 2
middle_y = s.y + s.height / 2
distance = (x - middle_x) ** 2 + (y - middle_y) ** 2
if closest_distance is None or distance < closest_distance:
closest_distance = distance
closest_screen = s
return closest_screen
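    # Worked example for the distance test above (layout and pointer position
    # are invented): with two 1920x1080 screens at x=0 and x=1920 and the
    # pointer at (-100, -100), neither an x nor a y band matches, so both
    # screens are candidates; their centers are (960, 540) and (2880, 540),
    # giving squared distances of 1,533,200 and 9,290,000, so the left-hand
    # screen is returned.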
def handle_SelectionNotify(self, e):
if not getattr(e, "owner", None):
return
name = self.conn.atoms.get_name(e.selection)
self.selection[name]["owner"] = e.owner
self.selection[name]["selection"] = ""
self.convert_selection(e.selection)
hook.fire("selection_notify", name, self.selection[name])
def convert_selection(self, selection, _type="UTF8_STRING"):
TYPE = self.conn.atoms[_type]
self.conn.conn.core.ConvertSelection(self.selection_window.wid,
selection,
TYPE, selection,
xcffib.CurrentTime)
def handle_PropertyNotify(self, e):
name = self.conn.atoms.get_name(e.atom)
# it's the selection property
if name in ("PRIMARY", "CLIPBOARD"):
assert e.window == self.selection_window.wid
prop = self.selection_window.get_property(e.atom, "UTF8_STRING")
# If the selection property is None, it is unset, which means the
# clipboard is empty.
value = prop and prop.value.to_utf8() or six.u("")
self.selection[name]["selection"] = value
hook.fire("selection_change", name, self.selection[name])
def handle_EnterNotify(self, e):
if e.event in self.windowMap:
return True
s = self.find_screen(e.root_x, e.root_y)
if s:
self.toScreen(s.index, warp=False)
def handle_ClientMessage(self, event):
atoms = self.conn.atoms
opcode = event.type
data = event.data
# handle change of desktop
if atoms["_NET_CURRENT_DESKTOP"] == opcode:
index = data.data32[0]
try:
self.currentScreen.setGroup(self.groups[index])
except IndexError:
self.log.info("Invalid Desktop Index: %s" % index)
def handle_KeyPress(self, e):
keysym = self.conn.code_to_syms[e.detail][0]
state = e.state
if self.numlockMask:
state = e.state | self.numlockMask
k = self.keyMap.get((keysym, state & self.validMask))
if not k:
self.log.info("Ignoring unknown keysym: %s" % keysym)
return
for i in k.commands:
if i.check(self):
status, val = self.server.call(
(i.selectors, i.name, i.args, i.kwargs)
)
if status in (command.ERROR, command.EXCEPTION):
self.log.error("KB command error %s: %s" % (i.name, val))
else:
return
def cmd_focus_by_click(self, e):
wnd = e.child or e.root
# Additional option for config.py
# Brings clicked window to front
if self.config.bring_front_click:
self.conn.conn.core.ConfigureWindow(
wnd,
xcffib.xproto.ConfigWindow.StackMode,
[xcffib.xproto.StackMode.Above]
)
if self.windowMap.get(wnd):
self.currentGroup.focus(self.windowMap.get(wnd), False)
self.windowMap.get(wnd).focus(False)
self.conn.conn.core.AllowEvents(xcffib.xproto.Allow.ReplayPointer, e.time)
self.conn.conn.flush()
def handle_ButtonPress(self, e):
button_code = e.detail
state = e.state
if self.numlockMask:
state = e.state | self.numlockMask
k = self.mouseMap.get(button_code)
for m in k:
if not m or m.modmask & self.validMask != state & self.validMask:
self.log.info("Ignoring unknown button: %s" % button_code)
continue
if isinstance(m, Click):
for i in m.commands:
if i.check(self):
if m.focus == "before":
self.cmd_focus_by_click(e)
status, val = self.server.call(
(i.selectors, i.name, i.args, i.kwargs))
if m.focus == "after":
self.cmd_focus_by_click(e)
if status in (command.ERROR, command.EXCEPTION):
self.log.error(
"Mouse command error %s: %s" % (i.name, val)
)
elif isinstance(m, Drag):
x = e.event_x
y = e.event_y
if m.start:
i = m.start
if m.focus == "before":
self.cmd_focus_by_click(e)
status, val = self.server.call(
(i.selectors, i.name, i.args, i.kwargs))
if status in (command.ERROR, command.EXCEPTION):
self.log.error(
"Mouse command error %s: %s" % (i.name, val)
)
continue
else:
val = (0, 0)
if m.focus == "after":
self.cmd_focus_by_click(e)
self._drag = (x, y, val[0], val[1], m.commands)
self.root.grab_pointer(
True,
xcbq.ButtonMotionMask |
xcbq.AllButtonsMask |
xcbq.ButtonReleaseMask,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
def handle_ButtonRelease(self, e):
button_code = e.detail
state = e.state & ~xcbq.AllButtonsMask
if self.numlockMask:
state = state | self.numlockMask
k = self.mouseMap.get(button_code)
for m in k:
if not m:
self.log.info(
"Ignoring unknown button release: %s" % button_code
)
continue
if isinstance(m, Drag):
self._drag = None
self.root.ungrab_pointer()
def handle_MotionNotify(self, e):
if self._drag is None:
return
ox, oy, rx, ry, cmd = self._drag
dx = e.event_x - ox
dy = e.event_y - oy
if dx or dy:
for i in cmd:
if i.check(self):
status, val = self.server.call((
i.selectors,
i.name,
i.args + (rx + dx, ry + dy, e.event_x, e.event_y),
i.kwargs
))
if status in (command.ERROR, command.EXCEPTION):
self.log.error(
"Mouse command error %s: %s" % (i.name, val)
)
def handle_ConfigureNotify(self, e):
"""
Handle xrandr events.
"""
screen = self.currentScreen
if e.window == self.root.wid and \
e.width != screen.width and \
e.height != screen.height:
screen.resize(0, 0, e.width, e.height)
def handle_ConfigureRequest(self, e):
# It's not managed, or not mapped, so we just obey it.
cw = xcffib.xproto.ConfigWindow
args = {}
if e.value_mask & cw.X:
args["x"] = max(e.x, 0)
if e.value_mask & cw.Y:
args["y"] = max(e.y, 0)
if e.value_mask & cw.Height:
args["height"] = max(e.height, 0)
if e.value_mask & cw.Width:
args["width"] = max(e.width, 0)
if e.value_mask & cw.BorderWidth:
args["borderwidth"] = max(e.border_width, 0)
w = xcbq.Window(self.conn, e.window)
w.configure(**args)
def handle_MappingNotify(self, e):
self.conn.refresh_keymap()
if e.request == xcffib.xproto.Mapping.Keyboard:
self.grabKeys()
def handle_MapRequest(self, e):
w = xcbq.Window(self.conn, e.window)
c = self.manage(w)
if c and (not c.group or not c.group.screen):
return
w.map()
def handle_DestroyNotify(self, e):
self.unmanage(e.window)
def handle_UnmapNotify(self, e):
if e.event != self.root.wid:
c = self.windowMap.get(e.window)
if c and getattr(c, "group", None):
try:
c.window.unmap()
c.state = window.WithdrawnState
except xcffib.xproto.WindowError:
# This means that the window has probably been destroyed,
# but we haven't yet seen the DestroyNotify (it is likely
# next in the queue). So, we just let these errors pass
# since the window is dead.
pass
self.unmanage(e.window)
def handle_ScreenChangeNotify(self, e):
hook.fire("screen_change", self, e)
def toScreen(self, n, warp=True):
"""
        Have Qtile move to screen n and put focus there.
        """
        if n >= len(self.screens):
return
old = self.currentScreen
self.currentScreen = self.screens[n]
if old != self.currentScreen:
hook.fire("current_screen_change")
self.currentGroup.focus(self.currentWindow, warp)
def moveToGroup(self, group):
"""
        Create a group if it doesn't exist and move a window there
"""
if self.currentWindow and group:
self.addGroup(group)
self.currentWindow.togroup(group)
def _items(self, name):
if name == "group":
return True, list(self.groupMap.keys())
elif name == "layout":
return True, list(range(len(self.currentGroup.layouts)))
elif name == "widget":
return False, list(self.widgetMap.keys())
elif name == "bar":
return False, [x.position for x in self.currentScreen.gaps]
elif name == "window":
return True, self.listWID()
elif name == "screen":
return True, list(range(len(self.screens)))
def _select(self, name, sel):
if name == "group":
if sel is None:
return self.currentGroup
else:
return self.groupMap.get(sel)
elif name == "layout":
if sel is None:
return self.currentGroup.layout
else:
return utils.lget(self.currentGroup.layouts, sel)
elif name == "widget":
return self.widgetMap.get(sel)
elif name == "bar":
return getattr(self.currentScreen, sel)
elif name == "window":
if sel is None:
return self.currentWindow
else:
return self.clientFromWID(sel)
elif name == "screen":
if sel is None:
return self.currentScreen
else:
return utils.lget(self.screens, sel)
def listWID(self):
return [i.window.wid for i in self.windowMap.values()]
def clientFromWID(self, wid):
for i in self.windowMap.values():
if i.window.wid == wid:
return i
return None
def call_soon(self, func, *args):
""" A wrapper for the event loop's call_soon which also flushes the X
event queue to the server after func is called. """
def f():
func(*args)
self.conn.flush()
self._eventloop.call_soon(f)
def call_soon_threadsafe(self, func, *args):
""" Another event loop proxy, see `call_soon`. """
def f():
func(*args)
self.conn.flush()
self._eventloop.call_soon_threadsafe(f)
def call_later(self, delay, func, *args):
""" Another event loop proxy, see `call_soon`. """
def f():
func(*args)
self.conn.flush()
self._eventloop.call_later(delay, f)
def run_in_executor(self, func, *args):
""" A wrapper for running a function in the event loop's default
executor. """
return self._eventloop.run_in_executor(None, func, *args)
def cmd_debug(self):
"""Set log level to DEBUG"""
self.log.setLevel(logging.DEBUG)
self.log.debug('Switching to DEBUG threshold')
def cmd_info(self):
"""Set log level to INFO"""
self.log.setLevel(logging.INFO)
self.log.info('Switching to INFO threshold')
def cmd_warning(self):
"""Set log level to WARNING"""
self.log.setLevel(logging.WARNING)
self.log.warning('Switching to WARNING threshold')
def cmd_error(self):
"""Set log level to ERROR"""
self.log.setLevel(logging.ERROR)
self.log.error('Switching to ERROR threshold')
def cmd_critical(self):
"""Set log level to CRITICAL"""
self.log.setLevel(logging.CRITICAL)
self.log.critical('Switching to CRITICAL threshold')
def cmd_pause(self):
"""Drops into pdb"""
import pdb
pdb.set_trace()
def cmd_groups(self):
"""
Return a dictionary containing information for all groups.
Example:
groups()
"""
return dict((i.name, i.info()) for i in self.groups)
def cmd_get_info(self):
x = {}
for i in self.groups:
x[i.name] = i.info()
return x
def cmd_list_widgets(self):
"""
        List of all addressable widget names.
"""
return list(self.widgetMap.keys())
def cmd_to_layout_index(self, index, group=None):
"""
Switch to the layout with the given index in self.layouts.
:index Index of the layout in the list of layouts.
:group Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.toLayoutIndex(index)
def cmd_next_layout(self, group=None):
"""
Switch to the next layout.
:group Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.nextLayout()
def cmd_prev_layout(self, group=None):
"""
Switch to the prev layout.
:group Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.prevLayout()
def cmd_screens(self):
"""
Return a list of dictionaries providing information on all screens.
"""
lst = []
for i in self.screens:
lst.append(dict(
index=i.index,
group=i.group.name if i.group is not None else None,
x=i.x,
y=i.y,
width=i.width,
height=i.height,
gaps=dict(
top=i.top.geometry() if i.top else None,
bottom=i.bottom.geometry() if i.bottom else None,
left=i.left.geometry() if i.left else None,
right=i.right.geometry() if i.right else None,
)
))
return lst
def cmd_simulate_keypress(self, modifiers, key):
"""
Simulates a keypress on the focused window.
:modifiers A list of modifier specification strings. Modifiers can
be one of "shift", "lock", "control" and "mod1" - "mod5".
:key Key specification.
Examples:
simulate_keypress(["control", "mod2"], "k")
"""
# FIXME: This needs to be done with sendevent, once we have that fixed.
keysym = xcbq.keysyms.get(key)
if keysym is None:
raise command.CommandError("Unknown key: %s" % key)
keycode = self.conn.first_sym_to_code[keysym]
class DummyEv(object):
pass
d = DummyEv()
d.detail = keycode
try:
d.state = utils.translateMasks(modifiers)
except KeyError as v:
return v.args[0]
self.handle_KeyPress(d)
def cmd_execute(self, cmd, args):
"""
Executes the specified command, replacing the current process.
"""
self.stop()
os.execv(cmd, args)
def cmd_restart(self):
"""
Restart qtile using the execute command.
"""
argv = [sys.executable] + sys.argv
if '--no-spawn' not in argv:
argv.append('--no-spawn')
buf = six.BytesIO()
try:
pickle.dump(QtileState(self), buf, protocol=0)
except:
self.log.error("Unable to pickle qtile state")
argv = [s for s in argv if not s.startswith('--with-state')]
argv.append('--with-state=' + buf.getvalue().decode())
self.cmd_execute(sys.executable, argv)
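    # Restart sketch (paths are illustrative): a session started as
    #     /usr/bin/python /usr/bin/qtile
    # re-execs itself as
    #     /usr/bin/python /usr/bin/qtile --no-spawn --with-state=<pickled state>
    # so group and screen assignments survive the restart, while --no-spawn
    # keeps the startup_once hook from firing a second time (see __init__).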
def cmd_spawn(self, cmd):
"""
Run cmd in a shell.
Example:
spawn("firefox")
"""
args = shlex.split(cmd)
r, w = os.pipe()
pid = os.fork()
if pid < 0:
os.close(r)
os.close(w)
return pid
if pid == 0:
os.close(r)
# close qtile's stdin, stdout, stderr so the called process doesn't
# pollute our xsession-errors.
os.close(0)
os.close(1)
os.close(2)
pid2 = os.fork()
if pid2 == 0:
os.close(w)
# Open /dev/null as stdin, stdout, stderr
try:
fd = os.open(os.devnull, os.O_RDWR)
except OSError:
# This shouldn't happen, catch it just in case
pass
else:
if fd > 0:
# Again, this shouldn't happen, but we should just check
os.dup2(fd, 0)
os.dup2(fd, 1)
os.dup2(fd, 2)
try:
os.execvp(args[0], args)
except OSError:
pass
os._exit(1)
else:
# Here it doesn't matter if fork failed or not, we just write
# its return code and exit.
os.write(w, str(pid2).encode())
os.close(w)
# sys.exit raises SystemExit, which will then be caught by our
# top level catchall and we'll end up with two qtiles; os._exit
                # actually exits the process immediately.
os._exit(0)
else:
os.close(w)
os.waitpid(pid, 0)
# 1024 bytes should be enough for any pid. :)
pid = os.read(r, 1024)
os.close(r)
return int(pid)
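    # Note on the pipe protocol above: the intermediate child writes the
    # grandchild's pid into the pipe as a decimal string and exits
    # immediately, so the parent's os.waitpid() reaps the intermediate child
    # while the grandchild (the actual command) is re-parented to init. This
    # is the classic double-fork trick that keeps qtile free of zombies for
    # spawned programs.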
def cmd_status(self):
"""
Return "OK" if Qtile is running.
"""
return "OK"
def cmd_sync(self):
"""
Sync the X display. Should only be used for development.
"""
self.conn.flush()
def cmd_to_screen(self, n):
"""
Warp focus to screen n, where n is a 0-based screen number.
Example:
to_screen(0)
"""
return self.toScreen(n)
def cmd_next_screen(self):
"""
Move to next screen
"""
return self.toScreen(
(self.screens.index(self.currentScreen) + 1) % len(self.screens)
)
def cmd_prev_screen(self):
"""
Move to the previous screen
"""
return self.toScreen(
(self.screens.index(self.currentScreen) - 1) % len(self.screens)
)
def cmd_windows(self):
"""
Return info for each client window.
"""
return [
i.info() for i in self.windowMap.values()
if not isinstance(i, window.Internal)
]
def cmd_internal_windows(self):
"""
Return info for each internal window (bars, for example).
"""
return [
i.info() for i in self.windowMap.values()
if isinstance(i, window.Internal)
]
def cmd_qtile_info(self):
"""
Returns a dictionary of info on the Qtile instance.
"""
return dict(socketname=self.fname)
def cmd_shutdown(self):
"""
Quit Qtile.
"""
self.stop()
def cmd_switch_groups(self, groupa, groupb):
"""
        Switch the positions of groupa and groupb
"""
if groupa not in self.groupMap or groupb not in self.groupMap:
return
indexa = self.groups.index(self.groupMap[groupa])
indexb = self.groups.index(self.groupMap[groupb])
self.groups[indexa], self.groups[indexb] = \
self.groups[indexb], self.groups[indexa]
hook.fire("setgroup")
# update window _NET_WM_DESKTOP
for group in (self.groups[indexa], self.groups[indexb]):
for w in group.windows:
w.group = group
def find_window(self, wid):
window = self.windowMap.get(wid)
if window:
if not window.group.screen:
self.currentScreen.setGroup(window.group)
window.group.focus(window, False)
def cmd_findwindow(self, prompt="window", widget="prompt"):
mb = self.widgetMap.get(widget)
if not mb:
self.log.error("No widget named '%s' present." % widget)
return
mb.startInput(
prompt,
self.find_window,
"window",
strict_completer=True
)
def cmd_next_urgent(self):
try:
nxt = [w for w in self.windowMap.values() if w.urgent][0]
nxt.group.cmd_toscreen()
nxt.group.focus(nxt)
except IndexError:
pass # no window had urgent set
def cmd_togroup(self, prompt="group", widget="prompt"):
"""
        Move current window to the selected group in a prompt widget
prompt: Text with which to prompt user.
widget: Name of the prompt widget (default: "prompt").
"""
if not self.currentWindow:
self.log.warning("No window to move")
return
mb = self.widgetMap.get(widget)
if not mb:
self.log.error("No widget named '%s' present." % widget)
return
mb.startInput(prompt, self.moveToGroup, "group", strict_completer=True)
def cmd_switchgroup(self, prompt="group", widget="prompt"):
def f(group):
if group:
try:
self.groupMap[group].cmd_toscreen()
except KeyError:
self.log.info("No group named '%s' present." % group)
pass
mb = self.widgetMap.get(widget)
if not mb:
self.log.warning("No widget named '%s' present." % widget)
return
mb.startInput(prompt, f, "group", strict_completer=True)
def cmd_spawncmd(self, prompt="spawn", widget="prompt",
command="%s", complete="cmd"):
"""
Spawn a command using a prompt widget, with tab-completion.
prompt: Text with which to prompt user (default: "spawn: ").
widget: Name of the prompt widget (default: "prompt").
command: command template (default: "%s").
complete: Tab completion function (default: "cmd")
"""
def f(args):
if args:
self.cmd_spawn(command % args)
try:
mb = self.widgetMap[widget]
mb.startInput(prompt, f, complete)
except KeyError:
self.log.error("No widget named '%s' present." % widget)
def cmd_qtilecmd(self, prompt="command",
widget="prompt", messenger="xmessage"):
"""
Execute a Qtile command using the client syntax.
        Tab completion aids navigation of the command tree.
prompt: Text to display at the prompt (default: "command: ").
widget: Name of the prompt widget (default: "prompt").
messenger: command to display output (default: "xmessage").
Set this to None to disable.
"""
def f(cmd):
if cmd:
# c here is used in eval() below
c = command.CommandRoot(self) # noqa
try:
cmd_arg = str(cmd).split(' ')
except AttributeError:
return
cmd_len = len(cmd_arg)
if cmd_len == 0:
self.log.info('No command entered.')
return
try:
result = eval('c.%s' % (cmd))
except (
command.CommandError,
command.CommandException,
AttributeError) as err:
self.log.error(err)
result = None
if result is not None:
from pprint import pformat
message = pformat(result)
if messenger:
self.cmd_spawn('%s "%s"' % (messenger, message))
self.log.info(result)
mb = self.widgetMap[widget]
if not mb:
self.log.error("No widget named %s present." % widget)
return
mb.startInput(prompt, f, "qsh")
def cmd_addgroup(self, group):
return self.addGroup(group)
def cmd_delgroup(self, group):
return self.delGroup(group)
def cmd_add_rule(self, match_args, rule_args, min_priorty=False):
"""
        Add a dgroup rule; returns the rule_id needed to remove it.
        param: match_args (config.Match arguments)
        param: rule_args (config.Rule arguments)
        param: min_priorty: if True, the rule is added with minimum priority (last)
"""
if not self.dgroups:
self.log.warning('No dgroups created')
return
match = Match(**match_args)
rule = Rule(match, **rule_args)
return self.dgroups.add_rule(rule, min_priorty)
def cmd_remove_rule(self, rule_id):
self.dgroups.remove_rule(rule_id)
def cmd_run_external(self, full_path):
def format_error(path, e):
s = """Can't call "main" from "{path}"\n\t{err_name}: {err}"""
return s.format(path=path, err_name=e.__class__.__name__, err=e)
module_name = os.path.splitext(os.path.basename(full_path))[0]
dir_path = os.path.dirname(full_path)
err_str = ""
local_stdout = six.BytesIO()
old_stdout = sys.stdout
sys.stdout = local_stdout
sys.exc_clear()
try:
module = _import_module(module_name, dir_path)
module.main(self)
except ImportError as e:
err_str += format_error(full_path, e)
except:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
err_str += traceback.format_exc()
err_str += format_error(full_path, exc_type(exc_value))
finally:
sys.exc_clear()
sys.stdout = old_stdout
local_stdout.close()
return local_stdout.getvalue() + err_str
def cmd_hide_show_bar(self, position="all"):
"""
param: position one of: "top", "bottom", "left", "right" or "all"
"""
if position in ["top", "bottom", "left", "right"]:
bar = getattr(self.currentScreen, position)
if bar:
bar.show(not bar.is_show())
self.currentGroup.layoutAll()
else:
self.log.warning(
"Not found bar in position '%s' for hide/show." % position)
elif position == "all":
screen = self.currentScreen
is_show = None
for bar in [screen.left, screen.right, screen.top, screen.bottom]:
if bar:
if is_show is None:
is_show = not bar.is_show()
bar.show(is_show)
if is_show is not None:
self.currentGroup.layoutAll()
else:
self.log.warning("Not found bar for hide/show.")
else:
self.log.error("Invalid position value:%s" % position)
def cmd_get_state(self):
buf = six.BytesIO()
pickle.dump(QtileState(self), buf, protocol=0)
state = buf.getvalue().decode()
self.log.info('State = ')
self.log.info(''.join(state.split('\n')))
return state
def cmd_tracemalloc_toggle(self):
        if not tracemalloc:
            self.log.warning('No tracemalloc module')
            raise command.CommandError("No tracemalloc module")
        if not tracemalloc.is_tracing():
            tracemalloc.start()
        else:
            tracemalloc.stop()
def cmd_tracemalloc_dump(self):
if not tracemalloc:
self.log.warning('No tracemalloc module')
raise command.CommandError("No tracemalloc module")
if not tracemalloc.is_tracing():
return [False, "Trace not started"]
cache_directory = get_cache_dir()
malloc_dump = os.path.join(cache_directory, "qtile_tracemalloc.dump")
tracemalloc.take_snapshot().dump(malloc_dump)
return [True, malloc_dump]
| nxnfufunezn/qtile | libqtile/manager.py | Python | mit | 59,901 | 0.00025 |
#!/usr/bin/python
#
# Copyright (C) 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing the hypervisor.hv_kvm module"""
import threading
import tempfile
import unittest
import socket
import os
import struct
import re
from ganeti import serializer
from ganeti import constants
from ganeti import compat
from ganeti import objects
from ganeti import errors
from ganeti import utils
from ganeti import pathutils
from ganeti.hypervisor import hv_kvm
import ganeti.hypervisor.hv_kvm.netdev as netdev
import ganeti.hypervisor.hv_kvm.monitor as monitor
import testutils
class QmpStub(threading.Thread):
"""Stub for a QMP endpoint for a KVM instance
"""
_QMP_BANNER_DATA = {
"QMP": {
"version": {
"package": "",
"qemu": {
"micro": 50,
"minor": 13,
"major": 0,
},
"capabilities": [],
},
}
}
_EMPTY_RESPONSE = {
"return": [],
}
_SUPPORTED_COMMANDS = {
"return": [
{"name": "command"},
{"name": "query-kvm"},
{"name": "eject"},
{"name": "query-status"},
{"name": "query-name"},
]
}
def __init__(self, socket_filename, server_responses):
"""Creates a QMP stub
@type socket_filename: string
    @param socket_filename: filename of the UNIX socket that will be created
      by this class and used for the communication
@type server_responses: list
@param server_responses: list of responses that the server sends in response
to whatever it receives
"""
threading.Thread.__init__(self)
self.socket_filename = socket_filename
self.script = server_responses[:]
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.socket_filename)
self.socket.listen(1)
def run(self):
# Hypothesis: the messages we receive contain only a complete QMP message
# encoded in JSON.
conn, addr = self.socket.accept()
# Send the banner as the first thing
conn.send(self.encode_string(self._QMP_BANNER_DATA))
# Expect qmp_capabilities and return an empty response
conn.recv(4096)
conn.send(self.encode_string(self._EMPTY_RESPONSE))
# Expect query-commands and return the list of supported commands
conn.recv(4096)
conn.send(self.encode_string(self._SUPPORTED_COMMANDS))
while True:
# We ignore the expected message, as the purpose of this object is not
# to verify the correctness of the communication but to act as a
# partner for the SUT (System Under Test, that is QmpConnection)
msg = conn.recv(4096)
if not msg:
break
if not self.script:
break
response = self.script.pop(0)
if isinstance(response, str):
conn.send(response)
elif isinstance(response, list):
for chunk in response:
conn.send(chunk)
else:
raise errors.ProgrammerError("Unknown response type for %s" % response)
conn.close()
def encode_string(self, message):
return (serializer.DumpJson(message) +
hv_kvm.QmpConnection._MESSAGE_END_TOKEN)
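# Wire-format note for the stub above (illustrative): every response is a
# single JSON object terminated by '\r\n', e.g.
#   '{"return": {"enabled": true, "present": true}}\r\n'
# (see SERVER_RESPONSES further below), which is what encode_string()
# produces by appending QmpConnection._MESSAGE_END_TOKEN to the serialized
# message.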
class TestQmpMessage(testutils.GanetiTestCase):
def testSerialization(self):
test_data = {
"execute": "command",
"arguments": ["a", "b", "c"],
}
message = hv_kvm.QmpMessage(test_data)
for k, v in test_data.items():
self.assertEqual(message[k], v)
serialized = str(message)
self.assertEqual(len(serialized.splitlines()), 1,
msg="Got multi-line message")
rebuilt_message = hv_kvm.QmpMessage.BuildFromJsonString(serialized)
self.assertEqual(rebuilt_message, message)
self.assertEqual(len(rebuilt_message), len(test_data))
def testDelete(self):
toDelete = "execute"
test_data = {
toDelete: "command",
"arguments": ["a", "b", "c"],
}
message = hv_kvm.QmpMessage(test_data)
oldLen = len(message)
del(message[toDelete])
newLen = len(message)
self.assertEqual(oldLen - 1, newLen)
class TestQmp(testutils.GanetiTestCase):
REQUESTS = [
{"execute": "query-kvm", "arguments": []},
{"execute": "eject", "arguments": {"device": "ide1-cd0"}},
{"execute": "query-status", "arguments": []},
{"execute": "query-name", "arguments": []},
]
SERVER_RESPONSES = [
# One message, one send()
'{"return": {"enabled": true, "present": true}}\r\n',
# Message sent using multiple send()
['{"retur', 'n": {}}\r\n'],
# Multiple messages sent using one send()
'{"return": [{"name": "quit"}, {"name": "eject"}]}\r\n'
'{"return": {"running": true, "singlestep": false}}\r\n',
]
EXPECTED_RESPONSES = [
{"enabled": True, "present": True},
{},
[{"name": "quit"}, {"name": "eject"}],
{"running": True, "singlestep": False},
]
def testQmp(self):
# Set up the stub
socket_file = tempfile.NamedTemporaryFile()
os.remove(socket_file.name)
qmp_stub = QmpStub(socket_file.name, self.SERVER_RESPONSES)
qmp_stub.start()
# Set up the QMP connection
qmp_connection = hv_kvm.QmpConnection(socket_file.name)
qmp_connection.connect()
# Format the script
for request, expected_response in zip(self.REQUESTS,
self.EXPECTED_RESPONSES):
response = qmp_connection.Execute(request["execute"],
request["arguments"])
self.assertEqual(response, expected_response)
msg = hv_kvm.QmpMessage({"return": expected_response})
self.assertEqual(len(str(msg).splitlines()), 1,
msg="Got multi-line message")
self.assertRaises(monitor.QmpCommandNotSupported,
qmp_connection.Execute,
"unsupported-command")
def testQmpContextManager(self):
# Set up the stub
socket_file = tempfile.NamedTemporaryFile()
os.remove(socket_file.name)
qmp_stub = QmpStub(socket_file.name, self.SERVER_RESPONSES)
qmp_stub.start()
# Test the context manager functionality
with hv_kvm.QmpConnection(socket_file.name) as qmp:
for request, expected_response in zip(self.REQUESTS,
self.EXPECTED_RESPONSES):
response = qmp.Execute(request["execute"], request["arguments"])
self.assertEqual(response, expected_response)
class TestConsole(unittest.TestCase):
def _Test(self, instance, node, hvparams):
cons = hv_kvm.KVMHypervisor.GetInstanceConsole(instance, node, hvparams, {})
self.assertTrue(cons.Validate())
return cons
def testSerial(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node6017-uuid")
node = objects.Node(name="node6017", uuid="node6017-uuid")
hvparams = {
constants.HV_SERIAL_CONSOLE: True,
constants.HV_VNC_BIND_ADDRESS: None,
constants.HV_KVM_SPICE_BIND: None,
}
cons = self._Test(instance, node, hvparams)
self.assertEqual(cons.kind, constants.CONS_SSH)
self.assertEqual(cons.host, node.name)
self.assertEqual(cons.command[0], pathutils.KVM_CONSOLE_WRAPPER)
self.assertEqual(cons.command[1], constants.SOCAT_PATH)
def testVnc(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node7235-uuid",
network_port=constants.VNC_BASE_PORT + 10)
node = objects.Node(name="node7235", uuid="node7235-uuid")
hvparams = {
constants.HV_SERIAL_CONSOLE: False,
constants.HV_VNC_BIND_ADDRESS: "192.0.2.1",
constants.HV_KVM_SPICE_BIND: None,
}
cons = self._Test(instance, node, hvparams)
self.assertEqual(cons.kind, constants.CONS_VNC)
self.assertEqual(cons.host, "192.0.2.1")
self.assertEqual(cons.port, constants.VNC_BASE_PORT + 10)
self.assertEqual(cons.display, 10)
def testSpice(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node7235",
network_port=11000)
node = objects.Node(name="node7235", uuid="node7235-uuid")
hvparams = {
constants.HV_SERIAL_CONSOLE: False,
constants.HV_VNC_BIND_ADDRESS: None,
constants.HV_KVM_SPICE_BIND: "192.0.2.1",
}
cons = self._Test(instance, node, hvparams)
self.assertEqual(cons.kind, constants.CONS_SPICE)
self.assertEqual(cons.host, "192.0.2.1")
self.assertEqual(cons.port, 11000)
def testNoConsole(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node24325",
network_port=0)
node = objects.Node(name="node24325", uuid="node24325-uuid")
hvparams = {
constants.HV_SERIAL_CONSOLE: False,
constants.HV_VNC_BIND_ADDRESS: None,
constants.HV_KVM_SPICE_BIND: None,
}
cons = self._Test(instance, node, hvparams)
self.assertEqual(cons.kind, constants.CONS_MESSAGE)
class TestVersionChecking(testutils.GanetiTestCase):
def testParseVersion(self):
parse = hv_kvm.KVMHypervisor._ParseKVMVersion
help_112 = testutils.ReadTestData("kvm_1.1.2_help.txt")
help_10 = testutils.ReadTestData("kvm_1.0_help.txt")
help_01590 = testutils.ReadTestData("kvm_0.15.90_help.txt")
help_0125 = testutils.ReadTestData("kvm_0.12.5_help.txt")
help_091 = testutils.ReadTestData("kvm_0.9.1_help.txt")
self.assertEqual(parse(help_112), ("1.1.2", 1, 1, 2))
self.assertEqual(parse(help_10), ("1.0", 1, 0, 0))
self.assertEqual(parse(help_01590), ("0.15.90", 0, 15, 90))
self.assertEqual(parse(help_0125), ("0.12.5", 0, 12, 5))
self.assertEqual(parse(help_091), ("0.9.1", 0, 9, 1))
class TestSpiceParameterList(unittest.TestCase):
def test(self):
defaults = constants.HVC_DEFAULTS[constants.HT_KVM]
params = \
compat.UniqueFrozenset(getattr(constants, name)
for name in dir(constants)
if name.startswith("HV_KVM_SPICE_"))
# Parameters whose default value evaluates to True and don't need to be set
defaults_true = frozenset(filter(defaults.__getitem__, params))
self.assertEqual(defaults_true, frozenset([
constants.HV_KVM_SPICE_AUDIO_COMPR,
constants.HV_KVM_SPICE_USE_VDAGENT,
constants.HV_KVM_SPICE_TLS_CIPHERS,
]))
# HV_KVM_SPICE_BIND decides whether the other parameters must be set if
# their default evaluates to False
assert constants.HV_KVM_SPICE_BIND in params
assert constants.HV_KVM_SPICE_BIND not in defaults_true
# Exclude some parameters
params -= defaults_true | frozenset([
constants.HV_KVM_SPICE_BIND,
])
self.assertEqual(hv_kvm._SPICE_ADDITIONAL_PARAMS, params)
class TestHelpRegexps(testutils.GanetiTestCase):
def testBootRe(self):
"""Check _BOOT_RE
    It has to match -drive.*boot=on|off except if there is another dash-option
at the beginning of the line.
"""
boot_re = hv_kvm.KVMHypervisor._BOOT_RE
help_112 = testutils.ReadTestData("kvm_1.1.2_help.txt")
help_10 = testutils.ReadTestData("kvm_1.0_help.txt")
help_01590 = testutils.ReadTestData("kvm_0.15.90_help.txt")
help_0125 = testutils.ReadTestData("kvm_0.12.5_help.txt")
help_091 = testutils.ReadTestData("kvm_0.9.1_help.txt")
help_091_fake = testutils.ReadTestData("kvm_0.9.1_help_boot_test.txt")
self.assertTrue(boot_re.search(help_091))
self.assertTrue(boot_re.search(help_0125))
self.assertFalse(boot_re.search(help_091_fake))
self.assertFalse(boot_re.search(help_112))
self.assertFalse(boot_re.search(help_10))
self.assertFalse(boot_re.search(help_01590))
class TestGetTunFeatures(unittest.TestCase):
def testWrongIoctl(self):
tmpfile = tempfile.NamedTemporaryFile()
# A file does not have the right ioctls, so this must always fail
result = netdev._GetTunFeatures(tmpfile.fileno())
self.assertTrue(result is None)
def _FakeIoctl(self, features, fd, request, buf):
self.assertEqual(request, netdev.TUNGETFEATURES)
(reqno, ) = struct.unpack("I", buf)
self.assertEqual(reqno, 0)
return struct.pack("I", features)
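  # The fake ioctl above mimics TUNGETFEATURES: the real caller passes in a
  # packed unsigned int buffer (struct.pack("I", 0)) that the kernel would
  # overwrite with the supported feature flags; here we just hand back the
  # packed `features` value chosen by the test case.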
def test(self):
tmpfile = tempfile.NamedTemporaryFile()
fd = tmpfile.fileno()
for features in [0, netdev.IFF_VNET_HDR]:
fn = compat.partial(self._FakeIoctl, features)
result = netdev._GetTunFeatures(fd, _ioctl=fn)
self.assertEqual(result, features)
class TestProbeTapVnetHdr(unittest.TestCase):
def _FakeTunFeatures(self, expected_fd, flags, fd):
self.assertEqual(fd, expected_fd)
return flags
def test(self):
tmpfile = tempfile.NamedTemporaryFile()
fd = tmpfile.fileno()
for flags in [0, netdev.IFF_VNET_HDR]:
fn = compat.partial(self._FakeTunFeatures, fd, flags)
result = netdev._ProbeTapVnetHdr(fd, _features_fn=fn)
if flags == 0:
self.assertFalse(result)
else:
self.assertTrue(result)
def testUnsupported(self):
tmpfile = tempfile.NamedTemporaryFile()
fd = tmpfile.fileno()
self.assertFalse(netdev._ProbeTapVnetHdr(fd, _features_fn=lambda _: None))
class TestGenerateDeviceKVMId(unittest.TestCase):
def test(self):
device = objects.NIC()
target = constants.HOTPLUG_TARGET_NIC
fn = hv_kvm._GenerateDeviceKVMId
self.assertRaises(errors.HotplugError, fn, target, device)
device.pci = 5
device.uuid = "003fc157-66a8-4e6d-8b7e-ec4f69751396"
self.assertTrue(re.match("hotnic-003fc157-pci-5", fn(target, device)))
class TestGetRuntimeInfo(unittest.TestCase):
@classmethod
def _GetRuntime(cls):
data = testutils.ReadTestData("kvm_runtime.json")
return hv_kvm._AnalyzeSerializedRuntime(data)
def _fail(self, target, device, runtime):
device.uuid = "aaaaaaaa-66a8-4e6d-8b7e-ec4f69751396"
self.assertRaises(errors.HotplugError,
hv_kvm._GetExistingDeviceInfo,
target, device, runtime)
def testNIC(self):
device = objects.NIC()
target = constants.HOTPLUG_TARGET_NIC
runtime = self._GetRuntime()
self._fail(target, device, runtime)
device.uuid = "003fc157-66a8-4e6d-8b7e-ec4f69751396"
devinfo = hv_kvm._GetExistingDeviceInfo(target, device, runtime)
self.assertTrue(devinfo.pci==6)
def testDisk(self):
device = objects.Disk()
target = constants.HOTPLUG_TARGET_DISK
runtime = self._GetRuntime()
self._fail(target, device, runtime)
device.uuid = "9f5c5bd4-6f60-480b-acdc-9bb1a4b7df79"
(devinfo, _, __) = hv_kvm._GetExistingDeviceInfo(target, device, runtime)
self.assertTrue(devinfo.pci==5)
if __name__ == "__main__":
testutils.GanetiTestProgram()
| apyrgio/snf-ganeti | test/py/ganeti.hypervisor.hv_kvm_unittest.py | Python | bsd-2-clause | 16,077 | 0.003981 |
from xmlrpc.client import ServerProxy
import sys
def help():
print("Usage : remote_finger [-lmsp] user..")
if __name__ == '__main__':
sys.argv = sys.argv[1:]
if len(sys.argv) == 0:
help()
sys.exit(1)
client = ServerProxy('http://localhost:8000')
print(client.finger(sys.argv))
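    # A minimal sketch of the server side this client expects (hypothetical;
    # the real server is a separate program). It would expose a finger()
    # function on port 8000 via the standard library's SimpleXMLRPCServer:
    #
    #     from xmlrpc.server import SimpleXMLRPCServer
    #     import subprocess
    #
    #     def finger(args):
    #         # args is the argv list forwarded by the client
    #         return subprocess.run(["finger"] + args,
    #                               capture_output=True, text=True).stdout
    #
    #     server = SimpleXMLRPCServer(("localhost", 8000))
    #     server.register_function(finger)
    #     server.serve_forever()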
sys.exit(0) | NileshPS/OS-and-Networking-programs | 7_rpc/client.py | Python | gpl-3.0 | 331 | 0.006042 |
from datetime import datetime
from app import app
from app.authentication import with_login
from flask import Blueprint, jsonify, request, Response
from app.generate_csv import generate_csv_clean
from app.msol_util import get_next_update_estimation_message_aws
from app.es.awsmetric import AWSMetric
from app.es.awsstat import AWSStat
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from app.aws_keys import with_multiple_aws_accounts
from dateutil.relativedelta import relativedelta
from app.generate_csv import generate_csv
from app.cache import compressed_json, decompressed_json, cache, with_cache
from hashlib import sha256
from .. import AWS_KEY_PROCESSING_INTERVAL_HOURS
import itertools
import calendar
import config
aws_cost_stats_bp = Blueprint('aws_cost_stats_bp', __name__)
def cut_cost_by_product(products, cut):
res = []
other = {'product': 'Other Services', 'cost': 0}
i = 0
for p in products:
if i < cut and p['cost'] >= 0.01:
res.append(p)
else:
other['cost'] += p['cost']
i += 1
if other['cost'] >= 0.01:
res.append(other)
return res
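# Illustrative example of cut_cost_by_product() (numbers are invented): given
# products sorted by descending cost
#     [{'product': 'EC2', 'cost': 120.0},
#      {'product': 'S3', 'cost': 30.0},
#      {'product': 'Route53', 'cost': 0.4}]
# and cut=2, the first two entries are kept as-is and everything else is folded
# into a trailing {'product': 'Other Services', 'cost': 0.4} bucket; items below
# one cent are folded into the same bucket, and the bucket itself is omitted if
# it stays below one cent.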
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycost', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycost/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycost(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get monthly costs
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
total_cost:
type: number
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
hour=23, minute=59, second=59, microsecond=999999)
data = AWSDetailedLineitem.get_monthly_cost(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)
return jsonify(data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/totalcost/<string:time_arg>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_totalcost(accounts, time_arg):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get total cost
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
total_cost:
type: number
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
this_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
this_month = this_day.replace(day=1)
time_val = {
'ever': AWSDetailedLineitem.get_first_date([account.get_aws_user_id() for account in accounts]),
        'currentyear': this_month - relativedelta(months=this_month.month - 1),
'currentmonth': this_month,
}
date_from = time_val.get(time_arg, now)
date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)
raw_data = AWSDetailedLineitem.get_cost(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)
return jsonify(raw_data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregion', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregion/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregion(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get monthly costs summed by region
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
                    regions:
type: array
items:
properties:
cost:
type: number
region:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
hour=23, minute=59, second=59, microsecond=999999)
raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)['intervals']['buckets']
res = [
{
'month': data['key_as_string'].split('T')[0],
'regions': [
{
'region': region['key'],
'cost': region['cost']['value'],
}
for region in data['regions']['buckets']
],
}
for data in raw_data
]
if 'csv' in request.args:
return Response(generate_csv(res, 'regions', 'region'), mimetype='text/csv')
return jsonify(months=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregionbyaccount(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get monthly costs summed by region for each account
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
                    regions:
type: array
items:
properties:
cost:
type: number
region:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
hour=23, minute=59, second=59, microsecond=999999)
raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
byaccount=True,
date_from=date_from,
date_to=date_to)['accounts']['buckets']
res = [
{
'account_id': account['key'],
'account_name': [a.pretty for a in accounts if account['key'] == a.get_aws_user_id()][0],
'months': [
{
'month': data['key_as_string'].split('T')[0],
'regions': [
{
'region': region['key'],
'cost': region['cost']['value'],
}
for region in data['regions']['buckets']
],
}
for data in account['intervals']['buckets']
]
}
for account in raw_data
]
if 'csv' in request.args:
return Response(generate_csv(res, 'regions', 'region', account=True), mimetype='text/csv')
return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbytagbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbytagbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregionbytagbyaccount(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
      description: &desc Get monthly costs summed by region and tag for each account
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
                    regions:
type: array
items:
properties:
cost:
type: number
region:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
hour=23, minute=59, second=59, microsecond=999999)
raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
tagged=True,
byaccount=True,
date_from=date_from,
date_to=date_to)['accounts']['buckets']
def tagged_cost(bucket, total):
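        # Yield (tag, cost) pairs for every tag bucket and, when the tagged
        # costs do not add up to the region total, a final
        # ('untagged', remainder) pair.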
total_tag = 0.0
for tag in bucket:
total_tag += tag['cost']['value']
yield (tag['key'], tag['cost']['value'])
if total != total_tag:
yield ('untagged', total - total_tag)
res = [
{
'account_id': account['key'],
'account_name': [a.pretty for a in accounts if a.get_aws_user_id() == account['key']][0],
'months': [
{
'month': data['key_as_string'].split('T')[0],
'regions': [
{
'region': region['key'],
'tags': [
{
'name': tag[0],
'cost': tag[1],
}
for tag in tagged_cost(region['tags']['buckets'], region['cost']['value'])
],
}
for region in data['regions']['buckets']
],
}
for data in account['intervals']['buckets']
]
}
for account in raw_data
]
if 'csv' in request.args:
return Response(generate_csv(res, 'regions', 'region', account=True, tagged=True), mimetype='text/csv')
return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/dailycostbyproduct', defaults={'nb_days': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/dailycostbyproduct/<int:nb_days>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_dailycostbyproduct(accounts, nb_days):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get daily costs summed by product
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
days:
type: array
items:
properties:
day:
type: string
products:
type: array
items:
properties:
cost:
type: number
product:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow().replace(hour=23, minute=59, second=59, microsecond=999999)
now = AWSDetailedLineitem.get_last_date([account.get_aws_user_id() for account in accounts], limit=now)
date_from = now.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(days=nb_days)
date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999) - relativedelta(days=1)
data = AWSDetailedLineitem.get_daily_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)['days']
for d in data:
d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
if not len(data):
return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
return jsonify(days=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproduct', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproduct/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproduct(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get monthly costs summed by product
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
products:
type: array
items:
properties:
cost:
type: number
product:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow().replace(hour=23, minute=59, second=59, microsecond=999999)
now = AWSDetailedLineitem.get_last_date([account.get_aws_user_id() for account in accounts], limit=now)
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1], hour=23, minute=59, second=59, microsecond=999999)
data = AWSDetailedLineitem.get_monthly_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)['months']
for d in data:
if 'csv' not in request.args:
d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
if not len(data):
return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
if 'csv' in request.args:
return Response(generate_csv(data, 'products', 'product'), mimetype='text/csv')
return jsonify(months=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproductbyaccount(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get monthly costs summed by product for each account
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
products:
type: array
items:
properties:
cost:
type: number
product:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
month = nb_months - 1
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=month)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
hour=23, minute=59, second=59, microsecond=999999)
res = [
{
'account_id': account.get_aws_user_id(),
'account_name': account.pretty,
'months': AWSDetailedLineitem.get_monthly_cost_by_product(keys=account.get_aws_user_id(),
date_from=date_from,
date_to=date_to)['months'],
}
for account in accounts
]
if 'csv' in request.args:
return Response(generate_csv(res, 'products', 'product', account=True), mimetype='text/csv')
return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbytagbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbytagbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproductbytagbyaccount(accounts, nb_months):
"""---
get:
tags:
- aws
produces:
- application/json
      description: &desc Get monthly costs summed by product and tag for each account
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
months:
type: array
items:
properties:
month:
type: string
products:
type: array
items:
properties:
cost:
type: number
product:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
month = nb_months - 1
date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=month)
date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
hour=23, minute=59, second=59, microsecond=999999)
res = [
{
'account_id': account.get_aws_user_id(),
'account_name': account.pretty,
'months': AWSDetailedLineitem.get_monthly_cost_by_product(keys=account.get_aws_user_id(),
tagged=True,
date_from=date_from,
date_to=date_to)['months'],
}
for account in accounts
]
if 'csv' in request.args:
return Response(generate_csv(res, 'products', 'product', account=True, tagged=True), mimetype='text/csv')
return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/yearlycostbyproduct', defaults={'nb_years': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/yearlycostbyproduct/<int:nb_years>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_yearlycostbyproduct(accounts, nb_years):
"""---
get:
tags:
- aws
produces:
- application/json
description: &desc Get yearly costs summed by product
summary: *desc
responses:
200:
description: List of AWS accounts
schema:
properties:
years:
type: array
items:
properties:
year:
type: string
products:
type: array
items:
properties:
cost:
type: number
product:
type: string
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
now = datetime.utcnow()
date_from = now.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(years=nb_years - 1)
date_to = now.replace(month=12, day=31, hour=23, minute=59, second=59, microsecond=999999)
data = AWSDetailedLineitem.get_yearly_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
date_from=date_from,
date_to=date_to)['years']
for d in data:
d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
if not len(data):
return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
return jsonify(years=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/months')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_months(accounts):
raw_data = AWSDetailedLineitem.get_first_to_last_date([account.get_aws_user_id() for account in accounts])
if not raw_data:
return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
return jsonify(months=[data.strftime("%Y-%m-01") for data in raw_data])
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/categories')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_month_categories_m(accounts, month):
try:
date_from = datetime.strptime(month, "%Y-%m-%d")
except:
return jsonify(error='Not found.'), 404
raw_data = AWSDetailedLineitem.get_cost_by_resource([account.get_aws_user_id() for account in accounts], date_from=date_from)
cat = []
max_cat = 0
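    # Bucket each resource cost into a power-of-ten category ('<1', '<10',
    # '<100', ...); the largest power of ten seen becomes the open-ended
    # '>...' category appended after the loop.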
for new in raw_data:
x = 1
while new['cost'] > x:
x *= 10
if x >= max_cat:
max_cat = x
elif '<{}'.format(x) not in cat:
cat.append('<{}'.format(x))
cat.append('>{}'.format(max_cat / 10))
return jsonify(categories=cat)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/chart')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_month_chart_m(accounts, month):
# TODO: Use ES agg to categorize
try:
date_from = datetime.strptime(month, "%Y-%m-%d")
except:
return jsonify(error='Not found.'), 404
raw_data = [
AWSDetailedLineitem.get_cost_by_resource(account.get_aws_user_id(), date_from=date_from)
for account in accounts
]
data = []
def get_cat_with_cost(cost):
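        # Return the smallest power of ten greater than or equal to the cost,
        # used as the upper bound of the resource's cost category.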
x = 1
while cost > x:
x *= 10
return x
def add_resource_in_data(new):
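        # Accumulate the resource's cost into the matching '<N' category,
        # creating the category entry on first use.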
new_cat = get_cat_with_cost(new['cost'])
for cat in data:
if cat['category'] == '<{}'.format(new_cat):
cat['total'] += new['cost']
return
data.append(dict(category='<{}'.format(new_cat), total=new['cost']))
for one in raw_data:
for new in one:
add_resource_in_data(new)
if not len(data):
return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
max_cat = 0
for i in range(len(data)):
if len(data[i]['category']) > len(data[max_cat]['category']):
max_cat = i
data[max_cat]['category'] = data[max_cat]['category'][:-1]
data[max_cat]['category'] = data[max_cat]['category'].replace('<', '>', 1)
return jsonify(categories=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/<category>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_m(accounts, month, category):
try:
date_from = datetime.strptime(month, "%Y-%m-%d")
assert category[0] in ['<', '>']
cat = int(category[1:])
except:
return jsonify(error='Not found.'), 404
raw_data = AWSDetailedLineitem.get_cost_by_resource([account.get_aws_user_id() for account in accounts], date_from=date_from)
def transform(r):
r['resource_name'] = r['resource']
return r
minus = category[0] == '<'
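    # '<N' categories keep resources with N/10 <= cost < N, while '>N' keeps
    # resources costing more than N.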
data = [
transform(r)
for r in raw_data
if (minus and cat > r['cost'] >= cat / 10) or (not minus and r['cost'] > cat)
]
if len(data) <= 0:
return jsonify(error='Not found.'), 404
return jsonify(category=dict(resources=data, total=sum([x['cost'] for x in data])))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/search/<search>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_search_m(accounts, month, search):
try:
date_from = datetime.strptime(month, "%Y-%m-%d")
except:
return jsonify(error='Not found.'), 404
raw_data = [
AWSDetailedLineitem.get_cost_by_resource(account.get_aws_user_id(), date_from=date_from, search=search)
for account in accounts
]
def transform(r):
r['resource_name'] = r['resource']
return r
data = [
transform(r)
for raw in raw_data
for r in raw
]
if not len(data):
return jsonify(error='Not found.'), 404
return jsonify(search_result=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/tags')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags(accounts):
tags = AWSDetailedLineitem.get_available_tags([account.get_aws_user_id() for account in accounts])['tags']
if not len(tags):
return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
return jsonify(tags=sorted(tags, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/tags_only_with_data')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags_with_data(accounts):
tags = list(set(itertools.chain.from_iterable(
AWSDetailedLineitem.get_available_tags(account.get_aws_user_id(), only_with_data=account.key)['tags']
for account in accounts
)))
if not len(tags):
return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
return jsonify(tags=sorted(tags, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbytag/<path:tag>', defaults={'nb_months': 5})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbytag/<path:tag>/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_tags_months(accounts, nb_months, tag):
date_to = datetime.now()
    date_from = date_to.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
return jsonify(AWSDetailedLineitem.get_monthly_cost_by_tag([account.get_aws_user_id() for account in accounts], tag, date_from=date_from, date_to=date_to))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/underutilized')
@with_login()
@with_multiple_aws_accounts()
def aws_underutilized_resources(accounts):
return jsonify(AWSMetric.underutilized_resources(account.key for account in accounts))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/underutilizedreducedcost')
@with_login()
@with_multiple_aws_accounts()
def aws_underutilized_resources_reduced_cost(accounts):
now = datetime.utcnow()
date_from = now.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=6)
date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)
resources = AWSMetric.underutilized_resources(account.key for account in accounts)
resource_ids = set(r['id'] for r in resources['resources'])
months = AWSDetailedLineitem.get_monthly_cost_by_resource(resource_ids, date_from=date_from, date_to=date_to)
    # Simply multiply every cost by 20% as all instances' usage is less than
    # 20%. TODO: intelligently find the best type
    res = {
        k: v * 0.2
        for k, v in months.iteritems()
    }
return jsonify(res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/usagecost')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_usagecost(accounts):
def get_account_data(account):
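        # Yield (day, cpu_usage, None) rows from the daily CPU metric and
        # (day, None, cost) rows from the EC2 daily cost; the two streams are
        # merged per day below.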
for date, cpu_usage in dict(AWSMetric.daily_cpu_utilization(account.key)).iteritems():
yield (date, cpu_usage, None)
for date, cost in dict(AWSDetailedLineitem.get_ec2_daily_cost(account.get_aws_user_id())).iteritems():
yield (date, None, cost)
@with_cache()
def get_all_account_data():
return list(
itertools.chain.from_iterable(
get_account_data(account)
for account in accounts
)
)
data = get_all_account_data()
days = {}
for day, cpu_usage, cost in data:
day_data = days.setdefault(day, {'day': day, 'cpu': None, 'cost': None})
if cpu_usage is not None:
day_data['cpu'] = (day_data['cpu'] or 0.0) + cpu_usage
if cost is not None:
day_data['cost'] = (day_data['cost'] or 0.0) + cost
res = sorted([
value
for value in days.itervalues()
if value['cpu'] is not None and value['cost'] is not None # Comment/remove if None values are OK
], key=lambda e: e['day'])
if not res:
return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
return jsonify(days=res)
def _build_list_used_transfer_types(stat_list):
return frozenset(
elem['type']
for bucket in stat_list
for elem in bucket['transfer_stats']
)
def _check_if_in_list(dict_list, value, key):
return next((item for item in dict_list if item[key] == value), None)
def _append_to_header_list(header_list, new_data):
for elem in new_data:
if elem not in header_list:
header_list.append(elem)
return header_list
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3buckettags')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags_for_s3(accounts):
tags = list(set(itertools.chain.from_iterable(
AWSDetailedLineitem.get_available_tags(
account.get_aws_user_id(),
product_name='Simple Storage Service',
)['tags']
for account in accounts
)))
return jsonify(tags=sorted(tags, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3bucketsizepername')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_s3bucketsizepername(accounts):
"""---
get:
tags:
- aws
produces:
- application/csv
      description: &desc Stats about cost and usage of bandwidth and storage on s3 buckets, organised by name
summary: *desc
responses:
200:
          description: Stats about cost and usage of bandwidth and storage on s3 buckets, organised by name
403:
description: Not logged in
404:
description: AWS account not registered
"""
def _create_bandwith_breakdown(transfer_types_list, csv_row, bucket_bandwith_stat):
for elem in transfer_types_list:
_current_transfer_type = _check_if_in_list(bucket_bandwith_stat['transfer_stats'], elem, 'type')
if _current_transfer_type is not None:
                csv_row[elem] = _current_transfer_type['data'] * 1024 * 1024 * 1024  # The data is given in GB by default
return csv_row
def _create_csv_rows(bucket_list, account, bandwith_cost, csv_row_all):
if bucket_list is None:
return []
for bucket in bucket_list['buckets']:
csv_row = {
'account_id': account.get_aws_user_id(),
'used_space': bucket['used_space'],
'name': bucket['name'],
'storage_cost': _check_if_in_list(bucket['prices'], bucket['provider'], 'provider')['cost']
}
bucket_bandwith_stat = _check_if_in_list(bandwith_cost, bucket['name'], 'bucket_name')
if bucket_bandwith_stat is not None:
csv_row = _create_bandwith_breakdown(transfer_types_list, csv_row, bucket_bandwith_stat)
csv_row['bandwith_cost'] = bucket_bandwith_stat['cost'] if bucket_bandwith_stat is not None else 0
csv_row['total_cost'] = csv_row['storage_cost'] + csv_row['bandwith_cost']
csv_row_all.append(csv_row)
return csv_row_all
assert len(accounts) > 0
csv_header = ['account_id', 'name', 'used_space', 'storage_cost', 'bandwith_cost', 'total_cost']
csv_row_all = []
for account in accounts:
bucket_list = AWSStat.latest_s3_space_usage(account)
bucket_ids = [
bucket['name']
for bucket in (bucket_list['buckets'] if bucket_list is not None else [])
]
bandwith_cost = AWSDetailedLineitem.get_s3_bandwith_info_and_cost_per_name(account.get_aws_user_id(), bucket_ids)
transfer_types_list = _build_list_used_transfer_types(bandwith_cost)
csv_header = _append_to_header_list(csv_header, transfer_types_list)
csv_row_all = _create_csv_rows(bucket_list, account, bandwith_cost, csv_row_all)
if len(csv_row_all) > 0 and csv_row_all[0] is None:
csv_row_all = []
if 'csv' in request.args:
return Response(generate_csv_clean(csv_row_all, csv_header))
return jsonify(accounts=csv_row_all)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3bucketsizepertag/<path:tag>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_s3bucketsizepertag(accounts, tag):
"""---
get:
tags:
- aws
produces:
- application/csv
      description: &desc Stats about cost and usage of bandwidth and storage on s3 buckets, organised by tag
summary: *desc
responses:
200:
          description: Stats about cost and usage of bandwidth and storage on s3 buckets, organised by tag
403:
description: Not logged in
404:
description: AWS account not registered
"""
assert len(accounts) > 0
def _get_total_sizes_cost_and_names(bucket_names_list, bucket_list):
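        # Aggregate used space, storage cost and a comma-separated list of
        # names for the buckets that appear in bucket_names_list.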
total_size = 0
total_cost = 0
names = ""
for bucket in bucket_list['buckets']:
if _check_if_in_list(bucket_names_list, bucket['name'], 'bucket_name') is not None:
total_size += float(bucket['used_space'])
total_cost += _check_if_in_list(bucket['prices'], bucket['provider'], 'provider')['cost']
names += bucket['name'] + ", "
return total_size, names[:-2], total_cost
def _get_bandwith_info(account, bucket_names):
bucket_ids = [
bucket
for bucket in (bucket_names if isinstance(bucket_names, list) else [bucket_names])
]
bandwith_cost = AWSDetailedLineitem.get_s3_bandwith_info_and_cost_per_name(account.get_aws_user_id(), bucket_ids)
return bandwith_cost
def _iterate_over_buckets_in_tag_for_total(bucket_bandwith_stat):
total_cost = 0
for bucket in (bucket_bandwith_stat if bucket_bandwith_stat is not None else []):
total_cost += bucket['cost']
return total_cost
def _iterate_over_buckets_and_make_breakdown_bandwith_stat(bucket_bandwith_stat, buff_row_csv, tag_value):
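        # Sum the bandwidth cost over all buckets of this tag and add
        # per-transfer-type byte counts (the stats are reported in GB) to the
        # CSV row.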
bandwith_cost = 0
for bucket in bucket_bandwith_stat:
bandwith_cost += bucket['cost']
for elem in bucket['transfer_stats']:
if elem['type'] in buff_row_csv:
buff_row_csv[elem['type']] += (elem['data'] * 1024 * 1024 * 1024)
else:
buff_row_csv[elem['type']] = (elem['data'] * 1024 * 1024 * 1024)
buff_row_csv['bandwith_cost'] = bandwith_cost
return buff_row_csv
def _build_csv_row_and_add_header(bucket_list_tagged, bucket_list, account, csv_header, csv_row_all):
if bucket_list_tagged is None:
return [], []
for tag_value in bucket_list_tagged['tag_value']:
bucket_info = _get_total_sizes_cost_and_names(tag_value['s3_buckets'], bucket_list)
bucket_bandwith_stat = _get_bandwith_info(account, bucket_info[1])
csv_header = _append_to_header_list(csv_header, _build_list_used_transfer_types(bucket_bandwith_stat))
csv_row = {
"tag_key": bucket_list_tagged['tag_key'].split(':')[1],
"tag_value": tag_value['tag_value'],
"account_id": tag_value['s3_buckets'][0]["account_id"],
"total_size": bucket_info[0],
"bucket_names": bucket_info[1],
"storage_cost": bucket_info[2],
}
csv_row = _iterate_over_buckets_and_make_breakdown_bandwith_stat(bucket_bandwith_stat, csv_row, tag_value)
csv_row['total_cost'] = csv_row['storage_cost'] + csv_row['bandwith_cost']
csv_row_all.append(csv_row)
return csv_header, csv_row_all
def _select_bucket_list_tag(bucket_list_per_tag, tag):
for bucket_list_tagged in bucket_list_per_tag:
if tag in bucket_list_tagged['tag_key'].split(':')[1]:
return bucket_list_tagged
csv_header = ["account_id", "tag_key", "tag_value", "total_size", "bucket_names", "bandwith_cost", "storage_cost", "total_cost"]
csv_data = []
for account in accounts:
bucket_list_per_tag = AWSDetailedLineitem.get_s3_buckets_per_tag(account.get_aws_user_id())
bucket_list_tagged = _select_bucket_list_tag(bucket_list_per_tag, tag)
bucket_list = AWSStat.latest_s3_space_usage(account)
csv_header, csv_data = _build_csv_row_and_add_header(bucket_list_tagged, bucket_list, account, csv_header, csv_data)
if 'csv' in request.args:
return Response(generate_csv_clean(csv_data, csv_header))
return jsonify(res=csv_data)
| giubil/trackit | api/files/api/app/views/aws/cost/stats.py | Python | apache-2.0 | 43,920 | 0.003575 |
from StringIO import StringIO
from datetime import datetime, timedelta
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.tests.util import check_xml_line_by_line
from casexml.apps.case.xml import V1
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.receiverwrapper.models import (
CaseRepeater,
FormRepeater,
RepeatRecord,
)
from couchforms.models import XFormInstance
case_id = "ABC123CASEID"
instance_id = "XKVB636DFYL38FNX3D38WV5EH"
update_instance_id = "ZYXKVB636DFYL38FNX3D38WV5"
case_block = """
<case>
<case_id>%s</case_id>
<date_modified>2011-12-19T00:00:00Z</date_modified>
<create>
<case_type_id>repeater_case</case_type_id>
<user_id>O2XLT0WZW97W1A91E2W1Y0NJG</user_id>
<case_name>ABC 123</case_name>
<external_id>ABC 123</external_id>
</create>
</case>
""" % case_id
update_block = """
<case>
<case_id>%s</case_id>
<date_modified>2011-12-19T00:00:00Z</date_modified>
<update>
<case_name>ABC 234</case_name>
</update>
</case>
""" % case_id
xform_xml_template = """<?xml version='1.0' ?>
<data xmlns:jrm="http://dev.commcarehq.org/jr/xforms" xmlns="https://www.commcarehq.org/test/repeater/">
<woman_name>Alpha</woman_name>
<husband_name>Beta</husband_name>
<meta>
<deviceID>O2XLT0WZW97W1A91E2W1Y0NJG</deviceID>
<timeStart>2011-10-01T15:25:18.404-04</timeStart>
<timeEnd>2011-10-01T15:26:29.551-04</timeEnd>
<username>admin</username>
<userID>O2XLT0WZW97W1A91E2W1Y0NJG</userID>
<instanceID>%s</instanceID>
</meta>
%s
</data>
"""
xform_xml = xform_xml_template % (instance_id, case_block)
update_xform_xml = xform_xml_template % (update_instance_id, update_block)
class RepeaterTest(TestCase):
def setUp(self):
self.client = Client()
self.domain = "test-domain"
create_domain(self.domain)
self.case_repeater = CaseRepeater(
domain=self.domain,
url='case-repeater-url',
version=V1,
)
self.case_repeater.save()
self.form_repeater = FormRepeater(
domain=self.domain,
url='form-repeater-url',
)
self.form_repeater.save()
self.log = []
self.post_xml(xform_xml)
def post_xml(self, xml):
f = StringIO(xml)
f.name = 'form.xml'
self.client.post(
reverse('receiver_post', args=[self.domain]), {
'xml_submission_file': f
}
)
def clear_log(self):
        del self.log[:]
def make_post_fn(self, status_codes):
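        # Return a fake POST function that replies with the given status
        # codes in order and records each (url, status, data, headers) call
        # in self.log.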
status_codes = iter(status_codes)
def post_fn(data, url, headers=None):
status_code = status_codes.next()
self.log.append((url, status_code, data, headers))
class resp:
status = status_code
return resp
return post_fn
def tearDown(self):
self.case_repeater.delete()
self.form_repeater.delete()
XFormInstance.get(instance_id).delete()
repeat_records = RepeatRecord.all()
for repeat_record in repeat_records:
repeat_record.delete()
def test_repeater(self):
CommCareCase.get(case_id)
def now():
return datetime.utcnow()
repeat_records = RepeatRecord.all(domain=self.domain, due_before=now())
self.assertEqual(len(repeat_records), 2)
self.clear_log()
records_by_repeater_id = {}
for repeat_record in repeat_records:
repeat_record.fire(post_fn=self.make_post_fn([404, 404, 404]))
repeat_record.save()
records_by_repeater_id[repeat_record.repeater_id] = repeat_record
for (url, status, data, headers) in self.log:
self.assertEqual(status, 404)
self.clear_log()
next_check_time = now() + timedelta(minutes=60)
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=now() + timedelta(minutes=15),
)
self.assertEqual(len(repeat_records), 0)
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=next_check_time + timedelta(seconds=2),
)
self.assertEqual(len(repeat_records), 2)
for repeat_record in repeat_records:
self.assertLess(abs(next_check_time - repeat_record.next_check),
timedelta(seconds=2))
repeat_record.fire(post_fn=self.make_post_fn([404, 200]))
repeat_record.save()
self.assertEqual(len(self.log), 4)
# The following is pretty fickle and depends on which of
# - corehq.apps.receiverwrapper.signals
# - casexml.apps.case.signals
# gets loaded first.
# This is deterministic but easily affected by minor code changes
# check case stuff
rec = records_by_repeater_id[self.case_repeater.get_id]
self.assertEqual(self.log[1][:2], (self.case_repeater.get_url(rec), 200))
self.assertIn('server-modified-on', self.log[1][3])
check_xml_line_by_line(self, self.log[1][2], case_block)
# check form stuff
rec = records_by_repeater_id[self.form_repeater.get_id]
self.assertEqual(self.log[3][:3],
(self.form_repeater.get_url(rec), 200, xform_xml))
self.assertIn('received-on', self.log[3][3])
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=next_check_time,
)
for repeat_record in repeat_records:
self.assertEqual(repeat_record.succeeded, True)
self.assertEqual(repeat_record.next_check, None)
repeat_records = RepeatRecord.all(domain=self.domain, due_before=now())
self.assertEqual(len(repeat_records), 0)
self.post_xml(update_xform_xml)
repeat_records = RepeatRecord.all(domain=self.domain, due_before=now())
self.assertEqual(len(repeat_records), 2)
class RepeaterLockTest(TestCase):
def testLocks(self):
r = RepeatRecord(domain='test')
r.save()
r2 = RepeatRecord.get(r._id)
self.assertTrue(r.acquire_lock(datetime.utcnow()))
r3 = RepeatRecord.get(r._id)
self.assertFalse(r2.acquire_lock(datetime.utcnow()))
self.assertFalse(r3.acquire_lock(datetime.utcnow()))
r.release_lock()
r4 = RepeatRecord.get(r._id)
self.assertTrue(r4.acquire_lock(datetime.utcnow()))
| SEL-Columbia/commcare-hq | corehq/apps/receiverwrapper/tests/test_repeater.py | Python | bsd-3-clause | 6,713 | 0.000447 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# tacker documentation build configuration file, created by
# sphinx-quickstart on Tue May 31 19:07:30 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'oslosphinx',
'reno.sphinxext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tacker Release Notes'
copyright = u'2016, Tacker Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import pbr.version
tacker_version = pbr.version.VersionInfo('tacker')
release = tacker_version.version_string_with_vcs()
version = tacker_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to
# use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tackerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'TackerReleaseNotes.tex',
u'Tacker Release Notes Documentation',
u'Tacker Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tackerreleasenotes', u'Tacker Release Notes Documentation',
[u'Tacker Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TackerReleaseNotes', u'Tacker Release Notes Documentation',
u'Tacker Developers', 'TackerReleaseNotes',
'Tacker Project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| priya-pp/Tacker | releasenotes/source/conf.py | Python | apache-2.0 | 8,504 | 0.000118 |
#!/usr/bin/env python2
# setup.py
from setuptools import setup, find_packages
setup(name='pumptweet',
version='2.1',
description='Cross posts from Pump.io to Twitter.',
setup_requires=['setuptools-markdown'],
long_description_markdown_filename='README.md',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Topic :: Communications',
],
url='http://github.com/dper/pumptweet',
author='Douglas Paul Perkins',
author_email='contact@dperkins.org',
license='MIT',
packages=['pumptweet'],
install_requires=[
'pypump >= 0.7',
'python-twitter >= 3.1',
],
include_package_data=True,
scripts=[
'pt.py',
'pt.sh',
],
zip_safe=False)
| dper/pumptweet | setup.py | Python | mit | 1,006 | 0.036779 |
from html5lib import treewalkers
from htmlserializer import HTMLSerializer
from xhtmlserializer import XHTMLSerializer
def serialize(input, tree="simpletree", format="html", encoding=None,
**serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
if format == "html":
s = HTMLSerializer(**serializer_opts)
elif format == "xhtml":
s = XHTMLSerializer(**serializer_opts)
else:
raise ValueError, "type must be either html or xhtml"
return s.render(walker(input), encoding)
| bbondy/brianbondy.gae | libs/html5lib/serializer/__init__.py | Python | mit | 585 | 0.005128 |
#pylint: disable=C0111
from django.contrib.auth.models import User
from lettuce import world, step
from lettuce.django import django_url
from common import course_id
from student.models import CourseEnrollment
@step('I view the LTI and it is not rendered$')
def lti_is_not_rendered(_step):
# lti div has no class rendered
assert world.is_css_not_present('div.lti.rendered')
# error is shown
assert world.css_visible('.error_message')
# iframe is not visible
assert not world.css_visible('iframe')
#inside iframe test content is not presented
with world.browser.get_iframe('ltiLaunchFrame') as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_not_present_by_css('.result', wait_time=5)
@step('I view the LTI and it is rendered$')
def lti_is_rendered(_step):
# lti div has class rendered
assert world.is_css_present('div.lti.rendered')
# error is hidden
assert not world.css_visible('.error_message')
# iframe is visible
assert world.css_visible('iframe')
#inside iframe test content is presented
with world.browser.get_iframe('ltiLaunchFrame') as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_present_by_css('.result', wait_time=5)
assert ("This is LTI tool. Success." == world.retry_on_exception(
lambda: iframe.find_by_css('.result')[0].text,
max_attempts=5
))
@step('I view the LTI but incorrect_signature warning is rendered$')
def incorrect_lti_is_rendered(_step):
# lti div has class rendered
assert world.is_css_present('div.lti.rendered')
# error is hidden
assert not world.css_visible('.error_message')
# iframe is visible
assert world.css_visible('iframe')
#inside iframe test content is presented
with world.browser.get_iframe('ltiLaunchFrame') as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_present_by_css('.result', wait_time=5)
assert ("Wrong LTI signature" == world.retry_on_exception(
lambda: iframe.find_by_css('.result')[0].text,
max_attempts=5
))
@step('the course has correct LTI credentials$')
def set_correct_lti_passport(_step):
coursenum = 'test_course'
metadata = {
'lti_passports': ["correct_lti_id:{}:{}".format(
world.lti_server.oauth_settings['client_key'],
world.lti_server.oauth_settings['client_secret']
)]
}
i_am_registered_for_the_course(coursenum, metadata)
@step('the course has incorrect LTI credentials$')
def set_incorrect_lti_passport(_step):
coursenum = 'test_course'
metadata = {
'lti_passports': ["test_lti_id:{}:{}".format(
world.lti_server.oauth_settings['client_key'],
"incorrect_lti_secret_key"
)]
}
i_am_registered_for_the_course(coursenum, metadata)
@step('the course has an LTI component filled with correct fields$')
def add_correct_lti_to_course(_step):
category = 'lti'
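    # Create an LTI component whose lti_id matches the passport configured in
    # set_correct_lti_passport, then open the courseware unit containing it.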
world.ItemFactory.create(
# parent_location=section_location(course),
parent_location=world.scenario_dict['SEQUENTIAL'].location,
category=category,
display_name='LTI',
metadata={
'lti_id': 'correct_lti_id',
'launch_url': world.lti_server.oauth_settings['lti_base'] + world.lti_server.oauth_settings['lti_endpoint']
}
)
course = world.scenario_dict["COURSE"]
chapter_name = world.scenario_dict['SECTION'].display_name.replace(
" ", "_")
section_name = chapter_name
path = "/courses/{org}/{num}/{name}/courseware/{chapter}/{section}".format(
org=course.org,
num=course.number,
name=course.display_name.replace(' ', '_'),
chapter=chapter_name,
section=section_name)
url = django_url(path)
world.browser.visit(url)
@step('the course has an LTI component with incorrect fields$')
def add_incorrect_lti_to_course(_step):
category = 'lti'
world.ItemFactory.create(
parent_location=world.scenario_dict['SEQUENTIAL'].location,
category=category,
display_name='LTI',
metadata={
'lti_id': 'incorrect_lti_id',
'lti_url': world.lti_server.oauth_settings['lti_base'] + world.lti_server.oauth_settings['lti_endpoint']
}
)
course = world.scenario_dict["COURSE"]
chapter_name = world.scenario_dict['SECTION'].display_name.replace(
" ", "_")
section_name = chapter_name
path = "/courses/{org}/{num}/{name}/courseware/{chapter}/{section}".format(
org=course.org,
num=course.number,
name=course.display_name.replace(' ', '_'),
chapter=chapter_name,
section=section_name)
url = django_url(path)
world.browser.visit(url)
def create_course(course, metadata):
# First clear the modulestore so we don't try to recreate
# the same course twice
# This also ensures that the necessary templates are loaded
world.clear_courses()
# Create the course
# We always use the same org and display name,
# but vary the course identifier (e.g. 600x or 191x)
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org='edx',
number=course,
display_name='Test Course',
metadata=metadata
)
# Add a section to the course to contain problems
world.scenario_dict['SECTION'] = world.ItemFactory.create(
parent_location=world.scenario_dict['COURSE'].location,
display_name='Test Section'
)
world.scenario_dict['SEQUENTIAL'] = world.ItemFactory.create(
parent_location=world.scenario_dict['SECTION'].location,
category='sequential',
display_name='Test Section')
def i_am_registered_for_the_course(course, metadata):
# Create the course
create_course(course, metadata)
# Create the user
world.create_user('robot', 'test')
usr = User.objects.get(username='robot')
# If the user is not already enrolled, enroll the user.
CourseEnrollment.enroll(usr, course_id(course))
world.log_in(username='robot', password='test')
| praveen-pal/edx-platform | lms/djangoapps/courseware/features/lti.py | Python | agpl-3.0 | 6,277 | 0.000956 |
"""
Functions performing URL trimming and cleaning
"""
## This file is available from https://github.com/adbar/courlan
## under GNU GPL v3 license
import logging
import re
from collections import OrderedDict
from urllib.parse import parse_qs, urlencode, urlparse, ParseResult
from .filters import validate_url
from .settings import ALLOWED_PARAMS, CONTROL_PARAMS,\
TARGET_LANG_DE, TARGET_LANG_EN
PROTOCOLS = re.compile(r'https?://')
SELECTION = re.compile(r'(https?://[^">&? ]+?)(?:https?://)|(?:https?://[^/]+?/[^/]+?[&?]u(rl)?=)(https?://[^"> ]+)')
MIDDLE_URL = re.compile(r'https?://.+?(https?://.+?)(?:https?://|$)')
NETLOC_RE = re.compile(r'(?<=\w):(?:80|443)')
PATH1 = re.compile(r'/+')
PATH2 = re.compile(r'^(?:/\.\.(?![^/]))+')
def clean_url(url, language=None):
'''Helper function: chained scrubbing and normalization'''
try:
return normalize_url(scrub_url(url), language)
except (AttributeError, ValueError):
return None
def scrub_url(url):
'''Strip unnecessary parts and make sure only one URL is considered'''
# trim
# https://github.com/cocrawler/cocrawler/blob/main/cocrawler/urls.py
# remove leading and trailing white space and unescaped control chars
url = url.strip('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \r\n')
# clean the input string
    url = re.sub(r'[ \t]+', '', url)
# <![CDATA[http://www.urbanlife.de/item/260-bmw-i8-hybrid-revolution-unter-den-sportwagen.html]]>
if url.startswith('<![CDATA['): # re.match(r'<!\[CDATA\[', url):
url = url.replace('<![CDATA[', '') # url = re.sub(r'^<!\[CDATA\[', '', url)
url = url.replace(']]>', '') # url = re.sub(r'\]\]>$', '', url)
# markup rests
url = re.sub(r'</?a>', '', url)
# &
if '&' in url:
url = url.replace('&', '&')
#if '"' in link:
# link = link.split('"')[0]
# double/faulty URLs
protocols = PROTOCOLS.findall(url)
if len(protocols) > 1 and not 'web.archive.org' in url:
logging.debug('double url: %s %s', len(protocols), url)
match = SELECTION.match(url)
if match and validate_url(match.group(1))[0] is True:
url = match.group(1)
logging.debug('taking url: %s', url)
else:
match = MIDDLE_URL.match(url)
if match and validate_url(match.group(1))[0] is True:
url = match.group(1)
logging.debug('taking url: %s', url)
# too long and garbled URLs e.g. due to quotes URLs
# https://github.com/cocrawler/cocrawler/blob/main/cocrawler/urls.py
if len(url) > 500: # arbitrary choice
match = re.match(r'(.*?)[<>"\'\r\n ]', url)
if match:
url = match.group(1)
if len(url) > 500:
logging.debug('invalid-looking link %s of length %d',
url[:50] + '...', len(url))
# trailing ampersand
url = url.strip('&')
# trailing slashes in URLs without path or in embedded URLs
if url.count('/') == 3 or url.count('://') > 1:
url = url.rstrip('/')
# lower
# url = url.lower()
return url
def clean_query(parsed_url, strict=False, language=None):
'''Strip unwanted query elements'''
if len(parsed_url.query) > 0:
qdict = parse_qs(parsed_url.query)
newqdict = OrderedDict()
for qelem in sorted(qdict.keys()):
teststr = qelem.lower()
# control param
if strict is True and \
teststr not in ALLOWED_PARAMS and teststr not in CONTROL_PARAMS:
continue
# control language
if language is not None and teststr in CONTROL_PARAMS:
found_lang = str(qdict[qelem][0])
if (language == 'de' and found_lang not in TARGET_LANG_DE) or \
(language == 'en' and found_lang not in TARGET_LANG_EN) or \
found_lang != language:
logging.debug('bad lang: %s %s %s', language, qelem, found_lang)
raise ValueError
# insert
newqdict[qelem] = qdict[qelem]
newstring = urlencode(newqdict, doseq=True)
parsed_url = parsed_url._replace(query=newstring)
return parsed_url
def normalize_url(parsed_url, strict=False, language=None):
'''Takes a URL string or a parsed URL and returns a (basically) normalized URL string'''
if not isinstance(parsed_url, ParseResult):
parsed_url = urlparse(parsed_url)
# port
if parsed_url.port is not None and parsed_url.port in (80, 443):
parsed_url = parsed_url._replace(netloc=NETLOC_RE.sub('', parsed_url.netloc))
# path: https://github.com/saintamh/alcazar/blob/master/alcazar/utils/urls.py
newpath = PATH1.sub('/', parsed_url.path)
# Leading /../'s in the path are removed
newpath = PATH2.sub('', newpath)
# fragment
if strict is True:
newfragment = ''
else:
newfragment = parsed_url.fragment
# lowercase + remove fragments
parsed_url = parsed_url._replace(
scheme=parsed_url.scheme.lower(),
netloc=parsed_url.netloc.lower(),
path=newpath,
fragment=newfragment
)
# strip unwanted query elements
parsed_url = clean_query(parsed_url, strict, language)
# rebuild
return parsed_url.geturl()
| adbar/url-tools | courlan/clean.py | Python | gpl-2.0 | 5,520 | 0.002899 |
#!/usr/bin/python
import sys
sys.path.append("../../")
#import pyRay as ra
import pyRay.scene as scn
# TODO : how to pass arguments from function header?
object1 = ("obj1",(), [( "U","sdBox" ,"%s",((1.0,1.0,1.0),) ),( "S","sdSphere","%s",(1.2,) )])
object2 = ("obj1",("f","f3"),[( "U","sdBox" ,"%s",("2",) ),( "S","sdSphere","%s",("1",) )])
object3 = ("obj2",("f","f2"),[( "U","sdBox" ,"%s",(("2",1.0),) ),( "S","sdSphere","%s",("1",) )])
scene = [
( "U","sdBox" ,"%s",((1.0,1.0,1.0),) ),
( "S","sdSphere","%s",(1.2,) ),
]
scene_src = scn.parseSceneList(scene)
print scene_src
| ProkopHapala/SimpleSimulationEngine | python/pyRay/tests/testSceneList.py | Python | mit | 624 | 0.110577 |
#!/usr/bin/env python
"""Flows to collect file contents and metadata."""
from typing import Any, Mapping, Optional
from grr_response_core import config
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import flow_base
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
_MAX_FILE_SIZE = 1024 * 1024 * 1024 * 10 # 10 GiB.
# Although MultiGetFileLogic is a leaky, complex, and overall problematic mixin,
# it seems to be the best choice to fetch the stat, hashes, and contents of a file.
# At the time of writing, none of the flows exposed all three to the caller in
# a sensible way.
class CollectSingleFile(transfer.MultiGetFileLogic, flow_base.FlowBase):
"""Fetches contents of a single file from the specified absolute path."""
friendly_name = "File content"
category = "/Filesystem/"
args_type = rdf_file_finder.CollectSingleFileArgs
result_types = (rdf_file_finder.CollectSingleFileResult,)
progress_type = rdf_file_finder.CollectSingleFileProgress
behaviours = flow_base.BEHAVIOUR_DEBUG
def GetProgress(self) -> rdf_file_finder.CollectSingleFileProgress:
return self.state.progress
def Start(self):
super().Start(file_size=self.args.max_size_bytes)
self.state.progress = rdf_file_finder.CollectSingleFileProgress(
status=rdf_file_finder.CollectSingleFileProgress.Status.IN_PROGRESS)
pathspec = rdf_paths.PathSpec.OS(path=self.args.path)
self.StartFileFetch(pathspec)
def ReceiveFetchedFile(self,
stat_entry,
hash_obj,
request_data=None,
is_duplicate=False):
"""See MultiGetFileLogic."""
del request_data, is_duplicate # Unused.
result = rdf_file_finder.CollectSingleFileResult(
stat=stat_entry, hash=hash_obj)
self.SendReply(result)
self.state.progress.result = result
self.state.progress.status = (
rdf_file_finder.CollectSingleFileProgress.Status.COLLECTED)
def FileFetchFailed(self,
pathspec: rdf_paths.PathSpec,
request_data: Any = None,
status: Optional[rdf_flow_objects.FlowStatus] = None):
"""See MultiGetFileLogic."""
if (self.client_os == "Windows" and
pathspec.pathtype == rdf_paths.PathSpec.PathType.OS):
# Retry with raw filesystem access on Windows,
# the file might be locked for reads.
raw_pathspec = rdf_paths.PathSpec(
path=self.args.path,
pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"])
self.StartFileFetch(raw_pathspec)
elif status is not None and status.error_message:
error_description = "{} when fetching {} with {}".format(
status.error_message, pathspec.path, pathspec.pathtype)
# TODO: this is a really bad hack and should be fixed by
# passing the 'not found' status in a more structured way.
if "File not found" in status.error_message:
self.state.progress.status = rdf_file_finder.CollectSingleFileProgress.Status.NOT_FOUND
else:
self.state.progress.status = rdf_file_finder.CollectSingleFileProgress.Status.FAILED
self.state.progress.error_description = error_description
raise flow_base.FlowError(error_description)
else:
error_description = (
"File {} could not be fetched with {} due to an unknown error. "
"Check the flow logs.".format(pathspec.path, pathspec.pathtype))
self.state.progress.status = rdf_file_finder.CollectSingleFileProgress.Status.FAILED
self.state.progress.error_description = error_description
raise flow_base.FlowError(error_description)
@classmethod
def GetDefaultArgs(cls, username=None):
"""See base class."""
del username # Unused.
return rdf_file_finder.CollectSingleFileArgs(
path="", max_size_bytes="1 GiB")
# Although MultiGetFileLogic is a leaky, complex, and overall problematic mixin,
# it seems to be the best choice to fetch the stat, hashes, and contents of a file.
# At the time of writing, none of the flows exposed all three to the caller in
# a sensible way.
class CollectFilesByKnownPath(transfer.MultiGetFileLogic, flow_base.FlowBase):
"""Fetches specified absolute path file contents."""
friendly_name = "File contents by exact path"
category = "/Filesystem/"
behaviours = flow_base.BEHAVIOUR_DEBUG
args_type = rdf_file_finder.CollectFilesByKnownPathArgs
result_types = (rdf_file_finder.CollectFilesByKnownPathResult,)
progress_type = rdf_file_finder.CollectFilesByKnownPathProgress
def GetProgress(self) -> rdf_file_finder.CollectFilesByKnownPathProgress:
return self.state.progress
def Start(self):
super().Start(file_size=_MAX_FILE_SIZE)
self.state.progress = rdf_file_finder.CollectFilesByKnownPathProgress(
num_in_progress=0,
num_raw_fs_access_retries=0,
num_collected=0,
num_failed=0,
)
if self.args.collection_level == rdf_file_finder.CollectFilesByKnownPathArgs.CollectionLevel.STAT:
self.state.stop_at_stat = True
elif self.args.collection_level == rdf_file_finder.CollectFilesByKnownPathArgs.CollectionLevel.HASH:
self.state.stop_at_hash = True
for path in self.args.paths:
pathspec = rdf_paths.PathSpec.OS(path=path)
self.StartFileFetch(
pathspec, request_data=dict(requested_pathspec=pathspec))
self.state.progress.num_in_progress += 1
def ReceiveFetchedFileStat(self,
stat_entry: rdf_client_fs.StatEntry,
request_data: Optional[Mapping[str, Any]] = None):
"""This method will be called for each new file stat successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
"""
del request_data # Unused.
if self.state.stop_at_stat:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED
self.state.progress.num_in_progress -= 1
self.state.progress.num_collected += 1
else:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.IN_PROGRESS
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=stat_entry, status=status)
self.SendReply(result)
def ReceiveFetchedFileHash(self,
stat_entry: rdf_client_fs.StatEntry,
file_hash: rdf_crypto.Hash,
request_data: Optional[Mapping[str, Any]] = None):
"""This method will be called for each new file hash successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
"""
del request_data # Unused.
if self.state.stop_at_hash:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED
self.state.progress.num_in_progress -= 1
self.state.progress.num_collected += 1
else:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.IN_PROGRESS
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=stat_entry, hash=file_hash, status=status)
self.SendReply(result)
def ReceiveFetchedFile(self,
stat_entry: rdf_client_fs.StatEntry,
file_hash: rdf_crypto.Hash,
request_data: Optional[Mapping[str, Any]] = None,
is_duplicate: bool = False):
"""This method will be called for each new file successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
is_duplicate: If True, the file wasn't actually collected as its hash was
found in the filestore.
"""
del request_data, is_duplicate # Unused.
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=stat_entry,
hash=file_hash,
status=rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED)
self.SendReply(result)
self.state.progress.num_in_progress -= 1
self.state.progress.num_collected += 1
def FileFetchFailed(self,
pathspec: rdf_paths.PathSpec,
request_data: Optional[Mapping[str, Any]] = None,
status: Optional[rdf_flow_objects.FlowStatus] = None):
"""This method will be called when stat or hash requests fail.
Args:
pathspec: Pathspec of a file that failed to be fetched.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
status: FlowStatus that contains more error details.
"""
requested_pathspec = request_data["requested_pathspec"]
if (self.client_os == "Windows" and
pathspec.pathtype == rdf_paths.PathSpec.PathType.OS):
# Retry with raw filesystem access on Windows, the file might be locked
# for reads.
raw_pathspec = rdf_paths.PathSpec(
path=requested_pathspec.path,
pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"])
self.StartFileFetch(
raw_pathspec, request_data=dict(requested_pathspec=raw_pathspec))
self.state.progress.num_raw_fs_access_retries += 1
else:
if status is not None and status.error_message:
error_description = "{} when fetching {} with {}".format(
status.error_message, pathspec.path, pathspec.pathtype)
# TODO: This is a really bad hack and should be fixed by
# passing the 'not found' status in a more structured way.
if "File not found" in status.error_message:
file_status = rdf_file_finder.CollectFilesByKnownPathResult.Status.NOT_FOUND
else:
file_status = rdf_file_finder.CollectFilesByKnownPathResult.Status.FAILED
else:
error_description = (
"File {} could not be fetched with {} due to an unknown error. "
"Check the flow logs.".format(pathspec.path, pathspec.pathtype))
file_status = rdf_file_finder.CollectFilesByKnownPathResult.Status.FAILED
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=rdf_client_fs.StatEntry(pathspec=requested_pathspec),
error=error_description,
status=file_status,
)
self.SendReply(result)
self.state.progress.num_in_progress -= 1
self.state.progress.num_failed += 1
class CollectMultipleFiles(transfer.MultiGetFileLogic, flow_base.FlowBase):
"""Fetches contents of files by searching for path expressions."""
friendly_name = "Collect multiple files"
category = "/Filesystem/"
args_type = rdf_file_finder.CollectMultipleFilesArgs
result_types = (rdf_file_finder.CollectMultipleFilesResult,)
progress_type = rdf_file_finder.CollectMultipleFilesProgress
behaviours = flow_base.BEHAVIOUR_DEBUG
MAX_FILE_SIZE = 1024 * 1024 * 1024 * 10 # 10GiB
def GetProgress(self) -> rdf_file_finder.CollectMultipleFilesProgress:
return self.state.progress
def Start(self):
"""See base class."""
super().Start(file_size=self.MAX_FILE_SIZE)
self.state.progress = rdf_file_finder.CollectMultipleFilesProgress(
num_found=0,
num_in_progress=0,
num_raw_fs_access_retries=0,
num_collected=0,
num_failed=0,
)
conditions = []
if self.args.HasField("modification_time"):
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.MODIFICATION_TIME,
modification_time=self.args.modification_time,
))
if self.args.HasField("access_time"):
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.ACCESS_TIME,
access_time=self.args.access_time,
))
if self.args.HasField("inode_change_time"):
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.INODE_CHANGE_TIME,
inode_change_time=self.args.inode_change_time,
))
if self.args.HasField("size"):
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.SIZE,
size=self.args.size,
))
if self.args.HasField("ext_flags"):
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.EXT_FLAGS,
ext_flags=self.args.ext_flags,
))
if self.args.HasField("contents_regex_match"):
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.CONTENTS_REGEX_MATCH,
contents_regex_match=self.args.contents_regex_match,
))
if self.args.HasField("contents_literal_match"):
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.CONTENTS_LITERAL_MATCH,
contents_literal_match=self.args.contents_literal_match,
))
file_finder_args = rdf_file_finder.FileFinderArgs(
paths=self.args.path_expressions,
pathtype=rdf_paths.PathSpec.PathType.OS,
conditions=conditions,
action=rdf_file_finder.FileFinderAction.Stat())
self.CallFlow(
file_finder.ClientFileFinder.__name__,
flow_args=file_finder_args,
next_state=self.ProcessFiles.__name__)
def ProcessFiles(self, responses):
if not responses.success:
raise flow_base.FlowError(responses.status.error_message)
for response in responses:
pathspec = response.stat_entry.pathspec
self.StartFileFetch(pathspec, request_data=dict(original_result=response))
self.state.progress.num_found += 1
self.state.progress.num_in_progress += 1
def ReceiveFetchedFile(self,
stat_entry,
hash_obj,
request_data=None,
is_duplicate=False):
"""See MultiGetFileLogic."""
del request_data, is_duplicate # Unused.
result = rdf_file_finder.CollectMultipleFilesResult(
stat=stat_entry,
hash=hash_obj,
status=rdf_file_finder.CollectMultipleFilesResult.Status.COLLECTED)
self.SendReply(result)
self.state.progress.num_in_progress = max(
0, self.state.progress.num_in_progress - 1)
self.state.progress.num_collected += 1
def FileFetchFailed(self,
pathspec: rdf_paths.PathSpec,
request_data: Any = None,
status: Optional[rdf_flow_objects.FlowStatus] = None):
"""See MultiGetFileLogic."""
original_result = request_data["original_result"]
if (self.client_os == "Windows" and
pathspec.pathtype == rdf_paths.PathSpec.PathType.OS):
# Retry with raw filesystem access on Windows,
# the file might be locked for reads.
raw_pathspec = rdf_paths.PathSpec(
          # CollectMultipleFilesArgs defines path_expressions rather than a
          # single 'path', so reuse the path of the pathspec that just failed.
          path=pathspec.path,
          pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"])
      # Keep the original FileFinder result attached so a second failure can
      # still be reported.
      self.StartFileFetch(
          raw_pathspec, request_data=dict(original_result=original_result))
self.state.progress.num_raw_fs_access_retries += 1
else:
if status is not None and status.error_message:
error_description = "{} when fetching {} with {}".format(
status.error_message, pathspec.path, pathspec.pathtype)
else:
error_description = (
"File {} could not be fetched with {} due to an unknown error. "
"Check the flow logs.".format(pathspec.path, pathspec.pathtype))
result = rdf_file_finder.CollectMultipleFilesResult(
stat=original_result.stat_entry,
error=error_description,
status=rdf_file_finder.CollectMultipleFilesResult.Status.FAILED,
)
self.SendReply(result)
self.state.progress.num_in_progress = max(
0, self.state.progress.num_in_progress - 1)
| google/grr | grr/server/grr_response_server/flows/file.py | Python | apache-2.0 | 16,923 | 0.006736 |
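# A minimal sketch of the collection-level gating used by CollectFilesByKnownPath
# above (assumption: plain-Python stand-ins only; Level and the status strings
# are hypothetical names, not GRR's RDF value types).
import enum

class Level(enum.Enum):
    STAT = 1     # stop once the stat entry has been received
    HASH = 2     # stop once the hash has been received
    CONTENT = 3  # fetch full file contents

def status_after_stat(level):
    # Mirrors ReceiveFetchedFileStat: STAT-level collections finish here.
    return 'COLLECTED' if level is Level.STAT else 'IN_PROGRESS'

def status_after_hash(level):
    # Mirrors ReceiveFetchedFileHash: HASH-level collections finish here.
    return 'COLLECTED' if level is Level.HASH else 'IN_PROGRESS'

assert status_after_stat(Level.STAT) == 'COLLECTED'
assert status_after_hash(Level.CONTENT) == 'IN_PROGRESS'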
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Count
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
from rapidsms.models import Contact
from groups.models import Group
from groups.forms import GroupForm, ContactForm
@login_required
def list_groups(request):
groups = Group.objects.annotate(count=Count('contacts')).order_by('name')
return render(request, 'groups/groups/list.html', {
'groups': groups,
})
@login_required
@transaction.commit_on_success
def create_edit_group(request, group_id=None):
group = None
if group_id:
group = get_object_or_404(Group, pk=group_id)
if not group.is_editable:
return HttpResponseForbidden('Access denied')
if request.method == 'POST':
form = GroupForm(request.POST, instance=group)
if form.is_valid():
form.save()
messages.info(request, 'Group saved successfully')
return HttpResponseRedirect(reverse('list-groups'))
else:
form = GroupForm(instance=group)
return render(request, 'groups/groups/create_edit.html', {
'form': form,
'group': group,
})
@login_required
@transaction.commit_on_success
def delete_group(request, group_id):
group = get_object_or_404(Group, pk=group_id)
if not group.is_editable:
return HttpResponseForbidden('Access denied')
if request.method == 'POST':
group.delete()
messages.info(request, 'Group successfully deleted')
return HttpResponseRedirect(reverse('list-groups'))
return render(request, 'groups/groups/delete.html', {
'group': group,
})
@login_required
def list_contacts(request):
contacts = Contact.objects.all().order_by('name')
return render(request, 'groups/contacts/list.html', {
'contacts': contacts,
})
@login_required
@transaction.commit_on_success
def create_edit_contact(request, contact_id=None):
contact = None
if contact_id:
contact = get_object_or_404(Contact, pk=contact_id)
if request.method == 'POST':
form = ContactForm(request.POST, instance=contact)
if form.is_valid():
form.save()
messages.info(request, 'Contact saved successfully')
return HttpResponseRedirect(reverse('list-contacts'))
else:
form = ContactForm(instance=contact)
return render(request, 'groups/contacts/create_edit.html', {
'form': form,
'contact': contact,
})
@login_required
@transaction.commit_on_success
def delete_contact(request, contact_id):
contact = get_object_or_404(Contact, pk=contact_id)
if request.method == 'POST':
contact.delete()
messages.info(request, 'Contact successfully deleted')
return HttpResponseRedirect(reverse('list-contacts'))
return render(request, 'groups/contacts/delete.html', {
'contact': contact,
})
| caktus/rapidsms-groups | groups/views.py | Python | bsd-3-clause | 3,191 | 0 |
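# A hypothetical URLconf wiring for the views above (assumption: the project's
# real urls.py is not shown here; only the 'list-groups' and 'list-contacts'
# route names are taken from the reverse() calls, the rest are illustrative).
from django.conf.urls import url

from groups import views

urlpatterns = [
    url(r'^groups/$', views.list_groups, name='list-groups'),
    url(r'^groups/create/$', views.create_edit_group, name='create-group'),
    url(r'^groups/(?P<group_id>\d+)/edit/$', views.create_edit_group,
        name='edit-group'),
    url(r'^groups/(?P<group_id>\d+)/delete/$', views.delete_group,
        name='delete-group'),
    url(r'^contacts/$', views.list_contacts, name='list-contacts'),
    url(r'^contacts/create/$', views.create_edit_contact, name='create-contact'),
    url(r'^contacts/(?P<contact_id>\d+)/edit/$', views.create_edit_contact,
        name='edit-contact'),
    url(r'^contacts/(?P<contact_id>\d+)/delete/$', views.delete_contact,
        name='delete-contact'),
]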
T = raw_input().lower()
vowels = "aeiouy"
output = ""
for i in range(0,len(T)):
if T[i] not in vowels:
output += "." + T[i]
print output
| Dawny33/Code | Code_Forces/A_string_task.py | Python | gpl-3.0 | 152 | 0.006579 |
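# The same task rendered in Python 3 (assumption: an illustrative rewrite of
# the Python 2 snippet above, not the original submission).
def strip_vowels(text):
    vowels = set("aeiouy")
    return "".join("." + ch for ch in text.lower() if ch not in vowels)

if __name__ == "__main__":
    print(strip_vowels(input()))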
#!python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Additional presubmit script. This will be run for changes to files in this
# subdirectory, as well as the root syzygy/PRESUBMIT.py.
#
# This script will be read as a string and intepreted, so __file__ is not
# available. However, it is guaranteed to be run with this file's directory as
# the current working directory.
def CheckChange(input_api, output_api, dummy_committing):
# We only check Python files in this tree. The others are checked by the
# PRESUBMIT in the root Syzygy directory.
white_list = [r'^.*\.py$']
black_list = []
disabled_warnings = []
results = input_api.canned_checks.RunPylint(
input_api,
output_api,
white_list=white_list,
black_list=black_list,
disabled_warnings=disabled_warnings)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api, False)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api, True)
| wangming28/syzygy | syzygy/scripts/test_bot/PRESUBMIT.py | Python | apache-2.0 | 1,582 | 0.005689 |
"""Remove EASFolderSyncStatus + Folder rows for folders we never sync
Revision ID: bb4f204f192
Revises: 2a748760ac63
Create Date: 2014-07-19 00:28:08.258857
"""
# revision identifiers, used by Alembic.
revision = 'bb4f204f192'
down_revision = '2a748760ac63'
from inbox.ignition import engine
from inbox.models.session import session_scope
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.exc import NoResultFound
Base = declarative_base()
Base.metadata.reflect(engine)
def upgrade():
if 'easfoldersyncstatus' in Base.metadata.tables:
from inbox.models.backends.eas import EASFolderSyncStatus
from inbox.models import Folder
from inbox.util.eas.constants import SKIP_FOLDERS
with session_scope(versioned=False, ignore_soft_deletes=False) as \
db_session:
statuses = db_session.query(EASFolderSyncStatus).filter(
EASFolderSyncStatus.eas_folder_type.in_(SKIP_FOLDERS)).all()
for s in statuses:
db_session.delete(s)
db_session.delete(s.folder)
try:
for status in db_session.query(EASFolderSyncStatus)\
.join(Folder).filter(
Folder.name == 'RecipientInfo').all():
db_session.delete(status)
db_session.delete(status.folder)
except NoResultFound:
pass
db_session.commit()
def downgrade():
raise Exception("Nope, not needed.")
| rmasters/inbox | migrations/versions/061_remove_easfoldersyncstatus_folder_rows_.py | Python | agpl-3.0 | 1,547 | 0.002586 |
# -*- coding: utf-8 -*-
# Copyright 2013 Julien Goret <jgoret@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords, split_keyword
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import urllib
logger = logging.getLogger("subliminal")
class Usub(ServiceBase):
server_url = 'http://www.u-sub.net/sous-titres'
site_url = 'http://www.u-sub.net/'
api_based = False
languages = language_set(['fr'])
videos = [Episode]
require_video = False
#required_features = ['permissive']
def list_checked(self, video, languages):
return self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode)
def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None):
        ## Check if we really got information about our episode
if series and season and episode:
request_series = series.lower().replace(' ', '-')
if isinstance(request_series, unicode):
request_series = request_series.encode('utf-8')
logger.debug(u'Getting subtitles for %s season %d episode %d with language %r' % (series, season, episode, languages))
r = self.session.get('%s/%s/saison_%s' % (self.server_url, urllib.quote(request_series),season))
if r.status_code == 404:
print "Error 404"
logger.debug(u'Could not find subtitles for %s' % (series))
return []
else:
print "One or more parameter missing"
raise ServiceError('One or more parameter missing')
        ## Check that we didn't get a big and nasty HTTP error
if r.status_code != 200:
print u'Request %s returned status code %d' % (r.url, r.status_code)
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
return []
        ## Editing episode information to be able to use it in our search
if episode < 10 :
episode_num='0'+str(episode)
else :
episode_num=str(episode)
season_num = str(season)
series_name = series.lower().replace(' ', '.')
possible_episode_naming = [season_num+'x'+episode_num,season_num+episode_num]
## Actually parsing the page for the good subtitles
soup = BeautifulSoup(r.content, self.required_features)
subtitles = []
subtitles_list = soup.find('table', {'id' : 'subtitles_list'})
link_list = subtitles_list.findAll('a', {'class' : 'dl_link'})
for link in link_list :
link_url = link.get('href')
splited_link = link_url.split('/')
filename = splited_link[len(splited_link)-1]
for episode_naming in possible_episode_naming :
if episode_naming in filename :
for language in languages:
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s' % (link_url))
subtitles.append(subtitle)
return subtitles
def download(self, subtitle):
## All downloaded files are zip files
self.download_zip_file(subtitle.link, subtitle.path)
return subtitle
Service = Usub
| kikokubo/Sick-Beard-TPB | lib/subliminal/services/usub.py | Python | gpl-3.0 | 4,305 | 0.00813 |
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SPLINE:
Ortho = 'ortho'
| ipa-led/airbus_coop | airbus_docgen/src/airbus_docgen/digraph/spline.py | Python | apache-2.0 | 717 | 0.001395 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import importlib
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = "accounts"
verbose_name = "Accounts"
version = "0.1.1"
def ready(self):
importlib.import_module("accounts.getters")
importlib.import_module("accounts.receivers")
| unho/pootle | pootle/apps/accounts/apps.py | Python | gpl-3.0 | 566 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
## ##
# Author: Peter Manev #
# peter.manev@openinfosecfoundation.org #
## ##
## !!! IMPORTANT - LATEST DEV Scapy is needed !!!
# REMOVE your current scapy installation !!!
# then ->
# hg clone http://hg.secdev.org/scapy-com
# python setup.py install
from scapy.all import *
import sys, urllib , os, subprocess, random
from itertools import *
import Global_Vars
class pacifyIpv4Http:
def writeIPv4HttpRule(self, sid_id_http, http_method, http_uri_string, \
http_content_all, directory, src_name):
##creating and writing a sid.rules file
rule_file = open('%s/%s.rules' % (directory,sid_id_http), 'w+')
    content_http_uri_string_ready_for_rule = ""
if (len(http_uri_string) > 250):
content_http_uri_string_array = [http_uri_string[i:i+250] for i in range(0, len(http_uri_string), 250)]
for i in content_http_uri_string_array:
i = i.replace('|', '|7C|').replace('"', '|22|').replace(';', '|3B|').\
replace(':', '|3A|').replace(' ', '|20|').replace('\\', '|5C|').\
replace('\'', '|27|').replace('\r', '|0d|').replace('\n', '|0a|')
content_http_uri_string_ready_for_rule = \
content_http_uri_string_ready_for_rule + \
("content:\"%s\"; http_raw_uri; " % (i))
else:
http_uri_string = http_uri_string.replace('|', '|7C|').\
replace('"', '|22|').replace(';', '|3B|').replace(':', '|3A|').\
replace(' ', '|20|').replace('\\', '|5C|').replace('\'', '|27|').\
replace('\r', '|0d|').replace('\n', '|0a|')
content_http_uri_string_ready_for_rule = \
("content:\"%s\"; http_raw_uri; " % (http_uri_string))
    content_all_ready_for_rule = ""
if (len(http_content_all) > 250):
content_http_all_array = [http_content_all[i:i+250] for i in range(0, len(http_content_all), 250)]
for i in content_http_all_array:
i = i.replace('|', '|7C|').replace('"', '|22|').replace(';', '|3B|').\
replace(':', '|3A|').replace(' ', '|20|').replace('\\', '|5C|').\
replace('\'', '|27|').replace('\r', '|0d|').replace('\n', '|0a|')
content_all_ready_for_rule = \
content_all_ready_for_rule + \
("content:\"%s\"; " % (i))
else:
http_content_all = http_content_all.replace('|', '|7C|').\
replace('"', '|22|').replace(';', '|3B|').replace(':', '|3A|').\
replace(' ', '|20|').replace('\\', '|5C|').replace('\'', '|27|').\
replace('\r', '|0d|').replace('\n', '|0a|')
content_all_ready_for_rule = \
("content:\"%s\"; " % (http_content_all))
rule_file.write ( \
"alert http any any -> any any (msg:\"HTTP requests tests - sid %s , \
pcap - %s \"; \
content:\"%s\"; http_method; %s %s \
reference:url,%s; sid:%s; rev:1;)" % \
(sid_id_http, sid_id_http, http_method, \
content_http_uri_string_ready_for_rule, \
content_all_ready_for_rule, \
src_name, sid_id_http) )
rule_file.close()
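  # For illustration (hypothetical values), a generated <sid>.rules file holds
  # one rule shaped roughly like:
  #   alert http any any -> any any (msg:"HTTP requests tests - sid 2100001 , \
  #     pcap - 2100001 "; content:"GET"; http_method; \
  #     content:"/index.php?id=1"; http_raw_uri; content:"GET|20|/index..."; \
  #     reference:url,<src_name>; sid:2100001; rev:1;)
  # URI and payload strings longer than 250 bytes are split across several
  # content: matches, and special characters are hex-escaped (e.g. " " -> |20|,
  # '"' -> |22|, ";" -> |3B|) so the rule stays parseable.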
def rebuildIPv4HttpSessionExtraTcpSAs(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#We rebuild the http session , however inject some extra SAs
session_packets = list()
session_packets_fragmented = list()
#print packet[TCP][Raw]
#print packet[Ether].src
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint((2**10),(2**16))
# We make sure ack_num_extra* are never going to be the same numbering
# as ack_num
ack_num_extra_1 = random.randint((2**22)+1 , (2**32)-1)
ack_num_extra_2 = random.randint((2**16)+1,(2**22)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
synack_extra_1 = Ether(src=packet[Ether].dst, dst=packet[Ether].src, \
type=0x800 )/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, \
dport=portsrc, seq=ack_num_extra_1, ack=syn.seq+1)
synack_extra_2 = Ether(src=packet[Ether].dst, dst=packet[Ether].src, \
type=0x800 )/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, \
dport=portsrc, seq=ack_num_extra_2, ack=syn.seq+1)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
p_frag_synack = fragment(synack, fragsize=1 )
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
    ##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
    ##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
##
# Here we start ordering the stream so that we have 3 SAs. The extra ones are
# BEFORE the real one. For the purpose of thoroughness we also
# add cases where the real SA arrives fragmented.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack_extra_1)
session_packets.append(synack_extra_2)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Real_SA-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented real SA
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
for p_fragment in reversed(p_frag_synack):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
random.shuffle(p_frag_synack)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start ordering the stream so that we have 3 SAs. The extra ones are
# AFTER the real one. For the purpose of thoroughness we also
# add cases where the real SA arrives fragmented.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(synack_extra_1)
session_packets.append(synack_extra_2)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Real_SA-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented real SA
session_packets_fragmented.append(syn)
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
for p_fragment in reversed(p_frag_synack):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
random.shuffle(p_frag_synack)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start ordering the stream so that we have 3 SAs. The extra ones are
# BEFORE and AFTER the real one. For the purpose of thoroughness we also
# add cases where the real SA arrives fragmented.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack_extra_1)
session_packets.append(synack)
session_packets.append(synack_extra_2)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Real_SA-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented real SA
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
for p_fragment in reversed(p_frag_synack):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
random.shuffle(p_frag_synack)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSession(self, packet, results_directory, sid_id_http, \
src_name, repo_name):
session_packets = list()
session_packets_fragmented = list()
#print packet[TCP][Raw]
#print packet[Ether].src
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
    ##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
    ##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
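  # Recap of the sequence/ack bookkeeping used by the rebuilt sessions above:
  # the client ISN is random, the SYN/ACK acknowledges ISN+1, the data packet
  # reuses seq=ISN+1, and the server's ACK answers with ack = data.seq +
  # len(payload); the FIN/ACK exchange then continues from those numbers so
  # that stream reassembly accepts the synthetic session end to end.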
def rebuildIPv4HttpSessionDot1Q(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#Dot1Q VLAN tags
session_packets = list()
session_packets_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1Q(vlan=1111)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1Q(vlan=1111)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1Q(vlan=1111)
    ##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1Q(vlan=1111)
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
## This is the same original data packet - but no VLAN tags
p_Dot1Q_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_frag_Dot1Q_untagged = fragment(p_Dot1Q_untagged, fragsize=10)
# Dot1Q wrong VLAN tag - we change the VLAN tag in the data packet
# Everything else is the same and stays the same
p_Dot1Q_tagged_wrong = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_tagged_wrong.tags = Dot1Q(vlan=3333)
##This is the actual data packet that will be sent containing the payload
#- fragmented.
p_frag_Dot1Q_tagged_wrong = fragment(p_Dot1Q_tagged_wrong, fragsize=10 )
    ##This is the data packet. From this data packet we will edit and tweak
# the VLAN tags for one or more fragments of the same data packet !
p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_data_frag.tags = Dot1Q(vlan=1111)
# We fragment the data packet, then we will play around with the fragments
# VLAN tags
p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333)
# We fragment the data packet , but we make one fragment untagged.
# VLAN tag missing
p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_missing[3].tags = Untagged()
# We fragment the data packet , but we make ONLY one fragment tagged
# with the correct VLAN tag
p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10 )
for frag in p_frag_Dot1Q_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1111)
#We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
returnAck.tags = Dot1Q(vlan=1111)
    ##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1Q(vlan=1111)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1Q(vlan=1111)
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap"\
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap"\
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start with the wrong Dot1Q VLAN tags in the data packet
# and the creation of the pcaps designed for not alerting
# due to changed (fake/hopped) VLAN tag in the same flow
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p_Dot1Q_tagged_wrong)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q_tagged_wrong-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_tagged_wrong:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_tagged_wrong):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_tagged_wrong)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_tagged_wrong:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start with the missing Dot1Q VLAN tag in the data packet
# and the creation of the pcaps designed for not alerting
# due to missing VLAN tag in the same flow.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p_Dot1Q_untagged)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_untagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_untagged):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_untagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_untagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSessionDot1QWrongTagInFragments(self, packet, \
results_directory, sid_id_http, src_name, repo_name):
#Dot1Q VLAN tags
#Here we will change the VLAN tags on one or more fragments
#of the data packet
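# Note: the .tags attribute and the Untagged()/Dot1AD helpers used by
# these session builders are not part of stock Scapy; they appear to
# come from the VLAN-tag-aware Scapy variant bundled with this tool.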
session_packets = list()
session_packets_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1Q(vlan=1111)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1Q(vlan=1111)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1Q(vlan=1111)
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1Q(vlan=1111)
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
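# fragment() is Scapy's IP fragmentation helper; fragsize is rounded
# down to a multiple of 8 (fragment offsets are counted in 8-byte
# units), so fragsize=10 effectively yields 8-byte payload fragments.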
##This is the data packet. From this data packet we will edit and tweak
# the VLAN tags for one or more fragments of the same data packet!
p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_data_frag.tags = Dot1Q(vlan=1111)
# We fragment the data packet, then we will play around with the fragments
# VLAN tags - one fragment has the wrong VLAN tag
p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333)
# We fragment the data packet, but we make one fragment untagged.
# VLAN tag missing
p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_missing[3].tags = Untagged()
# We fragment the data packet, but we make ONLY one fragment tagged
# with the correct VLAN tag
p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10 )
for frag in p_frag_Dot1Q_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1111)
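# Note: indexing fragment [3] in the three variants above assumes the
# HTTP payload is large enough to produce at least four fragments at
# fragsize=10; a very short payload would make these lookups fail.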
#We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
returnAck.tags = Dot1Q(vlan=1111)
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1Q(vlan=1111)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1Q(vlan=1111)
##
# Here we start with changing the Dot1Q VLAN tags in the FRAGMENTS
# of the data packet and the creation of the pcaps designed for not alerting
# due to missing or mismatched VLAN tags in the fragments of data in the same flow.
##
## one fragment from the data packet has a missing VLAN tag
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_missing:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_missing):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_missing)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_missing:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment from the data packet has the wrong VLAN tag
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_wrong:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_wrong):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_wrong)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_wrong:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## all fragments from the data packet have no VLAN tags BUT one
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_one_tagged):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_one_tagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSessionQinQ(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#Dot1Q double tags (vlans) = QinQ
session_packets = list()
session_packets_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
syn.tags[Dot1Q].tpid = 0x88a8
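# Standard QinQ framing uses EtherType/TPID 0x88a8 for the outer
# 802.1ad service tag (S-TAG) and 0x8100 for the inner 802.1Q customer
# tag (C-TAG). The explicit tpid assignment above pins 0x88a8 on the
# tag stack; exactly which layer the [Dot1Q] selector resolves to
# depends on the VLAN-aware Scapy build shipped with this tool
# (assumption).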
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
synack.tags[Dot1Q].tpid = 0x88a8
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
ack.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
p.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
## This is the same original data packet - but no VLAN tags
p_QinQ_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_frag_QinQ_untagged = fragment(p_QinQ_untagged, fragsize=10)
# QinQ reversed - we reverse/switch the VLAN tags in the data packet
# Everything else is the same and stays the same
p_QinQ_tag_reversed = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_tag_reversed.tags = Dot1AD(vlan=4094)/Dot1Q(vlan=666)
p_QinQ_tag_reversed.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent containing the payload
#- fragmented, QinQ reversed/switched tags
p_frag_QinQ_tag_reversed = fragment(p_QinQ_tag_reversed, fragsize=10 )
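# The reversed-tag variant keeps the same payload and TCP state as p;
# only the order of the S-VLAN/C-VLAN IDs is swapped, so the "-fp-"
# pcaps built from it exercise VLAN-pair-aware flow tracking.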
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
returnAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
returnAck.tags[Dot1Q].tpid = 0x88a8
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
finAck.tags[Dot1Q].tpid = 0x88a8
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
finalAck.tags[Dot1Q].tpid = 0x88a8
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start with the reversed QinQ VLAN tags
# and the creation of the pcaps designed for not alerting
# due to switched (fake) VLAN tags in the same flow
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p_QinQ_tag_reversed)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_tag_reversed:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_tag_reversed):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_tag_reversed)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_tag_reversed:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start with the missing Dot1Q VLAN tag in the data packet
# and the creation of the pcaps designed for not alerting
# due to missing VLAN tag in the same flow
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p_QinQ_untagged)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name),
session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_untagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_untagged):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_untagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_untagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSessionQinQWrongTagInFragments(self, packet, \
results_directory, sid_id_http, src_name, repo_name):
#QinQ VLAN tags - double tags
#Here we will change the VLAN tags on one or more fragments
#of the QinQ data packet
session_packets = list()
session_packets_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
syn.tags[Dot1Q].tpid = 0x88a8
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
synack.tags[Dot1Q].tpid = 0x88a8
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
ack.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
p.tags[Dot1Q].tpid = 0x88a8
##This is the data packet. From this data packet we will edit and tweak
# the VLAN tags (QinQ) for one or more fragments of the same data packet!
p_QinQ_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_data_frag.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
p_QinQ_data_frag.tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments
# VLAN tags in QinQ
# Here we change the VLAN tag of the inner Dot1Q layer
p_frag_QinQ_data_frag_wrong_dot1q = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=666)/Dot1Q(vlan=777)
p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments
# VLAN tags in QinQ
# Here we change the VLAN tag of the outer 802.1AD layer
p_frag_QinQ_data_frag_wrong_dot1ad = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=4094)
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet and make one fragment with both tags
# having the wrong VLAN IDs
p_frag_QinQ_data_frag_wrong_both = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=444)/Dot1Q(vlan=555)
p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, but we make one fragment untagged.
# VLAN tags missing
p_frag_QinQ_data_frag_missing_tags = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()
## We fragment the data packet, but we make one fragment with reversed
# VLAN tags
p_frag_QinQ_data_frag_reversed_tags = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_reversed_tags[3].tags = \
Dot1AD(vlan=4094)/Dot1Q(vlan=666)
p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, but we make ONLY one fragment QinQ tagged
# with the correct VLAN tags
p_frag_QinQ_data_frag_one_tagged = fragment(p_QinQ_data_frag, fragsize=10 )
for frag in p_frag_QinQ_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_QinQ_data_frag_one_tagged[3].tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
p_frag_QinQ_data_frag_one_tagged[3].tags[Dot1Q].tpid = 0x88a8
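# Summary of the six fragment variants prepared above: wrong inner
# Dot1Q tag, wrong outer Dot1AD tag, both tags wrong, tags missing,
# tags reversed, and only one fragment correctly tagged. Each variant
# is written out below as ordered/reversed/mixed "-fp-" pcaps.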
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
returnAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
returnAck.tags[Dot1Q].tpid = 0x88a8
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
finAck.tags[Dot1Q].tpid = 0x88a8
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
finalAck.tags[Dot1Q].tpid = 0x88a8
##
# Here we start with changing the QinQ VLAN tags in the FRAGMENTS
# of the data packet and the creation of the pcaps designed for not alerting
# due to missing/reversed/nonexistent VLAN tags in the fragments of
# data in the same flow.
##
## one fragment from the data packet has a wrong VLAN tag - dot1Q tag.
# The other tag (dot1AD- S-VLAN/Carrier VLAN) is correct
# write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1q):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment from the data packet has a wrong VLAN tag - dot1AD tag
# -> S-VLAN/Carrier VLAN. The other tag (dot1q) is correct
# write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1ad):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment from the data packet has both VLAN tag IDs wrong
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_both:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_both):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_both)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_both:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment of the data packet has NO VLAN tags
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_missing_tags:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_missing_tags):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_missing_tags)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_missing_tags:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment of the data packet has both VLAN tags switched/reversed
# write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_reversed_tags):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_reversed_tags)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment of the data packet has both VLAN tags correct.
# The rest do not.
# write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_one_tagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_one_tagged_fragments-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_one_tagged):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_one_tagged_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_one_tagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_one_tagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_one_tagged_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSeqOverSpill(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1, ... (32-bit sequence numbers wrap modulo 2**32, per RFC 793)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
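# type=0x800 pins the EtherType to IPv4 (0x0800); unlike the Dot1Q and
# QinQ builders above, this session carries no VLAN tags.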
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
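# Worked example of the wrap-around ACK: the data packet starts at
# seq = 4294967294 + 1 = 4294967295 (2**32 - 1), so after len(p[Raw])
# payload bytes the next expected sequence number is
# (4294967295 + len(p[Raw])) mod 2**32 = len(p[Raw]) - 1, which is the
# ack value used above (e.g. a 100-byte payload yields ack = 99).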
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with the fragmented packets mixed
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSeqOverSpillDot1Q(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#Dot1Q - VLAN tags cases.
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1, ... (32-bit sequence numbers wrap modulo 2**32, per RFC 793)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1Q(vlan=1155)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1Q(vlan=1155)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1Q(vlan=1155)
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1Q(vlan=1155)
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
## This is the same original data packet - but no VLAN tags
p_Dot1Q_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_frag_Dot1Q_untagged = fragment(p_Dot1Q_untagged, fragsize=10)
# Dot1Q wrong VLAN tag - we change the VLAN tag in the data packet
# Everything else is the same and stays the same
p_Dot1Q_tagged_wrong = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_tagged_wrong.tags = Dot1Q(vlan=3355)
##This is the actual data packet that will be sent containing the payload
#- fragmented, Dot1Q wrong VLAN tag
p_frag_Dot1Q_tagged_wrong = fragment(p_Dot1Q_tagged_wrong, fragsize=10 )
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
returnAck.tags = Dot1Q(vlan=1155)
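# Same wrap-around arithmetic as in rebuildIPv4HttpSeqOverSpill:
# ack = (2**32 - 1 + len(p[Raw])) mod 2**32 = len(p[Raw]) - 1.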
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1Q(vlan=1155)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1Q(vlan=1155)
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with the fragmented packets mixed
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with the wrong Dot1Q VLAN tags in the data packet
# and the creation of the pcaps designed for not alerting
# due to changed (fake/hopped) VLAN tag in the same flow
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_Dot1Q_tagged_wrong)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_tagged_wrong:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_tagged_wrong):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with the fragmented packets mixed
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_tagged_wrong)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_tagged_wrong:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with the missing Dot1Q VLAN tag in the data packet
# and the creation of the pcaps designed for not alerting
# due to missing VLAN tag in the same flow
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_Dot1Q_untagged)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_untagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_untagged):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_untagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_untagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
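# The next method keeps the TCP session itself correctly Dot1Q-tagged and only
# tampers with the tags of individual IP fragments of the data packet (wrong
# VLAN ID, missing tag, or only a single tagged fragment). The resulting pcaps
# are named "-fp-", so presumably a VLAN-aware engine is expected not to
# reassemble fragments that do not belong to the tagged flow and not to alert.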
def rebuildIPv4HttpSeqOverSpillDot1QWrongTagInFragments(self, packet, \
results_directory, sid_id_http, src_name, repo_name):
#Dot1Q - VLAN tags cases.
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
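# TCP sequence numbers are 32-bit and wrap modulo 2**32 (RFC 793), so starting
# the client sequence at 4294967294 puts the first payload byte at 4294967295
# and the rest of the payload at 0, 1, 2, ... - the "overspill" this test case
# is built around.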
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1Q(vlan=1155)
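# The ".tags" attribute and the Untagged() helper used further below are not
# part of stock Scapy; they are assumed to come from the VLAN-tag-aware Scapy
# patch bundled with this tool, which lets an 802.1Q/802.1ad tag stack be
# attached to an already built Ether() frame.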
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1Q(vlan=1155)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1Q(vlan=1155)
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1Q(vlan=1155)
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
##This is the data packet. From this data packet we will edit and tweak
# the VLAN tags for one or more fragments of the same data packet.
p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_data_frag.tags = Dot1Q(vlan=1155)
# We fragment the data packet, then we will play around with the fragments
# VLAN tags - one fragment has the wrong VLAN tag
p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333)
# We fragment the data packet , but we make one fragment untagged.
# VLAN tag missing
p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_missing[3].tags = Untagged()
# We fragment the data packet , but we make ONLY one fragment tagged
# with the correct VLAN tag
p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10 )
for frag in p_frag_Dot1Q_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1155)
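# Three fragment-level variants of the same data packet now exist: one fragment
# re-tagged with the wrong VLAN ID, one fragment untagged, and only one fragment
# tagged while the rest are untagged. Note that indexing fragment [3] assumes
# fragment(..., fragsize=10) yields at least four fragments for this payload.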
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
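# Because the payload starts at seq 4294967295 and wraps, the next expected
# sequence number is (4294967295 + len(payload)) mod 2**32 = len(payload) - 1,
# which is exactly the ACK value used below.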
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
returnAck.tags = Dot1Q(vlan=1155)
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1Q(vlan=1155)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1Q(vlan=1155)
##
# Here we start with changing the Dot1Q VLAN tags in the FRAGMENTS
# of the data packet and the creation of the pcaps designed for not alerting
# due to missing or wrong VLAN tags in the fragments of data in the same flow.
##
## one fragment from the data packet has a missing VLAN tag
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_missing:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_missing):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_missing)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_missing:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment from the data packet has the wrong VLAN tag
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_wrong:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_wrong):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_wrong)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_wrong:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## all fragments from the data packet have no VLAN tags EXCEPT one
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_one_tagged):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_one_tagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
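# The next method repeats the seq-overspill session for QinQ (IEEE 802.1ad)
# frames: an outer S-VLAN/Carrier tag (Dot1AD, TPID 0x88a8) stacked on an inner
# C-VLAN tag (Dot1Q, TPID 0x8100). The baseline session keeps both tags intact
# ("-tp-" pcaps); the variants drop, reverse or reduce the tag stack on the
# data packet and, per the comments below, are designed not to alert ("-fp-").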
def rebuildIPv4HttpSeqOverSpillQinQ(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#QinQ - double VLAN tag cases.
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
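# In standard 802.1ad framing the outer (service) tag carries TPID 0x88a8 while
# the inner customer tag keeps 0x8100. "tags[Dot1Q]" is assumed to resolve to
# the outer Dot1AD layer here (Dot1AD subclasses Dot1Q), so the line below marks
# the outer tag as an 802.1ad S-tag - this depends on the patched Scapy's
# tag implementation.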
syn.tags[Dot1Q].tpid = 0x88a8
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
synack.tags[Dot1Q].tpid = 0x88a8
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
ack.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
p.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
## This is the same original data packet - but no VLAN tags
p_QinQ_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_frag_QinQ_untagged = fragment(p_QinQ_untagged, fragsize=10)
# QinQ reversed VLAN tags - we swap the S-VLAN and C-VLAN IDs in the data packet
# Everything else is the same and stays the same
p_QinQ_tag_reversed = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_tag_reversed.tags = Dot1AD(vlan=4000)/Dot1Q(vlan=777)
p_QinQ_tag_reversed.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent containing the payload
#- fragmented, QinQ reversed/switched tags
p_frag_QinQ_tag_reversed = fragment(p_QinQ_tag_reversed, fragsize=10 )
## ONLY Dot1Q VLAN tag - present in the fragments (QinQ expected)
p_QinQ_tag_only_dot1q = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_tag_only_dot1q.tags = Dot1Q(vlan=1234)
#The actual fragmentation - only one VLAN tag - QinQ expected
p_frag_QinQ_tag_only_dot1q = fragment(p_QinQ_tag_only_dot1q, fragsize=10 )
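# At this point four flavours of the data packet exist: the fully tagged
# original (p), an untagged copy, a copy with the S- and C-VLAN IDs swapped, and
# a copy carrying only a single Dot1Q tag where a QinQ stack is expected. Each
# flavour is written below as a whole packet and as ordered/reversed/shuffled
# fragments.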
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
returnAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
returnAck.tags[Dot1Q].tpid = 0x88a8
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
finAck.tags[Dot1Q].tpid = 0x88a8
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
finalAck.tags[Dot1Q].tpid = 0x88a8
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with the reversed/switched QinQ VLAN tags in the data packet
# and the creation of the pcaps designed for not alerting
# due to changed (fake/hopped) VLAN tag in the same flow
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_QinQ_tag_reversed)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_tag_reversed:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_tag_reversed):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_tag_reversed)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_tag_reversed:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with the missing QinQ VLAN tag in the data packet
# and the creation of the pcaps designed for not alerting
# due to missing VLAN tag in the same flow
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_QinQ_untagged)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_untagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name) , session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_untagged):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_untagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_untagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with only one VLAN tag found in the data packet
# QinQ VLAN tags expected
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_QinQ_tag_only_dot1q)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_tag_only_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_tag_only_dotq-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_tag_only_dot1q):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_tag_only_dot1q)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_tag_only_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
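# The next method again keeps the QinQ session itself correctly tagged and only
# manipulates the tag stack of individual fragments of the data packet: a wrong
# outer or inner VLAN ID, a single tag instead of two, both IDs wrong, no tags
# at all, the two tags reversed, or only one fragment tagged correctly.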
def rebuildIPv4HttpSeqOverSpillQinQWrongTagInFragments(self, packet, \
results_directory, sid_id_http, src_name, repo_name):
#QinQ - double VLAN tag cases.
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
syn.tags[Dot1Q].tpid = 0x88a8
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
synack.tags[Dot1Q].tpid = 0x88a8
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
ack.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
p.tags[Dot1Q].tpid = 0x88a8
##This is the data packet. From this data packet we will edit and tweak
# the VLAN tags (QinQ) for one or more fragments of the same data packet.
p_QinQ_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_data_frag.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
p_QinQ_data_frag.tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments
# VLAN tags in QinQ
# Here we change the VLAN tag of the outer 802.1AD layer
p_frag_QinQ_data_frag_wrong_dot1ad = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=333)/Dot1Q(vlan=4000)
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments
# VLAN tags in QinQ
# Here we change the VLAN tag of the inner Dot1Q layer
p_frag_QinQ_data_frag_wrong_dot1q = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=888)
p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments
# VLAN tags in QinQ
# Here we make one fragment tagged with only one VLAN
p_frag_QinQ_data_frag_only_dot1q = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_only_dot1q[3].tags = Dot1Q(vlan=1234)
## We fragment the data packet and make one fragment with both tags
# having the wrong VLAN IDs
p_frag_QinQ_data_frag_wrong_both = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=444)/Dot1Q(vlan=555)
p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet , but we make one fragment untagged.
# VLAN tags missing
p_frag_QinQ_data_frag_missing_tags = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()
## We fragment the data packet , but we make one fragment with reversed
# VLAN tags
p_frag_QinQ_data_frag_reversed_tags = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_reversed_tags[3].tags = \
Dot1AD(vlan=4000)/Dot1Q(vlan=777)
p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet , but we make ONLY one fragment QinQ tagged
# with the correct VLAN tags
p_frag_QinQ_data_frag_one_tagged = fragment(p_QinQ_data_frag, fragsize=10 )
for frag in p_frag_QinQ_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_QinQ_data_frag_one_tagged[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
p_frag_QinQ_data_frag_one_tagged[3].tags[Dot1Q].tpid = 0x88a8
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
returnAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
returnAck.tags[Dot1Q].tpid = 0x88a8
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
finAck.tags[Dot1Q].tpid = 0x88a8
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
finalAck.tags[Dot1Q].tpid = 0x88a8
##
# Here we start with changing the QinQ VLAN tags in the FRAGMENTS
# of the data packet and the creation of the pcaps designed for not alerting
# due to missing/reversed/wrong VLAN tags in the fragments of
# data in the same flow.
##
## one fragment from the data packet has a wrong VLAN tag - dot1Q tag.
# The other tag (dot1AD- S-VLAN/Carrier VLAN) is correct
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1q):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment from the data packet has a wrong VLAN tag - dot1AD tag
# -> S-VLAN/Carrier VLAN. The other tag (dot1q) is correct
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1ad):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## We make one fragment with only one VLAN tag (not double)
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_only_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_only_dot1q):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_only_dot1q)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_only_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment from the data packet has both VLAN tag IDs wrong
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_both:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_both):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_both)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_both:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment of the data packet has NO VLAN tags
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_missing_tags:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_missing_tags):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_missing_tags)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_missing_tags:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment of the data packet has both VLAN tags switched/reversed
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_reversed_tags):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_reversed_tags)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment of the data packet has both VLAN tags correct.
# The rest do not.
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_one_tagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_one_tagged):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_one_tagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_one_tagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
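# The midstream methods below skip the TCP handshake entirely and write just the
# single HTTP data packet, whole and/or split into fragments, i.e. traffic
# picked up mid-flow. These pcaps are named "-tp-", so the expectation is
# presumably that an engine configured to inspect midstream traffic still
# alerts on them.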
def midstreamIPv4Http(self, fragit, results_directory, sid_id_http, \
src_name, repo_name):
#forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
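# Deleting the stored checksum fields makes Scapy recalculate both the IP and
# TCP checksums when the modified packet is serialized into the pcap.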
fragit_done = fragment(fragit, fragsize=10 )
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
def midstreamIPv4HttpDot1Q(self, fragit, results_directory, sid_id_http, \
src_name, repo_name):
#Using VLAN Tag - Dot1Q
#forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit[Ether].tags=Dot1Q(vlan=2222)
#one midstream packet in Dot1Q
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit)
fragit_done = fragment(fragit, fragsize=10 )
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
def midstreamIPv4HttpDot1QWrongTagInFragments(self, fragit, results_directory, \
sid_id_http, src_name, repo_name):
# Wrongly tagged fragments
# Using VLAN Tag - Dot1Q
#forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit[Ether].tags = Dot1Q(vlan=2222)
##
# one fragment has the wrong VLAN ID tag
##
fragit_done_wrong_dot1q_tag = fragment(fragit, fragsize=10 )
fragit_done_wrong_dot1q_tag[3].tags = Dot1Q(vlan=2299)
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_wrong_dot1q_tag)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done_wrong_dot1q_tag.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_wrong_dot1q_tag)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done_wrong_dot1q_tag)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_wrong_dot1q_tag)
##
# one fragment has no VLAN ID tag
##
fragit_done_no_dot1q_tag = fragment(fragit, fragsize=10 )
fragit_done_no_dot1q_tag[3].tags = Untagged()
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_no_dot1q_tag)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done_no_dot1q_tag.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_no_dot1q_tag)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done_no_dot1q_tag)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_no_dot1q_tag)
def midstreamIPv4HttpQinQ(self, fragit, results_directory, sid_id_http, \
src_name, repo_name):
#Using DOUBLE VLAN Tagging - QinQ
#Forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit.tags = Dot1AD(vlan=3333)/Dot1Q(vlan=1)
fragit.tags[Dot1Q].tpid = 0x88a8
#one midstream packet in QinQ
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit)
fragit_done = fragment(fragit, fragsize=10 )
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
def midstreamIPv4HttpQinQWrongTagInFragments(self, fragit, \
results_directory, sid_id_http, src_name, repo_name):
#Wrongly tagged fragments
#Using DOUBLE VLAN Tagging - QinQ
#forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit.tags = Dot1AD(vlan=3333)/Dot1Q(vlan=1)
fragit.tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet, we change the VLAN tag of
# the outer 802.1AD layer
##
p_frag_QinQ_data_frag_wrong_dot1ad = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=3333)/Dot1Q(vlan=777)
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet, we change the VLAN tag of
# the inner Dot1Q layer
##
p_frag_QinQ_data_frag_wrong_dot1q = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=1)
p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8
##
        # We fragment the data packet and make one fragment tagged only
# with one VLAN
##
p_frag_QinQ_data_frag_only_dot1q = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_only_dot1q[3].tags = Dot1Q(vlan=2345)
##
# We fragment the data packet and make one fragment with both tags
# having the wrong VLAN IDs
##
p_frag_QinQ_data_frag_wrong_both = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=111)/Dot1Q(vlan=222)
p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet , but we make one fragment untagged.
# VLAN tags missing
##
p_frag_QinQ_data_frag_missing_tags = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()
##
# We fragment the data packet , but we make one fragment with reversed
# VLAN tags
##
p_frag_QinQ_data_frag_reversed_tags = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_reversed_tags[3].tags = Dot1AD(vlan=1)/Dot1Q(vlan=3333)
p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet, we change the VLAN tag of
# the outer 802.1AD layer
##
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_wrong_dot1ad.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)
##
# We fragment the data packet, we change the VLAN tag of
# the inner Dot1Q layer
##
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_wrong_dot1q.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)
##
        # We fragment the data packet and make one fragment tagged only
# with one VLAN
##
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_only_dot1q.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_only_dot1q)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)
##
# We fragment the data packet and make one fragment with both tags
# having the wrong VLAN IDs
##
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_wrong_both.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_wrong_both)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)
##
# We fragment the data packet , but we make one fragment untagged.
# VLAN tags missing
##
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_missing_tags.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_missing_tags)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)
##
# We fragment the data packet , but we make one fragment with reversed
# VLAN tags
##
#write the ordered fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_reversed_tags.reverse()
#write the reversed fragmented payload packet and write
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_reversed_tags)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)
def reconstructIPv4HttpPacket(self, packet):
        # here we make the original HTTP packet into a plain TCP packet
if packet.haslayer(IPv6):
ipsrc = "1.1.1.1"
ipdst = "9.9.9.9"
else:
ipsrc = packet[IP].src
ipdst = packet[IP].dst
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=packet[TCP].sport, \
dport=packet[TCP].dport, seq=packet.seq, ack=packet.ack)/packet[TCP][Raw]
return p
def incrementPcapId(self, action):
if action == "byOne":
Global_Vars.pcap_id = Global_Vars.pcap_id+1
return '{0:03}'.format(Global_Vars.pcap_id)
elif action == "clear":
Global_Vars.pcap_id = 000
return '{0:03}'.format(Global_Vars.pcap_id)
else:
sys.exit("Invalid argument for function incrementPcapId()")
def httpReWrite(self, scapy_load, FN, pcap_id, results_directory, \
source_name, sid_id_http, url_method, url_str, content_all, repository_name):
# writing the http request packet to pcap
# in regression script format
# 2002031-001-sandnet-public-tp-01.pcap - example
## 001 - starts here ##
ipv4_ready = self.reconstructIPv4HttpPacket(scapy_load[FN])
if Global_Vars.yaml_options['Protocols']['HTTP']['WriteRule']:
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Rules'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Midstream']['Midstream']:
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne"), \
source_name, repository_name) , ipv4_ready)
self.midstreamIPv4Http(ipv4_ready, results_directory, sid_id_http, \
source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Midstream', 'Regular'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Midstream']['Dot1Q']:
self.midstreamIPv4HttpDot1Q(ipv4_ready, results_directory, sid_id_http, \
source_name, repository_name)
self.midstreamIPv4HttpDot1QWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Midstream', 'Dot1Q'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Midstream']['QinQ']:
self.midstreamIPv4HttpQinQ(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.midstreamIPv4HttpQinQWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Midstream', 'QinQ'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Session']:
self.rebuildIPv4HttpSession(ipv4_ready, results_directory, sid_id_http, \
source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Regular'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['ExtraTcpSA']:
self.rebuildIPv4HttpSessionExtraTcpSAs(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Regular'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Dot1Q']:
self.rebuildIPv4HttpSessionDot1Q(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.rebuildIPv4HttpSessionDot1QWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Dot1Q'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['QinQ']:
self.rebuildIPv4HttpSessionQinQ(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.rebuildIPv4HttpSessionQinQWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory,'QinQ'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['SeqOverspill']:
self.rebuildIPv4HttpSeqOverSpill(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Regular'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Dot1Q']:
self.rebuildIPv4HttpSeqOverSpillDot1Q(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.rebuildIPv4HttpSeqOverSpillDot1QWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Dot1Q'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['QinQ']:
self.rebuildIPv4HttpSeqOverSpillQinQ(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.rebuildIPv4HttpSeqOverSpillQinQWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory,'QinQ'), source_name)
def __init__(self, scapy_load, FN, pcap_id, results_directory, source_name, \
sid_id_http, url_method, url_str, content_all, repository_name):
self.scapy_load_to_pass = scapy_load
self.FN_to_pass = FN
self.pcap_id_to_pass = pcap_id
self.results_directory_to_pass = results_directory
self.source_name_to_pass = source_name
self.sid_id_http_to_pass = sid_id_http
self.url_method_to_pass = url_method
self.url_str_to_pass = url_str
self.content_all_to_pass = content_all
self.repository_name_to_pass = repository_name
# if HTTP over IPv4 is enabled in yaml
if Global_Vars.yaml_options['Protocols']['HTTP']['IPv4']:
self.httpReWrite( \
self.scapy_load_to_pass, self.FN_to_pass, self.pcap_id_to_pass, \
self.results_directory_to_pass, self.source_name_to_pass, \
self.sid_id_http_to_pass, self.url_method_to_pass, \
self.url_str_to_pass, self.content_all_to_pass, \
self.repository_name_to_pass )
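def fragmentReorderExample():
    # Hedged usage sketch added for illustration - not part of the original
    # class above. It shows in isolation the core pattern used throughout
    # this module: fragment a payload packet, then write the fragments
    # ordered, reversed and shuffled. It assumes the module level scapy
    # imports (Ether, IP, TCP, fragment, wrpcap) and random are available;
    # the output file names are arbitrary examples.
    pkt = Ether()/IP(dst="192.0.2.1")/TCP(dport=80, flags="PA")/("X" * 100)
    #forcing correct recalculation of the checksum
    del pkt[IP].chksum
    del pkt[TCP].chksum
    frags = fragment(pkt, fragsize=10)
    #write the fragments in their original order
    wrpcap("example_fragmented_ordered.pcap", frags)
    #reverse the fragments - permanent change to the list of fragments
    frags.reverse()
    wrpcap("example_fragmented_reversed.pcap", frags)
    #shuffle(unorder/mix) the fragments and write
    random.shuffle(frags)
    wrpcap("example_fragmented_mixed.pcap", frags)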
| pevma/PtP | ProtoIPv4/IPv4_HTTP.py | Python | gpl-2.0 | 182,209 | 0.017381 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StartTask(Model):
"""A task which is run when a compute node joins a pool in the Azure Batch
service, or when the compute node is rebooted or reimaged.
:param command_line: The command line of the start task. The command line
does not run under a shell, and therefore cannot take advantage of shell
features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param container_settings: The settings for the container under which the
start task runs. When this is specified, all directories recursively below
the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the
node) are mapped into the container, all task environment variables are
mapped into the container, and the task command line is executed in the
container.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param environment_settings: A list of environment variable settings for
the start task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param user_identity: The user identity under which the start task runs.
If omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: ~azure.batch.models.UserIdentity
:param max_task_retry_count: The maximum number of times the task may be
retried. The Batch service retries a task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try the task once, and may then retry up to this limit.
For example, if the maximum retry count is 3, Batch tries the task up to 4
times (one initial try and 3 retries). If the maximum retry count is 0,
the Batch service does not retry the task. If the maximum retry count is
-1, the Batch service retries the task without limit.
:type max_task_retry_count: int
:param wait_for_success: Whether the Batch service should wait for the
start task to complete successfully (that is, to exit with exit code 0)
before scheduling any tasks on the compute node. If true and the start
task fails on a compute node, the Batch service retries the start task up
to its maximum retry count (maxTaskRetryCount). If the task has still not
completed successfully after all retries, then the Batch service marks the
compute node unusable, and will not schedule tasks to it. This condition
can be detected via the node state and failure info details. If false, the
Batch service will not wait for the start task to complete. In this case,
other tasks can start executing on the compute node while the start task
is still running; and even if the start task fails, new tasks will
continue to be scheduled on the node. The default is false.
:type wait_for_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, command_line, container_settings=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count=None, wait_for_success=None):
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
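# Hedged usage sketch (added for illustration; not part of the generated
# model code). It builds a StartTask that must finish successfully before
# other tasks are scheduled on the node; the command line and retry count
# are arbitrary example values.
def _example_start_task():
    return StartTask(
        command_line="/bin/sh -c 'echo node ready'",
        max_task_retry_count=3,   # Batch tries the task up to 4 times
        wait_for_success=True)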
| AutorestCI/azure-sdk-for-python | azure-batch/azure/batch/models/start_task.py | Python | mit | 5,044 | 0.000595 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: uptimerobot
short_description: Pause and start Uptime Robot monitoring
description:
- This module will let you start and pause Uptime Robot Monitoring
author: "Nate Kingsley (@nate-kingsley)"
version_added: "1.9"
requirements:
- Valid Uptime Robot API Key
options:
state:
description:
- Define whether or not the monitor should be running or paused.
required: true
choices: [ "started", "paused" ]
monitorid:
description:
- ID of the monitor to check.
required: true
apikey:
description:
- Uptime Robot API key.
required: true
notes:
- Support for adding and removing monitors and alert contacts has not yet been implemented.
'''
EXAMPLES = '''
# Pause the monitor with an ID of 12345.
- uptimerobot:
monitorid: 12345
apikey: 12345-1234512345
state: paused
# Start the monitor with an ID of 12345.
- uptimerobot:
monitorid: 12345
apikey: 12345-1234512345
state: started
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url
API_BASE = "http://api.uptimerobot.com/"
API_ACTIONS = dict(
status='getMonitors?',
editMonitor='editMonitor?'
)
API_FORMAT = 'json'
API_NOJSONCALLBACK = 1
CHANGED_STATE = False
SUPPORTS_CHECK_MODE = False
def checkID(module, params):
data = urlencode(params)
full_uri = API_BASE + API_ACTIONS['status'] + data
req, info = fetch_url(module, full_uri)
result = req.read()
jsonresult = json.loads(result)
req.close()
return jsonresult
def startMonitor(module, params):
params['monitorStatus'] = 1
data = urlencode(params)
full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
req, info = fetch_url(module, full_uri)
result = req.read()
jsonresult = json.loads(result)
req.close()
return jsonresult['stat']
def pauseMonitor(module, params):
params['monitorStatus'] = 0
data = urlencode(params)
full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
req, info = fetch_url(module, full_uri)
result = req.read()
jsonresult = json.loads(result)
req.close()
return jsonresult['stat']
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['started', 'paused']),
apikey=dict(required=True),
monitorid=dict(required=True)
),
supports_check_mode=SUPPORTS_CHECK_MODE
)
params = dict(
apiKey=module.params['apikey'],
monitors=module.params['monitorid'],
monitorID=module.params['monitorid'],
format=API_FORMAT,
noJsonCallback=API_NOJSONCALLBACK
)
check_result = checkID(module, params)
if check_result['stat'] != "ok":
module.fail_json(
msg="failed",
result=check_result['message']
)
if module.params['state'] == 'started':
monitor_result = startMonitor(module, params)
else:
monitor_result = pauseMonitor(module, params)
module.exit_json(
msg="success",
result=monitor_result
)
if __name__ == '__main__':
main()
| vaygr/ansible | lib/ansible/modules/monitoring/uptimerobot.py | Python | gpl-3.0 | 3,698 | 0.001622 |
# ==========================================================================
# This script provides a number of functions that are useful for handling
# CTA observations.
#
# Copyright (C) 2011-2013 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
from ctools import *
from gammalib import *
# ===================== #
# Simulate observations #
# ===================== #
def sim(obs, log=False, debug=False, seed=0, nbins=0, binsz=0.05, npix=200):
"""
Simulate events for all observations in the container.
Parameters:
obs - Observation container
Keywords:
log - Create log file(s)
debug - Create screen dump
seed - Seed value for simulations (default: 0)
nbins - Number of energy bins (default: 0=unbinned)
binsz - Pixel size for binned simulation (deg/pixel)
npix - Number of pixels in X and Y for binned simulation
"""
# Allocate ctobssim application and set parameters
sim = ctobssim(obs)
sim['seed'].integer(seed)
# Optionally open the log file
if log:
sim.logFileOpen()
    # Optionally switch on debug mode
if debug:
sim["debug"].boolean(True)
# Run ctobssim application. This will loop over all observations in the
    # container and simulate the events for each observation. Note that
# events are not added together, they still apply to each observation
# separately.
sim.run()
# Binned option?
if nbins > 0:
# Determine common energy boundaries for observations
emin = None
emax = None
for run in sim.obs():
run_emin = run.events().ebounds().emin().TeV()
run_emax = run.events().ebounds().emax().TeV()
if emin == None:
emin = run_emin
elif run_emin > emin:
emin = run_emin
if emax == None:
emax = run_emax
elif run_emax > emax:
emax = run_emax
# Allocate ctbin application and set parameters
bin = ctbin(sim.obs())
bin["emin"].real(emin)
bin["emax"].real(emax)
bin["enumbins"].integer(nbins)
bin["usepnt"].boolean(True) # Use pointing for map centre
bin["nxpix"].integer(npix)
bin["nypix"].integer(npix)
bin["binsz"].real(binsz)
bin["coordsys"].string("GAL")
bin["proj"].string("TAN")
# Optionally open the log file
if log:
bin.logFileOpen()
        # Optionally switch on debug mode
if debug:
bin["debug"].boolean(True)
# Run ctbin application. This will loop over all observations in
# the container and bin the events in counts maps
bin.run()
# Make a deep copy of the observation that will be returned
        # (the ctbin object will go out of scope once the function is
# left)
obs = bin.obs().copy()
else:
# Make a deep copy of the observation that will be returned
        # (the ctobssim object will go out of scope once the function is
# left)
obs = sim.obs().copy()
# Delete the simulation
del sim
# Return observation container
return obs
# ================ #
# Fit observations #
# ================ #
def fit(obs, log=False, debug=False):
"""
Perform maximum likelihood fitting of observations in the container.
Parameters:
obs - Observation container
Keywords:
log - Create log file(s)
debug - Create screen dump
"""
# Allocate ctlike application
like = ctlike(obs)
# Optionally open the log file
if log:
like.logFileOpen()
    # Optionally switch on debug mode
if debug:
like["debug"].boolean(True)
# Run ctlike application.
like.run()
# Return observations
return like
# ================= #
# Create counts map #
# ================= #
def cntmap(obs, proj="TAN", coord="GAL", xval=0.0, yval=0.0, \
binsz=0.05, nxpix=200, nypix=200, \
outname="cntmap.fits"):
"""
Creates a counts map by combining the events of all observations.
The counts map will be a summed map over all energies.
Parameters:
obs - Observation container
Keywords:
proj - Projection type (e.g. TAN, CAR, STG, ...) (default: TAN)
coord - Coordinate type (GAL, CEL) (default: GAL)
xval - Reference longitude value [deg] (default: 0.0)
yval - Reference latitude value [deg] (default: 0.0)
binsz - Pixel size [deg/pixel] (default: 0.05)
nxpix - Number of pixels in X direction (default: 200)
nypix - Number of pixels in Y direction (default: 200)
outname - Counts map FITS filename (default: cntmap.fits)
"""
# Allocate counts map
map = GSkymap(proj, coord, xval, yval, -binsz, binsz, nxpix, nypix, 1)
# Fill all observations
for run in obs:
# Loop over all events
for event in run.events():
# Determine sky pixel
skydir = GCTAInstDir(event.dir()).dir()
pixel = map.dir2pix(skydir)
# Set pixel
map[pixel] += 1.0
# Save sky map. The clobber flag is set to True, so any existing FITS
# file will be overwritten.
map.save(outname, True)
# Return counts map
return map
# ================ #
# Create model map #
# ================ #
def modmap(obs, eref=0.1, proj="TAN", coord="GAL", xval=0.0, yval=0.0, \
binsz=0.05, nxpix=200, nypix=200, \
outname="modmap.fits"):
"""
Make model map for a given reference energy by combining all observations.
The model map will be evaluated for a given reference energy 'eref' and will
be given in units of [counts/(sr MeV s)].
Parameters:
obs - Observation container
Keywords:
eref - Reference energy for which model is created [TeV] (default: 0.1)
proj - Projection type (e.g. TAN, CAR, STG, ...) (default: TAN)
coord - Coordinate type (GAL, CEL) (default: GAL)
xval - Reference longitude value [deg] (default: 0.0)
yval - Reference latitude value [deg] (default: 0.0)
binsz - Pixel size [deg/pixel] (default: 0.05)
nxpix - Number of pixels in X direction (default: 200)
nypix - Number of pixels in Y direction (default: 200)
outname - Model map FITS filename (default: modmap.fits)
"""
# Allocate model map
map = GSkymap(proj, coord, xval, yval, -binsz, binsz, nxpix, nypix, 1)
# Set reference energy, time and direction. The time is not initialised and is
# in fact not used (as the IRF is assumed to be time independent for now).
# The sky direction is set later using the pixel values.
energy = GEnergy()
time = GTime()
instdir = GCTAInstDir()
energy.TeV(eref)
# Loop over all map pixels
for pixel in range(map.npix()):
# Get sky direction
skydir = map.pix2dir(pixel)
instdir.dir(skydir)
# Create event atom for map pixel
atom = GCTAEventAtom()
atom.dir(instdir)
atom.energy(energy)
atom.time(time)
# Initialise model value
value = 0.0
# Loop over all observations
for run in obs:
value += obs.models().eval(atom, run)
# Set map value
map[pixel] = value
# Save sky map
map.save(outname, True)
# Return model map
return map
# ======================= #
# Set one CTA observation #
# ======================= #
def set(pntdir, tstart=0.0, duration=1800.0, deadc=0.95, \
emin=0.1, emax=100.0, rad=5.0, \
irf="cta_dummy_irf", caldb="$GAMMALIB/share/caldb/cta"):
"""
Returns a single CTA observation. By looping over this function we can
add CTA observations to the observation container.
Parameters:
pntdir - Pointing direction
Keywords:
tstart - Start time [seconds] (default: 0.0)
duration - Duration of observation [seconds] (default: 1800.0)
deadc - Deadtime correction factor (default: 0.95)
emin - Minimum event energy [TeV] (default: 0.1)
emax - Maximum event energy [TeV] (default: 100.0)
rad - ROI radius used for analysis [deg] (default: 5.0)
irf - Instrument response function (default: cta_dummy_irf)
caldb - Calibration database path (default: $GAMMALIB/share/caldb/cta)
"""
# Allocate CTA observation
obs = GCTAObservation()
# Set pointing direction
pnt = GCTAPointing()
pnt.dir(pntdir)
obs.pointing(pnt)
# Set ROI
roi = GCTARoi()
instdir = GCTAInstDir()
instdir.dir(pntdir)
roi.centre(instdir)
roi.radius(rad)
# Set GTI
gti = GGti()
start = GTime(tstart)
stop = GTime(tstart+duration)
gti.append(start, stop)
# Set energy boundaries
ebounds = GEbounds()
e_min = GEnergy()
e_max = GEnergy()
e_min.TeV(emin)
e_max.TeV(emax)
ebounds.append(e_min, e_max)
# Allocate event list
events = GCTAEventList()
events.roi(roi)
events.gti(gti)
events.ebounds(ebounds)
obs.events(events)
# Set instrument response
obs.response(irf, caldb)
# Set ontime, livetime, and deadtime correction factor
obs.ontime(duration)
obs.livetime(duration*deadc)
obs.deadc(deadc)
# Return observation
return obs
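# =============================== #
# Example workflow (usage sketch) #
# =============================== #
def example_workflow():
    """
    Hedged usage sketch, added for illustration and not part of the original
    script: chains set(), sim() and fit() for a single CTA pointing. The
    model definition file "crab.xml" and the use of gammalib's
    GSkyDir.radec_deg() are assumptions made for this example only.
    """
    # Pointing direction (Crab nebula)
    pntdir = GSkyDir()
    pntdir.radec_deg(83.63, 22.01)
    # Build an observation container with a single 30 min observation
    obs = GObservations()
    obs.append(set(pntdir, duration=1800.0))
    # Attach model definitions and simulate events (unbinned)
    obs.models(GModels("crab.xml"))
    obs = sim(obs)
    # Maximum likelihood fit; return the fitted models
    like = fit(obs)
    return like.obs().models()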
| cdeil/ctools | scripts/obsutils.py | Python | gpl-3.0 | 9,202 | 0.040535 |
import doto.model
import doto.model.task
CREATE_CMD = """
CREATE TABLE IF NOT EXISTS
timerecords (
id INTEGER NOT NULL,
task_id INTEGER,
start TIMESTAMP,
end TIMESTAMP,
PRIMARY KEY (id),
FOREIGN KEY(task_id) REFERENCES tasks (id)
)
"""
class Timerecord(object):
"""
    A timerecord is a time span during which one worked on a task.
    A timerecord is a time span that is associated with an event.
    The sum of all timerecords is the total amount of work that was put into the Task.
    This can be used to track the amount of time one worked on a specific task.
This should come in handy for freelancers (like me).
"""
def __init__(self, start, end=None, task_event=None):
"""
"""
self.id = None
self.span = doto.model.TimeSpan(start=start, end=end)
self.task = task_event
@staticmethod
def row_to_obj(row, store):
"""
Create Task from database row
"""
timerecord = doto.model.unwrap_row(store,
row,
Timerecord,
('start', 'end'),
('id',))
task_id = row['task_id']
if task_id is None:
timerecord.task = None
else:
timerecord.task = doto.model.task.get(store, task_id)
return timerecord
@staticmethod
def obj_to_row(obj):
row_dict = doto.model.unwrap_obj(obj, ignore_list=['span', 'task'])
row_dict['task_id'] = obj.task.id if obj.task is not None else None
row_dict['start'] = obj.span.start
row_dict['end'] = obj.span.end
return row_dict
def get_started_timerecords(store):
"""
    Get all timerecords which have been started but not yet ended.
    @param store the store used to query the database
    @return A list of running (unfinished) timerecords
"""
return store.query(Timerecord.row_to_obj, 'SELECT * FROM timerecords WHERE end IS NULL;', ())
insert_query = """INSERT INTO timerecords ( task_id, start, end)
VALUES (:task_id, :start, :end)
;
"""
update_query = """UPDATE timerecords SET task_id = :task_id,
start = :start,
end = :end
WHERE id = :id;
"""
delete_query = 'DELETE FROM timerecords WHERE id = ?;'
update = doto.model.crud.update(update_query, Timerecord)
add_new = doto.model.crud.insert(insert_query, Timerecord)
delete = doto.model.crud.delete(delete_query)
doto.model.setup_module(CREATE_CMD, ())
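# Hedged usage sketch (added for illustration; not part of the original
# module). It builds two finished timerecords for the same hypothetical
# task and sums the tracked time. _ExampleTask is a stand-in providing only
# the id attribute that Timerecord.obj_to_row() touches.
def _example_total_time():
    import datetime
    class _ExampleTask(object):
        id = 1
    now = datetime.datetime.now()
    records = [
        Timerecord(start=now - datetime.timedelta(hours=2),
                   end=now - datetime.timedelta(hours=1),
                   task_event=_ExampleTask()),
        Timerecord(start=now - datetime.timedelta(minutes=30),
                   end=now,
                   task_event=_ExampleTask()),
    ]
    # The sum of all timerecords is the total amount of work on the task.
    return sum((record.span.end - record.span.start for record in records),
               datetime.timedelta())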
| tantSinnister/doto | doto/model/timerecord.py | Python | bsd-3-clause | 2,998 | 0.000667 |
import os
import shutil
import re
import zipfile
import xml.etree.ElementTree as ET
from tempfile import TemporaryDirectory
import psycopg2
conn = psycopg2.connect(
database='innoter', user='postgres', password='postgres', host='192.168.0.107', port='5432')
cursor = conn.cursor()
dst_dir = r"\\nas1\storage\DG_archive\sat"
path_list = []
cursor.execute(
"""SELECT path, order_id
FROM geoarchive.dg_orders
WHERE aero is not TRUE""")
results = cursor.fetchall()
for i, result in enumerate(results):
zip_path, order_id = result[0], result[1]
print(i + 1, zip_path)
dst_filepath = os.path.join(dst_dir, os.path.basename(zip_path))
shutil.move(zip_path, dst_filepath)
cursor.execute("""UPDATE geoarchive.dg_orders
SET path = %s
WHERE order_id = %s""", [dst_filepath, order_id, ], )
conn.commit()
    print('Done...\n')
# with zipfile.ZipFile(zip_path) as zf:
# order_shape = [fnm for fnm in zf.namelist() if re.match(r'.+ORDER_SHAPE.+', fnm, re.I)]
# if not order_shape:
# # for fnm in zf.namelist():
# # if re.match(r'.+ORDER_SHAPE.+', fnm, re.I) is None:
# cursor.execute("""UPDATE geoarchive.dg_orders
# SET aero = TRUE
# WHERE order_id = %s""", [order_id, ],)
# conn.commit()
# print(80*'=', order_id, 80*'=')
# aero_list.append(order_id)
#
# print('\nDone:\n', len(aero_list))
# for i in aero_list:
# print(i)
| bazzile/imScrape | Scripts/v1/dev/auxiliary/move_imagery.py | Python | mit | 1,527 | 0.001972 |
"""
Tree based methods of learning (classification and regression)
"""
import abc
import numpy as np
import networkx as nx
from scipy.stats import mode
class BaseTree(object):
"""
    Base Tree for classification/regression. Written for a single
    variable/value binary split criterion. Many methods need to be
    rewritten if a more complex split criterion is desired.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
"""
Attributes:
graph (nx.DiGraph): Directed graph which stores tree
nodes (int): Current number of nodes in tree
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
            learned (bool): Keeps track of whether the model has been fit
"""
self.graph = nx.DiGraph()
self.graph.add_node(1)
self.nodes = 1
self.X = None
self.y = None
self.learned = False
def fit(self, X, y, height, weights=None):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
height (int): height of tree
weights (np.array): array of sample weights
if None, all samples are weighted evenly
Returns: an instance of self
"""
self.X = X
self.y = y
self.weights = weights
for layer in range(height):
self.add_layer()
self.compute_class_averages()
self.learned = True
return self
def predict(self, x):
"""
Args:
x (np.array): Training data of shape[n_features,]
Returns:
float: predicted value
Raises:
ValueError if model has not been fit
Notes:
Currently, only a single data instance can be predicted at a time.
"""
if not self.learned:
raise NameError('Fit model first')
current_node = 1
leaves = self.get_leaves()
while current_node not in leaves:
children = self.graph.successors(current_node)
current_variable = self.graph.node[current_node]['variable']
current_cutoff = self.graph.node[current_node]['cutoff']
if current_variable is None:
return self.graph.node[current_node]['classval']
if x[current_variable] > current_cutoff:
current_node = children[1]
else:
current_node = children[0]
return self.graph.node[current_node]['classval']
def add_layer(self):
"""
Used by Fit() to add a single layer at the bottom of the tree
"""
leaves = self.get_leaves()
for leaf in leaves:
data_indices = self.partition_data(leaf)
leaf_X = self.X[data_indices, :]
leaf_y = self.y[data_indices]
self.add_split(leaf, leaf_X, leaf_y)
def get_leaves(self):
"""
Used by add_layer() to get the leaves of the tree.
"""
leaves = []
for node in self.graph.nodes():
if len(self.graph.successors(node)) == 0:
leaves.append(node)
return leaves
def add_split(self, node_number, data, values):
"""
Used by add_layer() to add two children at a leaf in the tree
Args:
node_number (int): Node in tree which a new split is added to
data (np.ndarray): data of shape[n_samples, n_features]
Data which node split will be based off of
values (np.array): values of shape[n_samples,]
Target values which node split will be based off of
"""
min_feature, min_split = self.learn_split(data, values)
self.graph.node[node_number]['variable'] = min_feature
self.graph.node[node_number]['cutoff'] = min_split
for i in range(2):
self.nodes += 1
self.graph.add_edge(node_number, self.nodes)
def partition_data(self, node_number):
"""
Partitions the training data at a given node. Traverses the
        entire tree down to the indicated node.
Args:
node_number (int): Node in tree to partition data down to
Returns:
np.array: Array of indices from training data which
partition to node
"""
predecessors = self.get_predecessors(node_number)
predecessors.reverse()
predecessors.append(node_number)
data_indices = np.array(range(len(self.y)))
node_count = 0
while node_count < len(predecessors) - 1:
current_node = predecessors[node_count]
next_node = predecessors[node_count + 1]
current_variable = self.graph.node[current_node]['variable']
current_cutoff = self.graph.node[current_node]['cutoff']
if current_cutoff is None:
return []
if next_node == min(self.graph.successors(current_node)):
data_indices = data_indices[self.X[data_indices,
current_variable]
< current_cutoff]
else:
data_indices = data_indices[self.X[data_indices,
current_variable]
> current_cutoff]
node_count += 1
return data_indices
def get_predecessors(self, node_number):
"""
Used by parition_data() to get predecessors of a given node
(to walk down the tree).
"""
predecessors = []
current_node = node_number
while len(self.graph.predecessors(current_node)) > 0:
current_node = self.graph.predecessors(current_node)[0]
predecessors.append(current_node)
return predecessors
@abc.abstractmethod
def compute_class_averages(self):
"""
Method to compute average value for all nodes in the tree
"""
return
@abc.abstractmethod
def learn_split(self, inputs, values):
"""
Method to learn split given a data set (inputs) with
target values (values)
"""
return
class RegressionTree(BaseTree):
"""
    Regression Tree implementing the CART algorithm
"""
def __init__(self):
BaseTree.__init__(self)
def learn_split(self, inputs, values):
"""
CART algorithm to learn split at node in tree.
Minimizes mean squared error of the two classes generated.
Args:
data (np.ndarray): data of shape[n_samples, n_features]
Data which node split will be based off of
values (np.array): values of shape[n_samples,]
Target values which node split will be based off of
        Returns: (min_feature, min_split)
min_split (float): feature value at which to split
min_feature (int): feature number to split data by
Essentially, the column number from the data which
split is performed on
"""
if self.weights is None:
weights = np.ones(len(values))
else:
weights = np.array(self.weights)
min_error = np.inf
min_feature = None
min_split = None
for feature in range(np.shape(inputs)[1]):
feature_vector = inputs[:, feature]
sorted_vector = np.unique(np.sort(feature_vector))
feature_splits = (sorted_vector[1:] + sorted_vector[:-1]) / 2
for split in feature_splits:
lower_class_average = np.mean(values[feature_vector < split])
upper_class_average = np.mean(values[feature_vector > split])
lower_class_errors = (values[feature_vector < split] -
lower_class_average) * \
weights[feature_vector < split]
upper_class_errors = (values[feature_vector > split] -
upper_class_average) * \
weights[feature_vector > split]
total_error = np.inner(lower_class_errors,
lower_class_errors) + \
np.inner(upper_class_errors, upper_class_errors)
if total_error < min_error:
min_error = total_error
min_feature = feature
min_split = split
return min_feature, min_split
def compute_class_averages(self):
"""
Computes the class average of each node in the tree.
Class average is mean of training data that partitions to the node.
"""
for i in range(2, self.nodes + 1):
parent = self.graph.predecessors(i)[0]
if self.graph.node[parent]['cutoff'] is None:
self.graph.node[i]['classval'] = self.graph.node[parent]['classval']
else:
node_indices = self.partition_data(i)
classval = self.y[node_indices].mean()
self.graph.node[i]['classval'] = classval
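def _regression_tree_example():
    """
    Hedged usage sketch (added for illustration; not part of the original
    module): fit a two-level RegressionTree on a tiny one-dimensional
    synthetic data set and predict a single point.
    """
    X = np.array([[1.0], [2.0], [3.0], [10.0], [11.0], [12.0]])
    y = np.array([1.0, 1.2, 0.9, 5.0, 5.1, 4.9])
    tree = RegressionTree()
    tree.fit(X, y, height=2)
    # the query point lies in the high-response region, so the prediction
    # should be close to 5
    return tree.predict(np.array([11.5]))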
class ClassificationTree(BaseTree):
"""
    Classification Tree implementing the CART algorithm
"""
def __init__(self):
BaseTree.__init__(self)
def learn_split(self, inputs, values):
"""
CART algorithm to learn split at node in tree.
Minimizes total misclassification error.
Args:
data (np.ndarray): data of shape[n_samples, n_features]
Data which node split will be based off of
values (np.array): values of shape[n_samples,]
Target values which node split will be based off of
        Returns: (min_feature, min_split)
min_split (float): feature value at which to split
min_feature (int): feature number to split data by
Essentially, the column number from the data which
split is performed on
"""
if self.weights is None:
weights = np.ones(len(values))
else:
weights = np.array(self.weights)
min_error = np.inf
min_feature = None
min_split = None
for feature in range(np.shape(inputs)[1]):
feature_vector = inputs[:, feature]
sorted_vector = np.unique(np.sort(feature_vector))
feature_splits = (sorted_vector[1:] + sorted_vector[:-1]) / 2
for split in feature_splits:
lower_class_mode = mode(values[feature_vector < split]).mode[0]
upper_class_mode = mode(values[feature_vector > split]).mode[0]
lower_class_errors = np.sum((values[feature_vector
< split] !=
lower_class_mode).astype(int) *
weights[feature_vector < split])
upper_class_errors = np.sum((values[feature_vector
> split] !=
upper_class_mode).astype(int) *
weights[feature_vector > split])
total_error = upper_class_errors + lower_class_errors
if total_error < min_error:
min_error = total_error
min_feature = feature
min_split = split
return min_feature, min_split
def compute_class_averages(self):
"""
Computes the class average of each node in the tree.
Class average is the mode of training data that partitions to the node.
"""
for i in range(2, self.nodes + 1):
parent = self.graph.predecessors(i)[0]
if self.graph.node[parent]['cutoff'] is None:
self.graph.node[i]['classval'] = self.graph.node[parent]['classval']
else:
node_indices = self.partition_data(i)
classval = mode(self.y[node_indices]).mode[0]
self.graph.node[i]['classval'] = classval
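def _classification_tree_example():
    """
    Hedged usage sketch (added for illustration; not part of the original
    module): fit a one-level ClassificationTree (a decision stump) on a toy
    binary problem and predict a label for a new point.
    """
    X = np.array([[0.0], [1.0], [2.0], [3.0], [4.0], [5.0]])
    y = np.array([0, 0, 0, 1, 1, 1])
    stump = ClassificationTree()
    stump.fit(X, y, height=1)
    # the query point lies among the class-1 samples
    return stump.predict(np.array([4.2]))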
class PrimRegression(BaseTree):
"""
PRIM: Patient Rule Induction Method
    Decision at node peels off 10% of data which maximizes response mean
More "patient" than CART algorithm.
NOTE:
Since decision is a "box", many methods in BaseTree class
        are overwritten. In the future, BaseTree can be reworked
        to accommodate more flexible decisions
"""
def __init__(self):
BaseTree.__init__(self)
def add_split(self, node_number, data, values):
"""
Used by add_layer() to add two children at a leaf in the tree
Args:
node_number (int): Node in tree which a new split is added to
data (np.ndarray): data of shape[n_samples, n_features]
Data which node split will be based off of
values (np.array): values of shape[n_samples,]
Target values which node split will be based off of
"""
cutoffs = self.learn_split(data, values)
self.graph.node[node_number]['cutoffs'] = cutoffs
for i in range(2):
self.nodes += 1
self.graph.add_edge(node_number, self.nodes)
def learn_split(self, inputs, values):
"""
PRIM algorithm to learn split at node in tree.
Maximizes response mean after "boxing off" 90% of data.
Args:
data (np.ndarray): data of shape[n_samples, n_features]
Data which node split will be based off of
values (np.array): values of shape[n_samples,]
Target values which node split will be based off of
Returns:
dict: Dictionary of cutoffs to use
{variable: [min_cutoff, max_cutoff]}
            Example: {3: [-12.5, 10]} means samples boxed between -12.5 and 10
on variable 3 are in the box.
            Note: in an early implementation, this dictionary could contain
single values. Currently, it only ever contains a single
value. This can be simplified in the future.
"""
target_bin_size = len(self.y)/10
cutoffs = {}
if len(values) <= target_bin_size:
return cutoffs
best_variable = None
best_cutoff = [-np.inf, np.inf]
mean_response = np.mean(values)
for feature in range(np.shape(inputs)[1]):
feature_vector = inputs[:, feature]
sorted_vector = np.unique(np.sort(feature_vector))
feature_splits = (sorted_vector[1:] + sorted_vector[:-1]) / 2
lower_split, upper_split = [int(len(feature_splits) * 0.1),
int(len(feature_splits) * 0.9)]
boxed_data_upper = values[inputs[:, feature]
> feature_splits[lower_split]]
boxed_data_lower = values[inputs[:, feature]
< feature_splits[upper_split]]
max_split = max(np.mean(boxed_data_lower),
np.mean(boxed_data_upper))
if max_split > mean_response:
mean_response = max_split
if np.mean(boxed_data_upper) > np.mean(boxed_data_lower):
best_cutoff = [feature_splits[lower_split], np.inf]
else:
best_cutoff = [-np.inf, feature_splits[upper_split]]
best_variable = feature
if best_variable is None:
return cutoffs
for i in range(np.shape(inputs)[1]):
cutoffs[i] = [-np.inf, np.inf]
cutoffs[best_variable] = best_cutoff
return cutoffs
def predict(self, x):
"""
Args:
x (np.array): Training data of shape[n_features,]
Returns:
float: predicted value
Raises:
ValueError if model has not been fit
Notes:
Currently, only a single data instance can be predicted at a time.
"""
if not self.learned:
raise NameError('Fit model first')
current_node = 1
leaves = self.get_leaves()
while current_node not in leaves:
children = self.graph.successors(current_node)
if self.graph.node[current_node]['cutoffs'] is None:
return self.graph.node[current_node]['classval']
within_box = True
for key in self.graph.node[current_node]['cutoffs']:
current_variable = key
current_cutoff = self.graph.node[current_node]['cutoffs'][key]
if x[current_variable] < current_cutoff[0] or \
x[current_variable] > current_cutoff[1]:
within_box = False
if within_box:
current_node = children[0]
else:
current_node = children[1]
return self.graph.node[current_node]['classval']
def compute_class_averages(self):
"""
Computes the class average of each node in the tree.
Class average is the mean of training data that partitions to the node.
"""
for i in range(2, self.nodes + 1):
parent = self.graph.predecessors(i)[0]
if self.graph.node[parent]['cutoffs'] == {}:
self.graph.node[i]['classval'] = self.graph.node[parent]['classval']
else:
node_indices = self.partition_data(i)
if len(node_indices) == 0:
self.graph.node[i]['classval'] = self.graph.node[parent]['classval']
else:
classval = self.y[node_indices].mean()
self.graph.node[i]['classval'] = classval
def partition_data(self, node_number):
"""
Partitions the training data at a given node. Traverses the
        entire tree down to the indicated node.
Args:
node_number (int): Node in tree to partition data down to
Returns:
np.array: Array of indices from training data which
partition to node
"""
predecessors = self.get_predecessors(node_number)
predecessors.reverse()
predecessors.append(node_number)
data = self.X
data_indices = np.array(range(len(self.y)))
node_count = 0
while node_count < len(predecessors) - 1:
temp_data = data[data_indices]
current_node = predecessors[node_count]
next_node = predecessors[node_count + 1]
cutoff_dict = self.graph.node[current_node]['cutoffs']
if cutoff_dict is None:
return None
in_box = self.partition_data_nodeless(temp_data, cutoff_dict)
if in_box is None:
return None
if next_node == min(self.graph.successors(current_node)):
data_indices = data_indices[in_box]
else:
data_indices = np.delete(data_indices, in_box)
node_count += 1
if len(data_indices) == 0:
return []
return data_indices
@staticmethod
def partition_data_nodeless(inputs, cutoff_dict):
"""
Partitions inputs based off of a cutoff dictionary which can contain
        cutoffs for many variables (although this feature is currently unused)
"""
data_indices = np.array(range(np.shape(inputs)[0]))
if cutoff_dict is None:
return None
for key in cutoff_dict:
current_variable = key
current_cutoff_min = cutoff_dict[key][0]
current_cutoff_max = cutoff_dict[key][1]
boxed_data = data_indices[(inputs[data_indices,
current_variable] <
current_cutoff_max) &
(inputs[data_indices,
current_variable] >
current_cutoff_min)]
data_indices = boxed_data
return data_indices
class DiscreteAdaBoost(object):
"""
    AdaBoost classifier.
    This implementation produces a series of decision stumps
    (decision trees with two terminal nodes).
"""
def __init__(self):
"""
Attributes:
stump_count (int): current number of stumps generated for model
            stumps (list): list to hold stumps generated by model
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
weights (list): list of weights. Each weight[i] is a
list of weights containing sample weights of
shape[n_samples, 1], corresponding to
stump in stumps[i]
alphas (list): List of alphas, which determine how much
to weight each decision stump in final model.
learned (bool): Keeps track of if model has been fit
"""
self.stump_count = 0
self.stumps = []
self.X = None
self.y = None
self.weights = []
self.alphas = []
self.learned = False
def fit(self, X, y, n_stumps=100):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.array): Target values of shape[n_samples]
n_stumps (int): number of stumps in classifier
Returns: an instance of self
"""
self.X = X
self.y = y
n_samples = len(self.y)
while self.stump_count < n_stumps:
if len(self.weights) == 0:
current_weights = np.ones(n_samples) / n_samples
else:
current_weights = self.weights[-1]
self.add_stump(current_weights)
self.stump_count += 1
self.learned = True
return self
def add_stump(self, weights):
"""
weights (np.array): array of weights of shape[n_samples,] to weight
each sample in current stump
Notes:
Method adds a single decision stump to self.stumps and ALSO
calculates weights of samples for the generated stump
Returns: an instance of self.
self.stumps and self.weights are appended with the
newest stump/weights
"""
stump = ClassificationTree()
stump.fit(self.X, self.y, height=1, weights=weights)
predictions = []
for row in self.X:
prediction = stump.predict(row)
predictions.append(prediction)
current_misclassifications = (predictions != self.y).astype(int)
current_error = np.sum(current_misclassifications * weights) / \
np.sum(weights)
        if current_error == 0:
            # A perfect stump would make the alpha update divide by zero, so
            # skip it and carry unit sample weights into the next round.
            self.weights.append(np.ones(len(weights)))
            self.alphas.append(1)
        else:
            current_alpha = np.log((1 - current_error) / current_error)
            current_weights = weights * np.exp(current_alpha *
                                               current_misclassifications)
            self.alphas.append(current_alpha)
            self.weights.append(current_weights)
self.stumps.append(stump)
return self
def predict(self, x):
"""
Args:
            x (np.array): Data instance of shape [n_features,]
Returns:
float: predicted value
Raises:
ValueError if model has not been fit
Notes:
Currently, only a single data instance can be predicted at a time.
"""
if not self.learned:
            raise ValueError('Fit model first')
stump_predictions = []
for i in range(self.stump_count):
stump_prediction = self.stumps[i].predict(x)
stump_predictions.append(stump_prediction)
predictions = np.array(stump_predictions)
prediction = np.sign(np.sum(predictions * self.alphas))
return prediction
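# Illustrative usage sketch (assumes labels coded as -1/+1, since predict()
# returns np.sign of the weighted stump votes, and that the ClassificationTree
# defined earlier in this module accepts such labels and sample weights).
def _example_discrete_adaboost():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(40, 2))
    y = np.where(X[:, 0] + X[:, 1] > 0, 1, -1)  # toy -1/+1 labels
    model = DiscreteAdaBoost()
    model.fit(X, y, n_stumps=10)
    # predict() handles one instance at a time
    return [model.predict(row) for row in X]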
class GradientBoostingRegression(object):
"""
Gradient boosting regression.
A series of small trees (weak classifiers) combined to produce a
    hypothesis. Each subsequent tree is fit to the gradient of a
    differentiable loss function of the previous model's predictions.
    Currently, only MSE is supported, but any differentiable loss
    function could be used.
"""
def __init__(self):
"""
Attributes:
tree_count (int): current number of trees generated for model
trees (list): list to hold trees generated by model
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
initial_hypothesis (float): Starting hypothesis for model.
                This is just the mean of the target values once the model is fit.
learning_rate (float): Contribution each subsequent tree makes to
the final model.
tree_depth (int): Depth of each tree generated by model.
learned (bool): Keeps track of if model has been fit
"""
self.tree_count = 0
self.trees = []
self.X = None
self.y = None
self.initial_hypothesis = None
self.learning_rate = None
self.tree_depth = None
self.learned = False
def fit(self, X, y, n_trees=20, tree_depth=3, learning_rate=0.1):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.array): Target values of shape[n_samples]
n_trees (int): number of trees in regressor
tree_depth (int): height of each tree in regressor
learning_rate (float): shrinkage parameter. Determines how much
of each new tree to contribute to final hypothesis.
Returns: an instance of self
"""
self.X = X
self.y = y
self.learning_rate = learning_rate
self.tree_depth = tree_depth
self.initial_hypothesis = np.mean(self.y)
while self.tree_count < n_trees:
current_predictions = []
for row in self.X:
current_predictions.append(self.predict(row))
current_predictions = np.array(current_predictions)
current_residuals = -(self.y - current_predictions)
self.add_tree(current_residuals)
self.tree_count += 1
self.learned = True
return self
def add_tree(self, residuals):
"""
residuals (np.array): array of residuals of shape[n_samples,]
calculated from the current model
Notes:
Method adds a single decision tree to self.trees by fitting
the residuals of the current model
Returns: an instance of self.
"""
tree = RegressionTree()
tree.fit(self.X, residuals, self.tree_depth)
self.trees.append(tree)
return self
def predict(self, x):
"""
        Args:
            x (np.array): Data instance of shape [n_features,]
        Returns:
            float: predicted value
        Notes:
            No fitted-model check is performed here because fit() calls
            predict() internally while boosting; calling predict before fit
            fails since initial_hypothesis is None.
            Currently, only a single data instance can be predicted at a time.
"""
prediction = self.initial_hypothesis
for tree in self.trees:
gradient = -tree.predict(x)
prediction += self.learning_rate * gradient
return prediction
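# Illustrative usage sketch (a minimal example fitting the gradient boosting
# regressor on a noisy 1-D toy problem; parameters are arbitrary choices and
# the RegressionTree defined earlier in this module is assumed to work as used
# here).
def _example_gradient_boosting():
    rng = np.random.RandomState(0)
    X = rng.uniform(-3, 3, size=(100, 1))
    y = np.sin(X[:, 0]) + rng.normal(scale=0.1, size=100)
    model = GradientBoostingRegression()
    model.fit(X, y, n_trees=10, tree_depth=2, learning_rate=0.1)
    return model.predict(np.array([0.5]))  # predict a single instance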
class RandomForestRegression(object):
"""
Random Forests are an ensemble method which averages a set of
de-correlated trees.
    In this implementation, a bootstrapped sample is used to generate
    each tree, and each tree uses a subset of the total features.
"""
def __init__(self):
"""
Attributes:
tree_count (int): current number of trees generated for model
trees (list): list to hold trees generated by model
features (list): list to hold features used for each tree.
features[i] holds the features used in trees[i] since each tree
only uses a subset of features
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
tree_depth (int): Depth of each tree generated by model.
learned (bool): Keeps track of if model has been fit
"""
self.tree_count = 0
self.trees = []
self.features = []
self.X = None
self.y = None
self.tree_depth = None
self.learned = False
def fit(self, X, y, n_trees=20, tree_depth=6, bootstrap=True):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.array): Target values of shape[n_samples]
n_trees (int): number of trees in regressor
tree_depth (int): height of each tree in regressor
bootstrap (bool): Whether a bootstrap sample is used for
tree fitting
Returns: an instance of self
"""
self.X = X
self.y = y
n_samples, n_features = np.shape(X)
features_per_tree = int(np.sqrt(n_features))
self.tree_depth = tree_depth
while self.tree_count < n_trees:
current_variables = np.random.choice(np.arange(n_features),
features_per_tree,
replace=False)
self.features.append(current_variables)
if bootstrap:
current_indices = np.random.choice(np.arange(n_samples),
n_samples,
replace=True)
else:
current_indices = np.arange(n_samples)
current_samples = self.X[current_indices]
self.add_tree(current_samples[:, current_variables],
self.y[current_indices])
self.tree_count += 1
self.learned = True
return self
def add_tree(self, X, y):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
Notes:
            This method is used because random forests use a bootstrapped
            sample and only a subset of features
Returns: an instance of self.
"""
current_tree = RegressionTree()
current_tree.fit(X, y, height=self.tree_depth)
self.trees.append(current_tree)
return self
def predict(self, x):
"""
Args:
            x (np.array): Data instance of shape [n_features,]
Returns:
float: predicted value
Raises:
ValueError if model has not been fit
Notes:
Currently, only a single data instance can be predicted at a time.
"""
if not self.learned:
            raise ValueError('Fit model first')
predictions = []
for index, tree in enumerate(self.trees):
current_data = x[self.features[index]]
prediction = tree.predict(current_data)
predictions.append(prediction)
return np.mean(predictions)
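# Illustrative usage sketch: each tree in the forest is fit on a bootstrap
# sample using a random subset of sqrt(n_features) columns, and predictions
# are averaged over trees. The data and parameters below are arbitrary, and
# the RegressionTree defined earlier in this module is assumed to work as
# used here.
def _example_random_forest():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(60, 4))
    y = X[:, 0] - 2.0 * X[:, 2] + rng.normal(scale=0.1, size=60)
    model = RandomForestRegression()
    model.fit(X, y, n_trees=5, tree_depth=3, bootstrap=True)
    return model.predict(X[0])  # predict a single instance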
| christopherjenness/ML-lib | ML/treemethods.py | Python | mit | 31,697 | 0.000126 |
#!/usr/bin/env python3
from math import ceil
lines = []
with open('input.txt', 'r') as f:
lines = f.read().splitlines()
def get_val(line, start_pos, end_pos, lhalf, uhalf, lhalf_char):
    for x in line[start_pos:end_pos]:
        if x == lhalf_char:  # take lower half
            uhalf -= ceil((uhalf - lhalf) / 2)
        else:  # take upper half
            lhalf += ceil((uhalf - lhalf) / 2)
    if lhalf != uhalf:
        raise Exception("Something went wrong: {} != {}".format(lhalf, uhalf))
    return uhalf
#--- challenge 1
seat_ids = []
for boarding_pass in lines:
row = get_val(boarding_pass, 0, 7, 0, 127, 'F')
column = get_val(boarding_pass, 7, 10, 0, 7, 'L')
seat_ids.append(row * 8 + column)
print("Solution to challenge 1: {}".format(max(seat_ids)))
#--- challenge 2
seat_ids.sort()
missing_seat = ""
for x in range(seat_ids[0], seat_ids[-1]):
if x not in seat_ids:
missing_seat = x
print("Solution to challenge 2: {}".format(missing_seat))
| jekhokie/scriptbox | python--advent-of-code/2020/5/solve.py | Python | mit | 960 | 0.015625 |
import json
import re
from SBaaS_LIMS.lims_calibratorsAndMixes_query import lims_calibratorsAndMixes_query
from SBaaS_LIMS.lims_sample_query import lims_sample_query
from .lims_quantitationMethod_query import lims_quantitationMethod_query
from .stage01_quantification_MQResultsTable_query import stage01_quantification_MQResultsTable_query
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from SBaaS_base.sbaas_template_io import sbaas_template_io
from ddt_python.ddt_container import ddt_container
class lims_quantitationMethod_io(lims_quantitationMethod_query,
stage01_quantification_MQResultsTable_query,
lims_calibratorsAndMixes_query,
#lims_msMethod_query,
lims_sample_query,
sbaas_template_io
):
def import_calibration_sampleAndComponents(self, filename):
'''import calibration curve sample and component information'''
data = base_importData();
data.read_csv(filename);
data.format_data();
# split into separate data structures
samplesComponents_data = [];
for d in data.data:
samplesComponents_data.append({'sample_name':d['Sample Name'],
'sample_type':d['Sample Type'],
'met_id':d['Component Group Name']});
data.clear_data();
return samplesComponents_data;
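    # Expected file format: the CSV read above carries the MultiQuant-style
    # headers 'Sample Name', 'Sample Type' and 'Component Group Name'; each
    # row is mapped to a dict with the keys 'sample_name', 'sample_type' and
    # 'met_id' for downstream queries.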
def export_calibrationConcentrations(self, sampleAndComponent_fileName_I, concentrations_fileName_O):
        '''Export calibrator concentrations for copy-and-paste into the "Actual Concentration"
        column in MultiQuant when filtering for Analytes only.'''
#Input:
# sampleAndComponent_fileName_I = .csv file specifying sample_name, sample_type, and component_group_name
#Output:
# concentrations_fileName_O = .csv file specifying sample_name, sample_type, component_group_name, and actual_concentration
concentrations_O = [];
met_id_conv_dict = {'Hexose_Pool_fru_glc-D':'glc-D',
'Pool_2pg_3pg':'3pg'};
#import sampleAndComponents
samplesComponents = [];
samplesComponents = self.import_calibration_sampleAndComponents(sampleAndComponent_fileName_I);
#data = base_importData();
#data.read_csv(sampleAndComponent_fileName_I);
#samplesComponents = data.data;
for sc in samplesComponents:
# if met_id is a pool of metabolites, convert to the metabolite
# that is logged in calibrator tables and standards tables
if sc['met_id'] in list(met_id_conv_dict.keys()):
met_id_conv = met_id_conv_dict[sc['met_id']];
else:
met_id_conv = sc['met_id'];
#query calibrator_id and calibrator_level from sample
calibrator_id,calibrator_level = None,None;
calibrator_id,calibrator_level = self.get_calibratorIDAndLevel_sampleNameAndSampleType_sample(sc['sample_name'],sc['sample_type']);
#query calibrator_concentration from calibrator_concentrations
calibrator_concentration, concentration_units = 'N/A', None;
if calibrator_id and calibrator_level:
calibrator_concentration, concentration_units = self.get_calibratorConcentrationAndUnit_metIDAndCalibratorIDAndLevel_calibratorConcentrations(met_id_conv,calibrator_id,calibrator_level);
concentrations_O.append({'sample_name':sc['sample_name'], 'sample_type':sc['sample_type'],'component_group_name':sc['met_id'], 'actual_concentration':calibrator_concentration});
# write calibration curve concentrations to file
export = base_exportData(concentrations_O);
export.write_dict2csv(concentrations_fileName_O);
def import_quantitationMethod_add(self,QMethod_id_I, filename):
'''table adds'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.add_quantitationMethod(QMethod_id_I, data.data);
data.clear_data();
def export_quantitationMethod_js(self,QMethod_id_I,component_names_I=[],data_dir_I='tmp'):
'''Export the quantitation and calibrators to ddt'''
#get the calibrator data
data_1 = [];
data_2 = [];
data_1a = [];
# get the sample names that were used to generate the calibration curve:
if component_names_I:
component_names = component_names_I;
else:
component_names = [];
component_names = self.get_components(QMethod_id_I);
for cn in component_names:
# get the quant method parameters for each component
fit,weighting,use_area = self.get_quantMethodParameters(QMethod_id_I,cn);
# get the sample names for that component
sample_names = [];
sample_names = self.get_sampleNames_QMethodIDAndComponentNameAndSampleType(QMethod_id_I,cn,sample_type_I='Standard');
if not sample_names: continue;
concentrations = []
ratios = [];
for sn in sample_names:
# get the quant method rows
row = {};
row = self.get_row_sampleNameAndComponentName(sn,cn);
if row and not row is None and not row['concentration_ratio'] is None:
if use_area: row['ratio'] = row['area_ratio'];
else: row['ratio'] = row['height_ratio'];
row['acquisition_date_and_time'] = None;
data_1.append(row);
concentrations.append(row['concentration_ratio']);
ratios.append(row['ratio']);
if not concentrations: continue;
# get the quant method statistics
row = {};
row = self.get_row_QMethodIDAndComponentNamequantitationMethod(QMethod_id_I,cn);
if row:
data_2.append(row);
# generate the line of best fit
min_ratio = min(ratios);
max_ratio = max(ratios);
index_min = [cnt for cnt,x in enumerate(ratios) if x == min_ratio][0];
index_max = [cnt for cnt,x in enumerate(ratios) if x == max_ratio][0];
conc_min = min(concentrations);
conc_max = max(concentrations);
sample_name_min = sample_names[index_min];
sample_name_max = sample_names[index_max];
data_1a.append({'concentration_ratio':row['lloq'],
'ratio':min_ratio,
'component_name':cn,
'sample_name':sample_name_min,
'id':QMethod_id_I});
data_1a.append({'concentration_ratio':row['uloq'],
'ratio':max_ratio,
'component_name':cn,
'sample_name':sample_name_max,
'id':QMethod_id_I});
# dump chart parameters to a js files
data1_keys = [
'id',
'concentration_ratio',
'sample_name',
'component_name',
'ratio',
];
data1_nestkeys = ['component_name'];
data1_keymap = {'xdata':'concentration_ratio',
'ydata':'ratio',
'serieslabel':'component_name',
'featureslabel':'sample_name'};
data2_keys = ['id',
'q1_mass',
'q3_mass',
'met_id',
'component_name',
'is_name',
'fit',
'weighting',
'intercept',
'slope',
'correlation',
'use_area',
'lloq',
'uloq',
'points',
];
data2_nestkeys = ['component_name'];
data2_keymap = {'xdata':'concentration_ratio',
'ydata':'ratio',
'serieslabel':'component_name',
'featureslabel':None};
parameters = {"chart1margin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"chart1width":500,"chart1height":350,
"chart1title":"Metric Plot", "chart1x1axislabel":"sample_name","chart1y1axislabel":"measurement"}
# make the data object
dataobject_O = [{"data":data_1,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
{"data":data_1a,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
{"data":data_2,"datakeys":data2_keys,"datanestkeys":data2_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1','htmltype':'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'scatterlineplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"concentration_ratio","svgy1axislabel":"measurement_ratio",
'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1'};
svgtileparameters_O = {'tileheader':'Regression','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'Regression Statistics','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0,1],"tile3":[2]};
# dump the data to a json file
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = None);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());
def export_quantitationMethods_v1_js(self,QMethod_ids_I = [],component_names_I=[],data_dir_I='tmp'):
'''Export the quantitation and calibrators to ddt'''
#get the calibrator data
data_1 = [];
data_2 = [];
data_1a = [];
for QMethod_id in QMethod_ids_I:
# get the sample names that were used to generate the calibration curve:
if component_names_I:
component_names = component_names_I;
else:
component_names = [];
component_names = self.get_components(QMethod_id);
for cn in component_names:
# get the quant method parameters for each component
fit,weighting,use_area = self.get_quantMethodParameters(QMethod_id,cn);
# get the sample names for that component
sample_names = [];
sample_names = self.get_sampleNames_QMethodIDAndComponentNameAndSampleType(QMethod_id,cn,sample_type_I='Standard');
if not sample_names: continue;
concentrations = []
ratios = [];
for sn in sample_names:
# get the quant method rows
row = {};
row = self.get_row_sampleNameAndComponentName(sn,cn);
if row and not row is None and not row['concentration_ratio'] is None:
if use_area: row['ratio'] = row['area_ratio'];
else: row['ratio'] = row['height_ratio'];
row['acquisition_date_and_time'] = None;
row['id']=QMethod_id;
data_1.append(row);
concentrations.append(row['concentration_ratio']);
ratios.append(row['ratio']);
if not concentrations: continue;
# get the quant method statistics
row = {};
row = self.get_row_QMethodIDAndComponentNamequantitationMethod(QMethod_id,cn);
if row:
data_2.append(row);
# generate the line of best fit
min_ratio = min(ratios);
max_ratio = max(ratios);
index_min = [cnt for cnt,x in enumerate(ratios) if x == min_ratio][0];
index_max = [cnt for cnt,x in enumerate(ratios) if x == max_ratio][0];
conc_min = min(concentrations);
conc_max = max(concentrations);
sample_name_min = sample_names[index_min];
sample_name_max = sample_names[index_max];
data_1a.append({'concentration_ratio':row['lloq'],
'ratio':min_ratio,
'component_name':cn,
'sample_name':sample_name_min,
'id':QMethod_id});
data_1a.append({'concentration_ratio':row['uloq'],
'ratio':max_ratio,
'component_name':cn,
'sample_name':sample_name_max,
'id':QMethod_id});
# dump chart parameters to a js files
data1_keys = [
'id',
'concentration_ratio',
'sample_name',
'component_name',
'ratio',
];
data1_nestkeys = ['component_name'];
data1_keymap = {'xdata':'concentration_ratio',
'ydata':'ratio',
'serieslabel':'component_name',
'featureslabel':'sample_name'};
data2_keys = ['id',
'q1_mass',
'q3_mass',
'met_id',
'component_name',
'is_name',
'fit',
'weighting',
'intercept',
'slope',
'correlation',
'use_area',
'lloq',
'uloq',
'points',
];
data2_nestkeys = ['component_name'];
data2_keymap = {'xdata':'concentration_ratio',
'ydata':'ratio',
'serieslabel':'component_name',
'featureslabel':None};
parameters = {"chart1margin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"chart1width":500,"chart1height":350,
"chart1title":"Metric Plot", "chart1x1axislabel":"sample_name","chart1y1axislabel":"measurement"}
# make the data object
dataobject_O = [{"data":data_1,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
{"data":data_1a,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
{"data":data_2,"datakeys":data2_keys,"datanestkeys":data2_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1','htmltype':'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'scatterlineplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"concentration_ratio","svgy1axislabel":"measurement_ratio",
'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1'};
svgtileparameters_O = {'tileheader':'Regression','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'Regression Statistics','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0,1],"tile3":[2]};
# dump the data to a json file
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = None);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());
def export_quantitationMethods_js(self,analysis_id_I,data_dir_I='tmp'):
'''Export the quantitation and calibrators to ddt'''
#get the calibrator data
data_1 = [];
data_2 = [];
data_1a = [];
#query all the data
data_all = self.getRowsJoin_analysisID_dataStage01QuantificationMQResultsTable_limsQuantitationMethod(analysis_id_I)
#reorganize into a dictionary of (QMethods,component_names)
data_dict = {};
for d in data_all:
unique = (d['quantitation_method_id'],d['component_name']);
if not unique in data_dict.keys():
data_dict[unique] = [];
data_dict[unique].append(d);
#split and modify the data
for QMethod_id,rows in data_dict.items():
concentrations = []
ratios = [];
sample_names = [];
for row in rows:
if row and not row is None and not row['concentration_ratio'] is None:
if row['use_area']: row['ratio'] = row['area_ratio'];
else: row['ratio'] = row['height_ratio'];
row['quantitation_method_id']=rows[0]['quantitation_method_id'];
data_1.append(row);
concentrations.append(row['concentration_ratio']);
ratios.append(row['ratio']);
sample_names.append(row['sample_name']);
if not concentrations: continue;
# get the quant method statistics
data_2.append(row);
# generate the line of best fit
min_ratio = min(ratios);
max_ratio = max(ratios);
index_min = [cnt for cnt,x in enumerate(ratios) if x == min_ratio][0];
index_max = [cnt for cnt,x in enumerate(ratios) if x == max_ratio][0];
conc_min = min(concentrations);
conc_max = max(concentrations);
sample_name_min = sample_names[index_min];
sample_name_max = sample_names[index_max];
data_1a.append({'concentration_ratio':rows[0]['lloq'],
'ratio':min_ratio,
'component_name':rows[0]['component_name'],
'sample_name':sample_name_min,
'quantitation_method_id':rows[0]['quantitation_method_id']});
data_1a.append({'concentration_ratio':rows[0]['uloq'],
'ratio':max_ratio,
'component_name':rows[0]['component_name'],
'sample_name':sample_name_max,
'quantitation_method_id':rows[0]['quantitation_method_id']});
# dump chart parameters to a js files
data1_keys = [
'quantitation_method_id',
'concentration_ratio',
'sample_name',
'component_name',
'ratio',
];
data1_nestkeys = ['component_name'];
data1_keymap = {'xdata':'concentration_ratio',
'ydata':'ratio',
'serieslabel':'component_name',
'featureslabel':'sample_name'};
data2_keys = ['quantitation_method_id',
'q1_mass',
'q3_mass',
'met_id',
'component_name',
'is_name',
'fit',
'weighting',
'intercept',
'slope',
'correlation',
'use_area',
'lloq',
'uloq',
'points',
];
data2_nestkeys = ['component_name'];
data2_keymap = {'xdata':'concentration_ratio',
'ydata':'ratio',
'serieslabel':'component_name',
'featureslabel':None};
parameters = {"chart1margin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"chart1width":500,"chart1height":350,
"chart1title":"Metric Plot", "chart1x1axislabel":"sample_name","chart1y1axislabel":"measurement"}
# make the data object
dataobject_O = [{"data":data_1,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
{"data":data_1a,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
{"data":data_2,"datakeys":data2_keys,"datanestkeys":data2_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1','htmltype':'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'scatterlineplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"concentration_ratio","svgy1axislabel":"measurement_ratio",
'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1'};
svgtileparameters_O = {'tileheader':'Regression','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'Regression Statistics','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0,1],"tile3":[2]};
# dump the data to a json file
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = None);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects()); | dmccloskey/SBaaS_quantification | SBaaS_quantification/lims_quantitationMethod_io.py | Python | mit | 26,562 | 0.03569 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_functional_model():
a = keras.layers.Input(shape=_INPUT_SIZE)
b = keras.layers.Dense(16, activation='relu')(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
return model
def multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name='input_a')
input_b = keras.layers.Input(shape=(16,), name='input_b')
input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
# Read m
interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
model = keras.models.Model(
inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
return model
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_train = keras.utils.to_categorical(y_train)
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(32)
return dataset
def get_ds_test_input_fn():
np.random.seed(_RANDOM_SEED)
_, (x_test, y_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_test = keras.utils.to_categorical(y_test)
dataset = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(32)
return dataset
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = keras.utils.to_categorical(c_train)
c_test = keras.utils.to_categorical(c_test)
d_train = keras.utils.to_categorical(d_train)
d_test = keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
# TPUs currently require fully defined input shapes, drop_remainder ensures
# the input will have fully defined shapes.
if isinstance(distribution, (tpu_strategy.TPUStrategy,
tpu_strategy.TPUStrategyV1)):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(5,), name='input_b')
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name='dense_1')
dense_2 = keras.layers.Dense(7, name='dense_2')
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
return model
def get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution,
x_train, y_train, x_predict):
"""Generates the inputs for correctness check when enable Keras with DS."""
training_epochs = 2
global_batch_size = 64
batch_size = global_batch_size
# TODO(b/118776054): Use global batch size for Keras/DS support.
use_per_core_batch_size = (
with_distribution and
not distributed_training_utils.global_batch_size_supported(
with_distribution))
if use_per_core_batch_size:
batch_size //= with_distribution.num_replicas_in_sync
if use_numpy:
training_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
'epochs': training_epochs,
'shuffle': False,
}
if use_validation_data:
eval_inputs = None
training_inputs['validation_data'] = (x_train, y_train)
else:
eval_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
}
predict_inputs = {
'x': np.array(x_predict, dtype=np.float32),
}
else:
# For dataset inputs, we do not pass batch_size to
# keras.fit/evaluate/predict. The batch size is part of the dataset.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(x_train, y_train))
x = batch_wrapper(
train_dataset, batch_size, with_distribution, repeat=training_epochs)
training_inputs = {
'batch_size': None,
'x': x,
'y': None,
'epochs': training_epochs,
'shuffle': False,
'steps_per_epoch': len(x_train) // global_batch_size,
}
if use_validation_data:
eval_inputs = None # Remove the eval_inputs
eval_dataset = dataset_ops.Dataset.from_tensor_slices(
(x_train, y_train))
x = batch_wrapper(eval_dataset, batch_size, with_distribution)
training_inputs['validation_data'] = x
training_inputs['validation_steps'] = 5
else:
eval_inputs = {
'batch_size': None,
'x': x,
'y': None,
'steps': 20,
}
predict_batch_size = len(x_predict)
if use_per_core_batch_size:
predict_batch_size //= with_distribution.num_replicas_in_sync
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset,
predict_batch_size, with_distribution)
predict_inputs = {
'steps': 1,
'x': predict_dataset,
}
return training_inputs, eval_inputs, predict_inputs
strategies_minus_tpu = [
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
]
tpu_strategies = [
strategy_combinations.tpu_strategy, # steps_per_run=2
strategy_combinations.tpu_strategy_one_step
]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=strategies_minus_tpu,
mode=['graph', 'eager'])
def tpu_strategy_combinations():
return combinations.combine(
distribution=tpu_strategies,
mode=['graph'])
def all_strategy_combinations():
return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
def strategy_and_optimizer_combinations():
return combinations.times(
all_strategy_combinations(),
combinations.combine(optimizer=[
strategy_combinations.adagrad_optimizer_v1_fn,
strategy_combinations.adagrad_optimizer_keras_v2_fn,
strategy_combinations.adam_optimizer_v1_fn,
strategy_combinations.adam_optimizer_keras_v2_fn,
strategy_combinations.gradient_descent_optimizer_v1_fn,
strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
strategy_combinations.rmsprop_optimizer_v1_fn,
strategy_combinations.rmsprop_optimizer_keras_v2_fn
]))
def strategy_and_input_combinations():
return (
combinations.times(
combinations.combine(distribution=strategies_minus_tpu),
combinations.combine(mode=['graph'],
use_numpy=[True, False],
use_validation_data=[True, False])
+ combinations.combine(mode=['eager'],
use_numpy=[False],
use_validation_data=[False])) +
combinations.times(
combinations.combine(distribution=tpu_strategies),
combinations.combine(mode=['graph'],
use_numpy=[True, False],
use_validation_data=[True, False])))
def strategy_for_numpy_input_combinations():
return combinations.combine(
distribution=strategies_minus_tpu + tpu_strategies,
mode=['graph'])
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calling_model_with_numpy_arrays(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
distribute=distribution,
experimental_run_tf_function=False)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
validation_data=(inputs, targets))
# TODO(anjalisridhar): We need tests for when the batch size and steps are
# smaller and results in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calling_model_with_nested_numpy_arrays(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
targets = [output_d_np, output_e_np]
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
# TODO(anjalisridhar): We need tests for when the batch size and steps are
# smaller and results in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(combinations.combine(
distribution=strategies_minus_tpu, mode=['graph']))
def test_numpy_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
inputs = np.zeros((20, 3), np.float32)
targets = np.zeros((20, 4), np.float32)
sample_weights = np.ones((20), np.float32)
model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,
steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_flatten_predict_outputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
outs = model.predict(inputs, steps=1)
      # `predict` returns a list that is equal in length to the number of
      # model outputs.
# In this test our model has two outputs and each element of `outs`
# corresponds to all the samples of one of the model outputs.
self.assertLen(outs, 2)
      # Each of the output samples has a dimension of 7. We should process all
      # of the available input samples (6).
self.assertAllEqual([6, 7], outs[0].shape)
self.assertAllEqual([6, 7], outs[1].shape)
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_calling_model_on_same_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(all_strategy_combinations())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
with self.cached_session():
user_controlled_model = get_model()
user_controlled_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution,
experimental_run_tf_function=False)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset, epochs=2, steps_per_epoch=2, verbose=1,
validation_data=dataset, validation_steps=2, shuffle=False)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
# TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work
# as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict.
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
distribute=distribution,
experimental_run_tf_function=False)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(all_strategy_combinations())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
model = get_model()
loss = 'mse'
model.compile(
optimizer(),
loss,
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_minus_tpu_combinations())
def test_dataset_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat()
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_wrong_input_shape(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_no_batch_input_validation(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
# User forgets to batch the dataset
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
# Lambda layer uses the learning phase.
with self.cached_session():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
optimizer = gradient_descent.GradientDescentOptimizer(0.005)
loss = 'mse'
metrics = ['acc']
model.compile(
optimizer,
loss,
metrics=metrics,
distribute=distribution,
experimental_run_tf_function=False)
batch_size = 8
if isinstance(distribution, mirrored_strategy.CoreMirroredStrategy):
# CoreMirroredStrategy uses global batch size.
batch_size = 8 * distribution.num_replicas_in_sync
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(batch_size)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
# evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(batch_size)
output = model.predict(predict_dataset, steps=10)
# `predict` runs for 10 steps
ref_output = np.ones((160, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
@combinations.generate(strategy_minus_tpu_combinations())
def testOptimizerWithCallbacks(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent_keras.SGD(0.01)
loss = 'mse'
model.compile(
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
grouped_models = distribution.experimental_local_results(
distributed_training_utils.get_distributed_model(
model, ModeKeys.TRAIN))
with distribution.scope():
for m in grouped_models:
self.assertAllClose(0.001, keras.backend.get_value(
m.optimizer.lr), atol=1e-05, rtol=1e-05)
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_unsupported_features(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not '
'supported when input `x` is a dataset or a '
'dataset iterator.+'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported when input '
'`x` is a dataset or a dataset iterator.'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test with not specifying the `steps` argument for dataset with
# infinite cardinality.
dataset = dataset.repeat()
with self.assertRaisesRegexp(ValueError, 'When passing an infinitely '
'repeating dataset, you must specify the '
'`steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError, 'When passing an infinitely '
'repeating dataset, you must specify the '
'`steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError, 'When passing an infinitely '
'repeating dataset, you must specify the '
'`steps` argument'):
model.predict(dataset, verbose=0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_calling_with_unsupported_predefined_callbacks(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
with self.assertRaisesRegexp(ValueError,
'You must specify a Keras Optimizer V2 when '
'using'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
with self.assertRaisesRegexp(ValueError,
'You must specify a Keras Optimizer V2 when '
'using'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.ReduceLROnPlateau()])
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyWithLossMasking(test.TestCase,
parameterized.TestCase):
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_masking(self, distribution):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution,
experimental_run_tf_function=False)
y = np.array([[[1], [1]], [[1], [1]]])
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
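      # With all-ones kernels the unmasked timesteps already predict the target
      # value 1 and the zero timesteps are masked out, so the loss should be 0.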
hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
self.assertEqual(hist.history['loss'][0], 0)
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyWithNormalizationLayer(
test.TestCase, parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_batchnorm_correctness(self, distribution):
with self.cached_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution,
experimental_run_tf_function=False)
      # centered on 5.0, standard deviation 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
x = x.astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, x))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 32, distribution)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x)
predict_dataset = predict_dataset.repeat(100)
predict_dataset = batch_wrapper(predict_dataset, 32, distribution)
model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
out = model.predict(predict_dataset, steps=2)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyCorrectness(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_metric_correctness(self, distribution):
with self.cached_session():
keras.backend.set_image_data_format('channels_last')
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
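      # Labels equal the inputs, so an identity mapping should reach 100%
      # binary accuracy, which is what the assertion below expects.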
# Create identity model.
model = keras.Sequential()
model.add(
keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
metrics=[keras.metrics.BinaryAccuracy()],
distribute=distribution,
experimental_run_tf_function=False)
batch_size = 64
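      # Strategies that do not support a global batch size expect a
      # per-replica batch size, so divide by the number of replicas in sync.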
if not distributed_training_utils.global_batch_size_supported(
distribution):
batch_size //= distribution.num_replicas_in_sync
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10)
self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0])
@combinations.generate(all_strategy_combinations())
def test_eval_metrics_correctness(self, distribution):
with self.cached_session():
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='mae',
metrics=['accuracy', keras.metrics.BinaryAccuracy()],
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
distribute=distribution,
experimental_run_tf_function=False)
# verify correctness of stateful and stateless metrics.
x = np.ones((100, 4)).astype('float32')
y = np.ones((100, 1)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
dataset = batch_wrapper(dataset, 4, distribution)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 1.)
self.assertEqual(outs[2], 1.)
y = np.zeros((100, 1)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
dataset = batch_wrapper(dataset, 4, distribution)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
@combinations.generate(strategy_and_input_combinations())
def test_correctness(self, distribution, use_numpy, use_validation_data):
with self.cached_session():
default_tolerance = 1e-5
tol_table = {}
if isinstance(distribution, (
mirrored_strategy.MirroredStrategy,
mirrored_strategy.CoreMirroredStrategy,
distribute_lib._DefaultDistributionStrategy)): # pylint: disable=protected-access
# TODO(b/119257215): Weights are not exactly the same, so use larger
# tolerance for now. Predict should be related to weights.
tol_table = {
'weights_1': 1e-4,
'weights_2': 1e-4,
'predict_result_1': 1e-4,
}
keras.backend.set_image_data_format('channels_last')
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
# Train, eval, and predict datasets are created with the same input numpy
# arrays.
# TODO(xiejw): Change this back to 10000, once we support final partial
# batch.
num_samples = 9984
x_train = np.random.rand(num_samples, 1)
y_train = 3 * x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
x_predict = [[1.], [2.], [3.], [4.]]
# The model is built once and the initial weights are saved.
# This is used to initialize the model for both the distribution and
      # non-distribution runs. In addition, we add a few non-linear layers to make
# it non-trivial.
def _create_model():
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
return model
model = _create_model()
initial_weights = model.get_weights()
      del model # avoid accidental usage.
def fit_eval_and_predict(with_distribution=None):
model = _create_model()
        # We have initialized the model to the same weights for the distribution
        # and non-distribution runs.
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.5),
metrics=['mse'],
distribute=with_distribution,
experimental_run_tf_function=False)
training_inputs, eval_inputs, predict_inputs = (
get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution,
x_train, y_train, x_predict))
result = {}
result['training_history_1'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_1'] = model.evaluate(**eval_inputs)
result['weights_1'] = model.get_weights()
result['predict_result_1'] = model.predict(**predict_inputs)
# Train and eval again to mimic user's flow.
result['training_history_2'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_2'] = model.evaluate(**eval_inputs)
result['weights_2'] = model.get_weights()
return result
results_with_ds = fit_eval_and_predict(with_distribution=distribution)
results_without_ds = fit_eval_and_predict(with_distribution=None)
# Verify that the weights, training history, eval results, predict outputs
# are the same within some limits of tolerance.
for key in results_with_ds:
if (key.startswith('training_history') and
isinstance(distribution, tpu_strategy.TPUStrategyV1) and
distribution.extended.steps_per_run > 1):
# TODO(b/119894254): Enable this test for all cases once the
# underlying bug is fixed.
continue
tolerance = tol_table.get(key, default_tolerance)
self.assertAllClose(
results_with_ds[key],
results_without_ds[key],
atol=tolerance,
rtol=tolerance,
msg='Fail to assert {}.'.format(key))
if __name__ == '__main__':
test.main()
| chemelnucfin/tensorflow | tensorflow/contrib/distribute/python/keras_backward_compat_test.py | Python | apache-2.0 | 43,076 | 0.011213 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger
class test_message_compose(TestMail):
def setUp(self):
super(test_message_compose, self).setUp()
        # create 'pigs' and 'bird' groups that will be used throughout the various tests
self.group_bird_id = self.mail_group.create(self.cr, self.uid,
{'name': 'Bird', 'description': 'I am angry !'})
def test_00_message_compose_wizard(self):
""" Tests designed for the mail.compose.message wizard updated by email_template. """
cr, uid = self.cr, self.uid
mail_compose = self.registry('mail.compose.message')
self.res_users.write(cr, uid, [uid], {'signature': 'Admin', 'email': 'a@a.a'})
user_admin = self.res_users.browse(cr, uid, uid)
p_a_id = user_admin.partner_id.id
group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
group_bird = self.mail_group.browse(cr, uid, self.group_bird_id)
# Mail data
_subject1 = 'Pigs'
_subject2 = 'Bird'
_body_html1 = 'Fans of Pigs, unite !'
_body_html2 = 'I am angry !'
_attachments = [
{'name': 'First', 'datas_fname': 'first.txt', 'datas': base64.b64encode('My first attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
{'name': 'Second', 'datas_fname': 'second.txt', 'datas': base64.b64encode('My second attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
]
_attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
# Create template on mail.group, with attachments
group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
email_template = self.registry('email.template')
email_template_id = email_template.create(cr, uid, {
'model_id': group_model_id,
'name': 'Pigs Template',
'subject': '${object.name}',
'body_html': '${object.description}',
'user_signature': False,
'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])],
'email_to': 'b@b.b, c@c.c',
'email_cc': 'd@d.d'
})
# ----------------------------------------
# CASE1: comment and save as template
# ----------------------------------------
# 1. Comment on pigs
compose_id = mail_compose.create(cr, uid,
{'subject': 'Forget me subject', 'body': '<p>Dummy body</p>'},
{'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'active_ids': [self.group_pigs_id, self.group_bird_id]})
compose = mail_compose.browse(cr, uid, compose_id)
# 2. Save current composition form as a template
mail_compose.save_as_template(cr, uid, [compose_id], context={'default_model': 'mail.group'})
# Test: email_template subject, body_html, model
last_template_id = email_template.search(cr, uid, [('model', '=', 'mail.group'), ('subject', '=', 'Forget me subject')], limit=1)[0]
self.assertTrue(last_template_id, 'email_template not found for model mail.group, subject Forget me subject')
last_template = email_template.browse(cr, uid, last_template_id)
self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')
# ----------------------------------------
# CASE2: comment with template, save as template
# ----------------------------------------
# 1. Comment on pigs
context = {
'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'default_use_template': False,
'default_template_id': email_template_id,
'active_ids': [self.group_pigs_id, self.group_bird_id]
}
compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
compose = mail_compose.browse(cr, uid, compose_id, context)
onchange_res = compose.onchange_template_id(email_template_id, 'comment', 'mail.group', self.group_pigs_id)['value']
onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
compose.write(onchange_res)
compose.refresh()
message_pids = [partner.id for partner in compose.partner_ids]
partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
# Test: mail.compose.message: subject, body, partner_ids
self.assertEqual(compose.subject, _subject1, 'mail.compose.message subject incorrect')
self.assertIn(_body_html1, compose.body, 'mail.compose.message body incorrect')
self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
# Test: mail.compose.message: attachments (owner has not been modified)
for attach in compose.attachment_ids:
            self.assertEqual(attach.res_model, 'res.partner', 'mail.compose.message attachment res_model through template was overridden')
self.assertEqual(attach.res_id, self.partner_admin_id, 'mail.compose.message attachment res_id incorrect')
self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
'mail.message attachment name / data incorrect')
# Test: mail.message: attachments
mail_compose.send_mail(cr, uid, [compose_id])
group_pigs.refresh()
message_pigs = group_pigs.message_ids[0]
for attach in message_pigs.attachment_ids:
            self.assertEqual(attach.res_model, 'mail.group', 'mail.compose.message attachment res_model through template was overridden')
self.assertEqual(attach.res_id, self.group_pigs_id, 'mail.compose.message attachment res_id incorrect')
self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
'mail.message attachment name / data incorrect')
# ----------------------------------------
# CASE3: mass_mail with template
# ----------------------------------------
        # 1. Mass_mail on pigs and bird, with default_partner_ids set to check it is correctly added
context = {
'default_composition_mode': 'mass_mail',
'default_notify': True,
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'default_template_id': email_template_id,
'default_partner_ids': [p_a_id],
'active_ids': [self.group_pigs_id, self.group_bird_id]
}
compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
compose = mail_compose.browse(cr, uid, compose_id, context)
onchange_res = compose.onchange_template_id(email_template_id, 'mass_mail', 'mail.group', self.group_pigs_id)['value']
onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
compose.write(onchange_res)
compose.refresh()
message_pids = [partner.id for partner in compose.partner_ids]
partner_ids = [p_a_id]
self.assertEqual(compose.subject, '${object.name}', 'mail.compose.message subject incorrect')
self.assertEqual(compose.body, '<p>${object.description}</p>', 'mail.compose.message body incorrect') # todo: check signature
self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
# 2. Post the comment, get created message
mail_compose.send_mail(cr, uid, [compose_id], {'default_res_id': -1, 'active_ids': [self.group_pigs_id, self.group_bird_id]})
group_pigs.refresh()
group_bird.refresh()
message_pigs = group_pigs.message_ids[0]
message_bird = group_bird.message_ids[0]
# Test: subject, body
self.assertEqual(message_pigs.subject, _subject1, 'mail.message subject on Pigs incorrect')
self.assertEqual(message_bird.subject, _subject2, 'mail.message subject on Bird incorrect')
self.assertIn(_body_html1, message_pigs.body, 'mail.message body on Pigs incorrect')
self.assertIn(_body_html2, message_bird.body, 'mail.message body on Bird incorrect')
# Test: partner_ids: p_a_id (default) + 3 newly created partners
# message_pigs_pids = [partner.id for partner in message_pigs.notified_partner_ids]
# message_bird_pids = [partner.id for partner in message_bird.notified_partner_ids]
# partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
# partner_ids.append(p_a_id)
# self.assertEqual(set(message_pigs_pids), set(partner_ids), 'mail.message on pigs incorrect number of notified_partner_ids')
# self.assertEqual(set(message_bird_pids), set(partner_ids), 'mail.message on bird notified_partner_ids incorrect')
# ----------------------------------------
# CASE4: test newly introduced partner_to field
# ----------------------------------------
# get already-created partners back
p_b_id = self.res_partner.search(cr, uid, [('email', '=', 'b@b.b')])[0]
p_c_id = self.res_partner.search(cr, uid, [('email', '=', 'c@c.c')])[0]
p_d_id = self.res_partner.search(cr, uid, [('email', '=', 'd@d.d')])[0]
# modify template: use partner_to, use template and email address in email_to to test all features together
user_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'res.users')])[0]
email_template.write(cr, uid, [email_template_id], {
'model_id': user_model_id,
'body_html': '${object.login}',
'email_to': '${object.email}, c@c.c',
'partner_to': '%i,%i' % (p_b_id, p_c_id),
'email_cc': 'd@d.d',
})
        # partner by email + partner by id (no duplicates)
send_to = [p_a_id, p_b_id, p_c_id, p_d_id]
        # Generate message with default email and partner on template
mail_value = mail_compose.generate_email_for_composer(cr, uid, email_template_id, uid)
self.assertEqual(set(mail_value['partner_ids']), set(send_to), 'mail.message partner_ids list created by template is incorrect')
@mute_logger('openerp.models')
def test_10_email_templating(self):
""" Tests designed for the mail.compose.message wizard updated by email_template. """
cr, uid, context = self.cr, self.uid, {}
# create the email.template on mail.group model
group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
email_template = self.registry('email.template')
email_template_id = email_template.create(cr, uid, {
'model_id': group_model_id,
'name': 'Pigs Template',
'email_from': 'Raoul Grosbedon <raoul@example.com>',
'subject': '${object.name}',
'body_html': '${object.description}',
'user_signature': True,
'email_to': 'b@b.b, c@c.c',
'email_cc': 'd@d.d',
'partner_to': '${user.partner_id.id},%s,%s,-1' % (self.user_raoul.partner_id.id, self.user_bert.partner_id.id)
})
# not force send: email_recipients is not taken into account
msg_id = email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, context=context)
mail = self.mail_mail.browse(cr, uid, msg_id, context=context)
self.assertEqual(mail.subject, 'Pigs', 'email_template: send_mail: wrong subject')
self.assertEqual(mail.email_to, 'b@b.b, c@c.c', 'email_template: send_mail: wrong email_to')
self.assertEqual(mail.email_cc, 'd@d.d', 'email_template: send_mail: wrong email_cc')
self.assertEqual(
set([partner.id for partner in mail.recipient_ids]),
set((self.partner_admin_id, self.user_raoul.partner_id.id, self.user_bert.partner_id.id)),
'email_template: send_mail: wrong management of partner_to')
# force send: take email_recipients into account
email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, force_send=True, context=context)
sent_emails = self._build_email_kwargs_list
email_to_lst = [
['b@b.b', 'c@c.c'], ['Administrator <admin@yourcompany.example.com>'],
['Raoul Grosbedon <raoul@raoul.fr>'], ['Bert Tartignole <bert@bert.fr>']]
self.assertEqual(len(sent_emails), 4, 'email_template: send_mail: 3 valid email recipients + email_to -> should send 4 emails')
for email in sent_emails:
self.assertIn(email['email_to'], email_to_lst, 'email_template: send_mail: wrong email_recipients')
| ncliam/serverpos | openerp/addons/email_template/tests/test_mail.py | Python | agpl-3.0 | 14,322 | 0.004538 |
import Gears as gears
from .. import *
from .Filter import *
class CellLti7(Filter) :
def applyWithArgs(
self,
stimulus,
) :
sequence = stimulus.getSequence().getPythonObject()
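        # Presumably an 8x8 linear time-invariant (LTI) state matrix for this
        # cell model, flattened row by row (eight coefficients per row below).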
stimulus.setLtiMatrix(
[
0, 0.47494, -0.0966925, 0.150786, -0.0647224, 0.0574935, 0.0180677 , 0.00359244 ,
0.383115, 0.865133, 0.366234, -0.100492, -0.017631, 0.0105551, 0.00101862 , 0.000257363 ,
0.330595, -0.366234, 0.768675, 0.175282, 0.1717, -0.0244711, -0.00537899 , -0.000588159 ,
-0.247068, 0.100492, 0.175282, 0.810277, -0.302384, 0.0166167, 0.00747239 , 0.000383462 ,
0.157351, -0.017631, -0.1717, 0.302384, 0.393383, 0.3339, 0.0221983 , 0.00646065 ,
0.0351307, -0.0105551, -0.0244711, 0.0166167, -0.3339, 0.265154, -0.33863 , -0.0135562 ,
-0.00584964, 0.00101862, 0.00537899, -0.00747239, 0.0221983, 0.33863, 0.186403 , -0.308048 ,
0.000798099, -0.000257363, -0.000588159, 0.000383462, -0.00646065, -0.0135562, 0.308048 , 0.11294 ,
]
)
| szecsi/Gears | GearsPy/Project/Components/Temporal/CellLti7.py | Python | gpl-2.0 | 1,209 | 0.033085 |
try:
from setuptools import setup
from setuptools.command.build_py import build_py
setuptools = True
except:
from distutils.core import setup
from distutils.command.build_py import build_py
setuptools = False
import os, re
# XXX: This is a hack
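# It appears to monkey-patch build_py so that the Hy source file 'hytest.hy'
# is installed as the module itself, since build_py only collects .py files.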
def patch(func):
setattr(build_py, func.__name__, func)
@patch
def find_modules(self):
return [('', 'hytest', 'hytest.hy')]
@patch
def get_module_outfile(self, build_dir, *_):
return os.path.join(build_dir, 'hytest.hy')
this_dir = os.path.dirname(__file__)
with open(os.path.join(this_dir, 'README.rst')) as f:
readme = f.read()
with open(os.path.join(this_dir, 'hytest.hy')) as f:
version = re.search(r'\(def __version__ "([^"]+)"\)', f.read()).group(1)
with open(os.path.join(this_dir, 'requirements.txt')) as f:
hy_ver = f.read().strip()
kw = {}
if setuptools:
kw['install_requires'] = hy_ver
setup(
name='HyTest',
version=version,
description='A testing framework for Hy',
long_description=readme,
author='Ryan Gonzalez',
author_email='rymg19@gmail.com',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Testing'
],
requires=[hy_ver.replace('>= ', '(>=')+')'],
scripts=['hytest'],
py_modules=['hytest'],
url='https://github.com/kirbyfan64/hytest',
**kw)
| kirbyfan64/hytest | setup.py | Python | mit | 1,421 | 0.004222 |
from __future__ import unicode_literals
import json
def test_get_version(app, client, current_version):
headers = [('Accept', 'application/json'),
('Content-Type', 'application/json')]
res = client.get('/api/v0/version', headers=headers)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['version'] == current_version
| ecolell/pfamserver | tests/api/v0/test_version.py | Python | agpl-3.0 | 394 | 0 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for embedding layers."""
from absl.testing import parameterized
import tensorflow as tf
from etcmodel import layers as etc_layers
class LayersTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_2d_ids_no_mask(self, use_one_hot_lookup):
embedding_table = tf.constant([
[1.0, -1.0], #
[1.1, -1.1], #
[1.2, -1.2], #
[1.3, -1.3], #
[1.4, -1.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([
[3, 2, 1], #
[4, 0, 4], #
])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
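    # Replace the randomly initialized table with the fixed constants above so
    # that the expected outputs below are deterministic.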
expected = [
[
[1.3, -1.3], #
[1.2, -1.2], #
[1.1, -1.1], #
], #
[
[1.4, -1.4], #
[1.0, -1.0], #
[1.4, -1.4], #
], #
]
result = layer(input_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_2d_ids_with_mask(self, use_one_hot_lookup):
embedding_table = tf.constant([
[1.0, -1.0], #
[1.1, -1.1], #
[1.2, -1.2], #
[1.3, -1.3], #
[1.4, -1.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([
[3, 2, 1], #
[4, -1, 5], #
])
input_mask = tf.constant([
[1, 1, 0], #
[1, 0, 0], #
])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
expected = [
[
[1.3, -1.3], #
[1.2, -1.2], #
[0.0, 0.0], #
], #
[
[1.4, -1.4], #
[0.0, 0.0], #
[0.0, 0.0], #
], #
]
result = layer(input_ids, input_mask=input_mask)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_1d_ids(self, use_one_hot_lookup):
embedding_table = tf.constant([
[1.0, -1.0], #
[1.1, -1.1], #
[1.2, -1.2], #
[1.3, -1.3], #
[1.4, -1.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([1, 0, 0, 3])
input_mask = tf.constant([1, 1, 0, 1])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
expected = [
[1.1, -1.1], #
[1.0, -1.0], #
[0.0, 0.0], #
[1.3, -1.3], #
]
result = layer(input_ids, input_mask=input_mask)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_3d_ids(self, use_one_hot_lookup):
embedding_table = tf.constant([
[1.0, -1.0], #
[1.1, -1.1], #
[1.2, -1.2], #
[1.3, -1.3], #
[1.4, -1.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([[
[3, 2, 1], #
[4, 0, 4], #
]])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
expected = [[
[
[1.3, -1.3], #
[1.2, -1.2], #
[1.1, -1.1], #
], #
[
[1.4, -1.4], #
[1.0, -1.0], #
[1.4, -1.4], #
], #
]]
result = layer(input_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_embedding_lookup_random_init_no_mask(self, use_one_hot_lookup):
vocab_size = 5
embedding_size = 2
input_ids = tf.constant([1, 0, 0, 3])
input_size = input_ids.shape.as_list()[0]
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
use_one_hot_lookup=use_one_hot_lookup)
result = layer(input_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(result)
self.assertAllEqual([input_size, embedding_size], result.shape)
@parameterized.named_parameters(
('no_projection', 0),
('embedding_size_equals_projection_size', 3),
)
def test_embedding_lookup_no_projection(self, projection_size):
# Create an embedding table with width = projection_size
embedding_table = tf.constant([
[1.0, -1.0, 0.5], #
[1.1, -1.1, -0.5], #
[1.2, -1.2, -0.2], #
[1.3, -1.3, 0.3], #
[1.4, -1.4, 0.4], #
])
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([
[3, 2, 1], #
[4, 0, 4], #
])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
projection_size=projection_size,
use_one_hot_lookup=True)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
expected = [
[
[1.3, -1.3, 0.3], #
[1.2, -1.2, -0.2], #
[1.1, -1.1, -0.5], #
], #
[
[1.4, -1.4, 0.4], #
[1.0, -1.0, 0.5], #
[1.4, -1.4, 0.4], #
], #
]
result = layer(input_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
def test_embedding_lookup_with_projection(self):
# Create an embedding table with width != projection_size
embedding_table = tf.constant([
[1.0, -1.0, 0.5], #
[1.1, -1.1, -0.4], #
[1.2, -1.2, -0.5], #
[1.3, -1.3, 0.8], #
[1.4, -1.4, 0.9], #
])
projection_size = 2 # Different from the embedding_dimension.
vocab_size, embedding_size = embedding_table.shape.as_list()
input_ids = tf.constant([
[3, 2, 1], #
[4, 0, 4], #
])
input_mask = tf.constant([
[1, 0, 0], #
[0, 0, 1], #
])
layer = etc_layers.EmbeddingLookup(
vocab_size=vocab_size,
embedding_size=embedding_size,
projection_size=projection_size,
use_one_hot_lookup=True)
layer.build(None) # Shapes are unused so we pass None.
layer.embedding_table = embedding_table
    # Dense layer to use for projection. Note that we have a non-zero
# bias initializer here to ensure that the bias term doesn't get through
# to the masked_ids after projection.
layer.embedding_projection = tf.keras.layers.Dense(
units=projection_size,
activation=None,
use_bias=True,
kernel_initializer='ones',
bias_initializer='ones')
expected = [
[
[1.8, 1.8], # [1.3, -1.3, 0.8] * kernel_initializer + 1 (bias).
[0., 0.], #
[0., 0.], #
], #
[
[0., 0.], #
[0., 0.], #
[1.9, 1.9], # [1.4, -1.4, 0.9] * kernel_initializer + 1 (bias).
], #
]
result = layer(input_ids, input_mask)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected, result)
if __name__ == '__main__':
tf.test.main()
| google-research/google-research | etcmodel/layers/embedding_test.py | Python | apache-2.0 | 9,033 | 0.001661 |
import re
from sublime import Region
import sublime_plugin
class BaseBlockCommand(sublime_plugin.TextCommand):
def _get_row_text(self, row):
if row < 0 or row > self.view.rowcol(self.view.size())[0]:
raise RuntimeError('Cannot find table bounds.')
point = self.view.text_point(row, 0)
region = self.view.line(point)
text = self.view.substr(region)
return text
def get_cursor_position(self):
return self.view.rowcol(self.view.sel()[0].begin())
def get_block_bounds(self):
"""given the cursor position as started point,
returns the limits and indentation"""
row, col = self.get_cursor_position()
upper = lower = row
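        # Expand upwards and downwards from the cursor row until a blank line
        # (or the buffer boundary) stops the scan on each side.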
try:
while self._get_row_text(upper - 1).strip():
upper -= 1
except Exception as e:
print(e)
pass
else:
upper += 1
try:
while self._get_row_text(lower + 1).strip():
lower += 1
except Exception as e:
print(e)
pass
else:
lower -= 1
block_region = Region(self.view.text_point(upper - 1, 0),
self.view.text_point(lower + 2, 0))
lines = [self.view.substr(region) for region in self.view.lines(block_region)]
indent = re.match('^(\s*).*$', self._get_row_text(upper - 1)).group(1)
return block_region, lines, indent
| ericholscher/sublime-rst-completion | helpers.py | Python | bsd-3-clause | 1,480 | 0.001351 |