| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
import os
import shakedown
DEFAULT_NODE_COUNT = 3
PACKAGE_NAME = 'mds-cassandra'
TASK_RUNNING_STATE = 'TASK_RUNNING'
DCOS_URL = shakedown.run_dcos_command('config show core.dcos_url')[0].strip()
# expected SECURITY values: 'permissive', 'strict', 'disabled'
if os.environ.get('SECURITY', '') == 'strict':
print('Using strict mode test configuration')
PRINCIPAL = 'service-acct'
DEFAULT_OPTIONS_DICT = {
"service": {
"user": "nobody",
"principal": PRINCIPAL,
"secret_name": "secret"
}
}
else:
print('Using default test configuration')
PRINCIPAL = 'cassandra-principal'
DEFAULT_OPTIONS_DICT = {}
| akshitjain/dcos-cassandra-service | integration/tests/defaults.py | Python | apache-2.0 | 678 | 0 |
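For context, a minimal sketch of how a test might consume these defaults. The `install_package` callable and the import path are assumptions, not part of the repository above; only `json` comes from the standard library:

```python
# Hypothetical consumer of the defaults above (sketch only).
import json

from defaults import DEFAULT_OPTIONS_DICT, PACKAGE_NAME  # assumed import path


def install_with_defaults(install_package):
    """Install the package with the security-mode-dependent options.

    `install_package(name, options_json)` is an assumed helper, e.g. a thin
    wrapper around the DC/OS CLI; it is not defined in the repository above.
    """
    # In strict mode DEFAULT_OPTIONS_DICT carries the service account and
    # secret name; otherwise it is empty and package defaults apply.
    install_package(PACKAGE_NAME, json.dumps(DEFAULT_OPTIONS_DICT))
```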
import warnings
warnings.simplefilter('ignore', Warning)
from elasticsearch_tests.tests.inputs import *
from elasticsearch_tests.tests.elasticsearch_query import *
from elasticsearch_tests.tests.elasticsearch_backend import *
| manelore/django-haystack | tests/elasticsearch_tests/tests/__init__.py | Python | bsd-3-clause | 227 | 0.013216 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table(u'djangofeeds_category', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('domain', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
))
db.send_create_signal(u'djangofeeds', ['Category'])
# Adding unique constraint on 'Category', fields ['name', 'domain']
db.create_unique(u'djangofeeds_category', ['name', 'domain'])
# Adding model 'Feed'
db.create_table(u'djangofeeds_feed', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('feed_url', self.gf('django.db.models.fields.URLField')(unique=True, max_length=200)),
('description', self.gf('django.db.models.fields.TextField')()),
('link', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('http_etag', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('http_last_modified', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('date_last_refresh', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True)),
('ratio', self.gf('django.db.models.fields.FloatField')(default=0.0)),
('sort', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('date_last_requested', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('freq', self.gf('django.db.models.fields.IntegerField')(default=10800)),
))
db.send_create_signal(u'djangofeeds', ['Feed'])
# Adding M2M table for field categories on 'Feed'
m2m_table_name = db.shorten_name(u'djangofeeds_feed_categories')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('feed', models.ForeignKey(orm[u'djangofeeds.feed'], null=False)),
('category', models.ForeignKey(orm[u'djangofeeds.category'], null=False))
))
db.create_unique(m2m_table_name, ['feed_id', 'category_id'])
# Adding model 'Enclosure'
db.create_table(u'djangofeeds_enclosure', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('type', self.gf('django.db.models.fields.CharField')(max_length=200)),
('length', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal(u'djangofeeds', ['Enclosure'])
# Adding model 'Post'
db.create_table(u'djangofeeds_post', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djangofeeds.Feed'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('link', self.gf('django.db.models.fields.URLField')(max_length=2048)),
('content', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('guid', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('author', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('date_published', self.gf('django.db.models.fields.DateField')()),
('date_updated', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'djangofeeds', ['Post'])
# Adding M2M table for field enclosures on 'Post'
m2m_table_name = db.shorten_name(u'djangofeeds_post_enclosures')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'djangofeeds.post'], null=False)),
('enclosure', models.ForeignKey(orm[u'djangofeeds.enclosure'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'enclosure_id'])
# Adding M2M table for field categories on 'Post'
m2m_table_name = db.shorten_name(u'djangofeeds_post_categories')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'djangofeeds.post'], null=False)),
('category', models.ForeignKey(orm[u'djangofeeds.category'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'category_id'])
def backwards(self, orm):
# Removing unique constraint on 'Category', fields ['name', 'domain']
db.delete_unique(u'djangofeeds_category', ['name', 'domain'])
# Deleting model 'Category'
db.delete_table(u'djangofeeds_category')
# Deleting model 'Feed'
db.delete_table(u'djangofeeds_feed')
# Removing M2M table for field categories on 'Feed'
db.delete_table(db.shorten_name(u'djangofeeds_feed_categories'))
# Deleting model 'Enclosure'
db.delete_table(u'djangofeeds_enclosure')
# Deleting model 'Post'
db.delete_table(u'djangofeeds_post')
# Removing M2M table for field enclosures on 'Post'
db.delete_table(db.shorten_name(u'djangofeeds_post_enclosures'))
# Removing M2M table for field categories on 'Post'
db.delete_table(db.shorten_name(u'djangofeeds_post_categories'))
models = {
u'djangofeeds.category': {
'Meta': {'unique_together': "(('name', 'domain'),)", 'object_name': 'Category'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'djangofeeds.enclosure': {
'Meta': {'object_name': 'Enclosure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'djangofeeds.feed': {
'Meta': {'ordering': "('id',)", 'object_name': 'Feed'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['djangofeeds.Category']", 'symmetrical': 'False'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_refresh': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_last_requested': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'feed_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
'freq': ('django.db.models.fields.IntegerField', [], {'default': '10800'}),
'http_etag': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'http_last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'ratio': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'sort': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
u'djangofeeds.post': {
'Meta': {'object_name': 'Post'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['djangofeeds.Category']", 'symmetrical': 'False'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateField', [], {}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {}),
'enclosures': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['djangofeeds.Enclosure']", 'symmetrical': 'False', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['djangofeeds.Feed']"}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '2048'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['djangofeeds']
| chrisspen/django-feeds | djangofeeds/migrations/0001_initial.py | Python | bsd-2-clause | 10,467 | 0.006879 |
#!/usr/bin/python3
from mymodule import *
sayhi()
# __version__ will not be imported
# print('Version: ', __version__)
| louistin/thinkstation | a_byte_of_python/unit_9_module/mymodule_demo3.py | Python | mit | 114 | 0.009434 |
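The commented-out line in the sample above relies on a general Python rule: `from module import *` skips names that begin with an underscore unless the module lists them in `__all__`. A minimal, self-contained sketch of that behaviour (module and file names here are hypothetical stand-ins for `mymodule`):

```python
# mymodule_sketch.py -- stand-in for the mymodule imported above
__version__ = '0.1'


def sayhi():
    print('Hi, this is mymodule speaking.')

# Without __all__, `from mymodule_sketch import *` exports sayhi() but skips
# underscore-prefixed names such as __version__.  Declaring
# __all__ = ['sayhi', '__version__'] would export both.


# demo_sketch.py -- mirrors mymodule_demo3.py above
# from mymodule_sketch import *
# sayhi()                                          # works
# print(__version__)                               # NameError
# import mymodule_sketch
# print('Version:', mymodule_sketch.__version__)   # explicit access works
```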
#!/bin/sh
"""": # -*-python-*-
bup_python="$(dirname "$0")/bup-python" || exit $?
exec "$bup_python" "$0" ${1+"$@"}
"""
# end of bup preamble
import sys, getopt, socket, subprocess, fcntl, os, select
from bup import options, path
from bup.helpers import *
optspec = """
bup daemon [options...] -- [bup-server options...]
--
l,listen ip address to listen on, defaults to *
p,port port to listen on, defaults to 1982
"""
o = options.Options(optspec, optfunc=getopt.getopt)
(opt, flags, extra) = o.parse(sys.argv[1:])
host = opt.listen
port = opt.port and int(opt.port) or 1982
socks = []
e = None
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as e:
continue
try:
if af == socket.AF_INET6:
log("bup daemon: listening on [%s]:%s\n" % sa[:2])
else:
log("bup daemon: listening on %s:%s\n" % sa[:2])
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(sa)
s.listen(1)
fcntl.fcntl(s.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
except socket.error as e:
s.close()
continue
socks.append(s)
if not socks:
log('bup daemon: listen socket: %s\n' % e.args[1])
sys.exit(1)
try:
while True:
[rl,wl,xl] = select.select(socks, [], [], 60)
for l in rl:
s, src = l.accept()
try:
log("Socket accepted connection from %s\n" % (src,))
fd1 = os.dup(s.fileno())
fd2 = os.dup(s.fileno())
s.close()
sp = subprocess.Popen([path.exe(), 'mux', '--',
path.exe(), 'server']
+ extra, stdin=fd1, stdout=fd2)
finally:
os.close(fd1)
os.close(fd2)
finally:
for l in socks:
l.shutdown(socket.SHUT_RDWR)
l.close()
debug1("bup daemon: done")
| tjanez/bup | cmd/daemon-cmd.py | Python | lgpl-2.1 | 2,128 | 0.003289 |
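The preamble at the top of bup's daemon command is an sh/Python polyglot: sh reads the four-quote line as the no-op `:` builtin and then `exec`s the bundled interpreter, while Python parses the same lines as a throwaway string literal. A minimal sketch of the same trick, substituting plain `python3` for bup's `bup-python` wrapper:

```python
#!/bin/sh
"""": # sh sees the no-op ':' command; Python opens a string literal here
exec python3 "$0" "$@"
"""
# Only Python ever reaches this point; sh replaced itself via exec above.
import sys
print('running under Python', sys.version.split()[0])
```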
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import math
import paramiko
import random
import re
import time
import unicodedata
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import ssh_utils
from cinder import utils as cinder_utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.ibm.storwize_svc import (
replication as storwize_rep)
from cinder.volume.drivers.ibm.storwize_svc import storwize_const
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import utils
from cinder.volume import volume_types
INTERVAL_1_SEC = 1
DEFAULT_TIMEOUT = 15
LOG = logging.getLogger(__name__)
storwize_svc_opts = [
cfg.ListOpt('storwize_svc_volpool_name',
default=['volpool'],
help='Comma separated list of storage system storage '
'pools for volumes.'),
cfg.IntOpt('storwize_svc_vol_rsize',
default=2,
min=-1, max=100,
help='Storage system space-efficiency parameter for volumes '
'(percentage)'),
cfg.IntOpt('storwize_svc_vol_warning',
default=0,
min=-1, max=100,
help='Storage system threshold for volume capacity warnings '
'(percentage)'),
cfg.BoolOpt('storwize_svc_vol_autoexpand',
default=True,
help='Storage system autoexpand parameter for volumes '
'(True/False)'),
cfg.IntOpt('storwize_svc_vol_grainsize',
default=256,
help='Storage system grain size parameter for volumes '
'(32/64/128/256)'),
cfg.BoolOpt('storwize_svc_vol_compression',
default=False,
help='Storage system compression option for volumes'),
cfg.BoolOpt('storwize_svc_vol_easytier',
default=True,
help='Enable Easy Tier for volumes'),
cfg.StrOpt('storwize_svc_vol_iogrp',
default='0',
help='The I/O group in which to allocate volumes. It can be a '
'comma-separated list in which case the driver will select an '
'io_group based on least number of volumes associated with the '
'io_group.'),
cfg.IntOpt('storwize_svc_flashcopy_timeout',
default=120,
min=1, max=600,
help='Maximum number of seconds to wait for FlashCopy to be '
'prepared.'),
cfg.BoolOpt('storwize_svc_multihostmap_enabled',
default=True,
                help='This option no longer has any effect. It is deprecated '
'and will be removed in the next release.',
deprecated_for_removal=True),
cfg.BoolOpt('storwize_svc_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('storwize_svc_stretched_cluster_partner',
default=None,
help='If operating in stretched cluster mode, specify the '
                    'name of the pool in which mirrored copies are stored. '
'Example: "pool2"'),
cfg.StrOpt('storwize_san_secondary_ip',
default=None,
help='Specifies secondary management IP or hostname to be '
'used if san_ip is invalid or becomes inaccessible.'),
cfg.BoolOpt('storwize_svc_vol_nofmtdisk',
default=False,
help='Specifies that the volume not be formatted during '
'creation.'),
cfg.IntOpt('storwize_svc_flashcopy_rate',
default=50,
min=1, max=100,
help='Specifies the Storwize FlashCopy copy rate to be used '
                    'when creating a full volume copy. The default rate '
'is 50, and the valid rates are 1-100.'),
cfg.StrOpt('storwize_svc_mirror_pool',
default=None,
help='Specifies the name of the pool in which mirrored copy '
'is stored. Example: "pool2"'),
cfg.IntOpt('cycle_period_seconds',
default=300,
min=60, max=86400,
help='This defines an optional cycle period that applies to '
'Global Mirror relationships with a cycling mode of multi. '
'A Global Mirror relationship using the multi cycling_mode '
'performs a complete cycle at most once each period. '
'The default is 300 seconds, and the valid seconds '
'are 60-86400.'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts, group=configuration.SHARED_CONF_GROUP)
class StorwizeSSH(object):
"""SSH interface to IBM Storwize family and SVC storage systems."""
def __init__(self, run_ssh):
self._ssh = run_ssh
def _run_ssh(self, ssh_cmd):
try:
return self._ssh(ssh_cmd)
except processutils.ProcessExecutionError as e:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': e.stdout,
'err': e.stderr})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def run_ssh_info(self, ssh_cmd, delim='!', with_header=False):
"""Run an SSH command and return parsed output."""
raw = self._run_ssh(ssh_cmd)
return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim,
with_header=with_header)
def run_ssh_assert_no_output(self, ssh_cmd):
"""Run an SSH command and assert no output returned."""
out, err = self._run_ssh(ssh_cmd)
if len(out.strip()) != 0:
msg = (_('Expected no output from CLI command %(cmd)s, '
'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def run_ssh_check_created(self, ssh_cmd):
"""Run an SSH command and return the ID of the created object."""
out, err = self._run_ssh(ssh_cmd)
try:
match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
return match_obj.group(1)
except (AttributeError, IndexError):
msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
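    # Illustrative note: run_ssh_check_created expects CLI success output of
    # the form "<object>, id [<N>], successfully created" (compare the more
    # specific pattern matched in mkfcmap below) and returns the numeric id.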
def lsnode(self, node_id=None):
with_header = True
ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
if node_id:
with_header = False
ssh_cmd.append(node_id)
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def lslicense(self):
ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]
def lsguicapabilities(self):
ssh_cmd = ['svcinfo', 'lsguicapabilities', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]
def lssystem(self):
ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]
def lsmdiskgrp(self, pool):
ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!',
'"%s"' % pool]
try:
return self.run_ssh_info(ssh_cmd)[0]
except exception.VolumeBackendAPIException as ex:
LOG.warning("Failed to get pool %(pool)s info. "
"Exception: %(ex)s.", {'pool': pool,
'ex': ex})
return None
def lsiogrp(self):
ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsportip(self):
ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
@staticmethod
def _create_port_arg(port_type, port_name):
if port_type == 'initiator':
port = ['-iscsiname']
else:
port = ['-hbawwpn']
port.append(port_name)
return port
def mkhost(self, host_name, port_type, port_name):
port = self._create_port_arg(port_type, port_name)
ssh_cmd = ['svctask', 'mkhost', '-force'] + port
ssh_cmd += ['-name', '"%s"' % host_name]
return self.run_ssh_check_created(ssh_cmd)
def addhostport(self, host, port_type, port_name):
port = self._create_port_arg(port_type, port_name)
ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def lshost(self, host=None):
with_header = True
ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
if host:
with_header = False
ssh_cmd.append('"%s"' % host)
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def add_chap_secret(self, secret, host):
ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def lsiscsiauth(self):
ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfabric(self, wwpn=None, host=None):
ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!']
if wwpn:
ssh_cmd.extend(['-wwpn', wwpn])
elif host:
ssh_cmd.extend(['-host', '"%s"' % host])
else:
msg = (_('Must pass wwpn or host to lsfabric.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return self.run_ssh_info(ssh_cmd, with_header=True)
def mkvdiskhostmap(self, host, vdisk, lun, multihostmap):
"""Map vdisk to host.
If vdisk already mapped and multihostmap is True, use the force flag.
"""
ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host, vdisk]
if lun:
ssh_cmd.insert(ssh_cmd.index(vdisk), '-scsi')
ssh_cmd.insert(ssh_cmd.index(vdisk), lun)
if multihostmap:
ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
try:
self.run_ssh_check_created(ssh_cmd)
result_lun = self.get_vdiskhostmapid(vdisk, host)
if result_lun is None or (lun and lun != result_lun):
msg = (_('mkvdiskhostmap error:\n command: %(cmd)s\n '
'lun: %(lun)s\n result_lun: %(result_lun)s') %
{'cmd': ssh_cmd,
'lun': lun,
'result_lun': result_lun})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return result_lun
except Exception as ex:
if (not multihostmap and hasattr(ex, 'message') and
'CMMVC6071E' in ex.message):
LOG.error('storwize_svc_multihostmap_enabled is set '
'to False, not allowing multi host mapping.')
raise exception.VolumeDriverException(
message=_('CMMVC6071E The VDisk-to-host mapping was not '
'created because the VDisk is already mapped '
'to a host.\n"'))
with excutils.save_and_reraise_exception():
LOG.error('Error mapping VDisk-to-host')
def mkrcrelationship(self, master, aux, system, asyncmirror,
cyclingmode=False):
ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master,
'-aux', aux, '-cluster', system]
if asyncmirror:
ssh_cmd.append('-global')
if cyclingmode:
ssh_cmd.extend(['-cyclingmode', 'multi'])
return self.run_ssh_check_created(ssh_cmd)
def rmrcrelationship(self, relationship, force=False):
ssh_cmd = ['svctask', 'rmrcrelationship']
if force:
ssh_cmd += ['-force']
ssh_cmd += [relationship]
self.run_ssh_assert_no_output(ssh_cmd)
def switchrelationship(self, relationship, aux=True):
primary = 'aux' if aux else 'master'
ssh_cmd = ['svctask', 'switchrcrelationship', '-primary',
primary, relationship]
self.run_ssh_assert_no_output(ssh_cmd)
def startrcrelationship(self, rc_rel, primary=None):
ssh_cmd = ['svctask', 'startrcrelationship', '-force']
if primary:
ssh_cmd.extend(['-primary', primary])
ssh_cmd.append(rc_rel)
self.run_ssh_assert_no_output(ssh_cmd)
def ch_rcrelationship_cycleperiod(self, relationship,
cycle_period_seconds):
# Note: Can only change one attribute at a time,
# so define two ch_rcrelationship_xxx here
if cycle_period_seconds:
ssh_cmd = ['svctask', 'chrcrelationship']
ssh_cmd.extend(['-cycleperiodseconds',
six.text_type(cycle_period_seconds)])
ssh_cmd.append(relationship)
self.run_ssh_assert_no_output(ssh_cmd)
def ch_rcrelationship_changevolume(self, relationship,
changevolume, master):
# Note: Can only change one attribute at a time,
# so define two ch_rcrelationship_xxx here
if changevolume:
ssh_cmd = ['svctask', 'chrcrelationship']
if master:
ssh_cmd.extend(['-masterchange', changevolume])
else:
ssh_cmd.extend(['-auxchange', changevolume])
ssh_cmd.append(relationship)
self.run_ssh_assert_no_output(ssh_cmd)
def stoprcrelationship(self, relationship, access=False):
ssh_cmd = ['svctask', 'stoprcrelationship']
if access:
ssh_cmd.append('-access')
ssh_cmd.append(relationship)
self.run_ssh_assert_no_output(ssh_cmd)
def lsrcrelationship(self, rc_rel):
ssh_cmd = ['svcinfo', 'lsrcrelationship', '-delim', '!', rc_rel]
return self.run_ssh_info(ssh_cmd)
def lspartnership(self, system_name):
key_value = 'name=%s' % system_name
ssh_cmd = ['svcinfo', 'lspartnership', '-filtervalue',
key_value, '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lspartnershipcandidate(self):
ssh_cmd = ['svcinfo', 'lspartnershipcandidate', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def mkippartnership(self, ip_v4, bandwith=1000, backgroundcopyrate=50):
ssh_cmd = ['svctask', 'mkippartnership', '-type', 'ipv4',
'-clusterip', ip_v4, '-linkbandwidthmbits',
six.text_type(bandwith),
'-backgroundcopyrate', six.text_type(backgroundcopyrate)]
return self.run_ssh_assert_no_output(ssh_cmd)
def mkfcpartnership(self, system_name, bandwith=1000,
backgroundcopyrate=50):
ssh_cmd = ['svctask', 'mkfcpartnership', '-linkbandwidthmbits',
six.text_type(bandwith),
'-backgroundcopyrate', six.text_type(backgroundcopyrate),
system_name]
return self.run_ssh_assert_no_output(ssh_cmd)
def chpartnership(self, partnership_id, start=True):
action = '-start' if start else '-stop'
ssh_cmd = ['svctask', 'chpartnership', action, partnership_id]
return self.run_ssh_assert_no_output(ssh_cmd)
def rmvdiskhostmap(self, host, vdisk):
ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host,
'"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdiskhostmap(self, vdisk):
ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)
def lshostvdiskmap(self, host):
ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host]
return self.run_ssh_info(ssh_cmd, with_header=True)
def get_vdiskhostmapid(self, vdisk, host):
resp = self.lsvdiskhostmap(vdisk)
for mapping_info in resp:
if mapping_info['host_name'] == host:
lun_id = mapping_info['SCSI_id']
return lun_id
return None
def rmhost(self, host):
ssh_cmd = ['svctask', 'rmhost', '"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def mkvdisk(self, name, size, units, pool, opts, params):
ssh_cmd = ['svctask', 'mkvdisk', '-name', '"%s"' % name, '-mdiskgrp',
'"%s"' % pool, '-iogrp', six.text_type(opts['iogrp']),
'-size', size, '-unit', units] + params
try:
return self.run_ssh_check_created(ssh_cmd)
except Exception as ex:
if hasattr(ex, 'msg') and 'CMMVC6372W' in ex.msg:
vdisk = self.lsvdisk(name)
if vdisk:
LOG.warning('CMMVC6372W The virtualized storage '
'capacity that the cluster is using is '
'approaching the virtualized storage '
'capacity that is licensed.')
return vdisk['id']
with excutils.save_and_reraise_exception():
LOG.exception('Failed to create vdisk %(vol)s.',
{'vol': name})
def rmvdisk(self, vdisk, force=True):
ssh_cmd = ['svctask', 'rmvdisk']
if force:
ssh_cmd += ['-force']
ssh_cmd += ['"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdisk(self, vdisk):
"""Return vdisk attributes or None if it doesn't exist."""
ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
'"%s"' % vdisk]
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if not err:
return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
with_header=False)[0]
if 'CMMVC5754E' in err:
return None
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def lsvdisks_from_filter(self, filter_name, value):
"""Performs an lsvdisk command, filtering the results as specified.
Returns an iterable for all matching vdisks.
"""
ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
'-filtervalue', '%s=%s' % (filter_name, value)]
return self.run_ssh_info(ssh_cmd, with_header=True)
def chvdisk(self, vdisk, params):
ssh_cmd = ['svctask', 'chvdisk'] + params + ['"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def movevdisk(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, '"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def expandvdisksize(self, vdisk, amount):
ssh_cmd = (
['svctask', 'expandvdisksize', '-size', six.text_type(amount),
'-unit', 'gb', '"%s"' % vdisk])
self.run_ssh_assert_no_output(ssh_cmd)
def mkfcmap(self, source, target, full_copy, copy_rate, consistgrp=None):
ssh_cmd = ['svctask', 'mkfcmap', '-source', '"%s"' % source, '-target',
'"%s"' % target, '-autodelete']
if not full_copy:
ssh_cmd.extend(['-copyrate', '0'])
else:
ssh_cmd.extend(['-copyrate', six.text_type(copy_rate)])
if consistgrp:
ssh_cmd.extend(['-consistgrp', consistgrp])
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if 'successfully created' not in out:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
try:
match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], '
'successfully created', out)
fc_map_id = match_obj.group(1)
except (AttributeError, IndexError):
msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return fc_map_id
def prestartfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'prestartfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def startfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'startfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def prestartfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def startfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def stopfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def chfcmap(self, fc_map_id, copyrate='50', autodel='on'):
ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate,
'-autodelete', autodel, fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def stopfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'stopfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def rmfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdiskfcmappings(self, vdisk):
ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!',
'"%s"' % vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfcmap(self, fc_map_id):
ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue',
'id=%s' % fc_map_id, '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfcconsistgrp(self, fc_consistgrp):
ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp]
out, err = self._ssh(ssh_cmd)
return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
with_header=False)
def mkfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group]
return self.run_ssh_check_created(ssh_cmd)
def rmfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group]
return self.run_ssh_assert_no_output(ssh_cmd)
def addvdiskcopy(self, vdisk, dest_pool, params, auto_delete):
ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
'"%s"' % dest_pool])
if auto_delete:
ssh_cmd += ['-autodelete']
ssh_cmd += ['"%s"' % vdisk]
return self.run_ssh_check_created(ssh_cmd)
def lsvdiskcopy(self, vdisk, copy_id=None):
ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!']
with_header = True
if copy_id:
ssh_cmd += ['-copy', copy_id]
with_header = False
ssh_cmd += ['"%s"' % vdisk]
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def lsvdisksyncprogress(self, vdisk, copy_id):
ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!',
'-copy', copy_id, '"%s"' % vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)[0]
def rmvdiskcopy(self, vdisk, copy_id):
ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, '"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def addvdiskaccess(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp,
'"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def rmvdiskaccess(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def lsportfc(self, node_id):
ssh_cmd = ['svcinfo', 'lsportfc', '-delim', '!',
'-filtervalue', 'node_id=%s' % node_id]
return self.run_ssh_info(ssh_cmd, with_header=True)
def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool, '-copy',
copy_id, '-vdisk', vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
class StorwizeHelpers(object):
    # All the supported QoS keys are saved in this dict. When a new
    # key is added, three values MUST be set:
    # 'default': the value used when the parameter is disabled.
    # 'param': the corresponding parameter in the command.
    # 'type': the type of this value.
WAIT_TIME = 5
svc_qos_keys = {'IOThrottling': {'default': '0',
'param': 'rate',
'type': int}}
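    # Illustrative example: a volume type extra spec of "qos:IOThrottling=500"
    # is parsed by _get_opts_from_specs() below into
    # opts['qos'] = {'IOThrottling': 500}; 'param' names the matching CLI
    # parameter ('rate') and 'type' is used to coerce the string value.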
def __init__(self, run_ssh):
self.ssh = StorwizeSSH(run_ssh)
self.check_fcmapping_interval = 3
@staticmethod
def handle_keyerror(cmd, out):
msg = (_('Could not find key in output of command %(cmd)s: %(out)s.')
% {'out': out, 'cmd': cmd})
raise exception.VolumeBackendAPIException(data=msg)
def compression_enabled(self):
"""Return whether or not compression is enabled for this system."""
resp = self.ssh.lslicense()
keys = ['license_compression_enclosures',
'license_compression_capacity']
for key in keys:
if resp.get(key, '0') != '0':
return True
        # lslicense is not used for the V9000 compression check because
        # compression_enclosures and compression_capacity are always 0.
        # V9000 uses license_scheme 9846 as an indicator and can always
        # do compression.
try:
resp = self.ssh.lsguicapabilities()
if resp.get('license_scheme', '0') == '9846':
return True
except exception.VolumeBackendAPIException:
LOG.exception("Failed to fetch licensing scheme.")
return False
def replication_licensed(self):
"""Return whether or not replication is enabled for this system."""
# Uses product_key as an indicator to check
# whether replication is supported in storage.
try:
resp = self.ssh.lsguicapabilities()
product_key = resp.get('product_key', '0')
if product_key in storwize_const.REP_CAP_DEVS:
return True
except exception.VolumeBackendAPIException as war:
LOG.warning("Failed to run lsguicapability. Exception: %s.", war)
return False
def get_system_info(self):
"""Return system's name, ID, and code level."""
resp = self.ssh.lssystem()
level = resp['code_level']
match_obj = re.search('([0-9].){3}[0-9]', level)
if match_obj is None:
msg = _('Failed to get code level (%s).') % level
raise exception.VolumeBackendAPIException(data=msg)
code_level = match_obj.group().split('.')
return {'code_level': tuple([int(x) for x in code_level]),
'system_name': resp['name'],
'system_id': resp['id']}
def get_pool_attrs(self, pool):
"""Return attributes for the specified pool."""
return self.ssh.lsmdiskgrp(pool)
def is_pool_defined(self, pool_name):
"""Check if vdisk is defined."""
attrs = self.get_pool_attrs(pool_name)
return attrs is not None
def get_available_io_groups(self):
"""Return list of available IO groups."""
iogrps = []
resp = self.ssh.lsiogrp()
for iogrp in resp:
try:
if int(iogrp['node_count']) > 0:
iogrps.append(int(iogrp['id']))
except KeyError:
self.handle_keyerror('lsiogrp', iogrp)
except ValueError:
msg = (_('Expected integer for node_count, '
'svcinfo lsiogrp returned: %(node)s.') %
{'node': iogrp['node_count']})
raise exception.VolumeBackendAPIException(data=msg)
return iogrps
def get_vdisk_count_by_io_group(self):
res = {}
resp = self.ssh.lsiogrp()
for iogrp in resp:
try:
if int(iogrp['node_count']) > 0:
res[int(iogrp['id'])] = int(iogrp['vdisk_count'])
except KeyError:
self.handle_keyerror('lsiogrp', iogrp)
except ValueError:
msg = (_('Expected integer for node_count, '
'svcinfo lsiogrp returned: %(node)s') %
{'node': iogrp['node_count']})
raise exception.VolumeBackendAPIException(data=msg)
return res
def select_io_group(self, state, opts):
selected_iog = 0
iog_list = StorwizeHelpers._get_valid_requested_io_groups(state, opts)
if len(iog_list) == 0:
raise exception.InvalidInput(
reason=_('Given I/O group(s) %(iogrp)s not valid; available '
'I/O groups are %(avail)s.')
% {'iogrp': opts['iogrp'],
'avail': state['available_iogrps']})
iog_vdc = self.get_vdisk_count_by_io_group()
LOG.debug("IO group current balance %s", iog_vdc)
min_vdisk_count = iog_vdc[iog_list[0]]
selected_iog = iog_list[0]
for iog in iog_list:
if iog_vdc[iog] < min_vdisk_count:
min_vdisk_count = iog_vdc[iog]
selected_iog = iog
LOG.debug("Selected io_group is %d", selected_iog)
return selected_iog
def get_volume_io_group(self, vol_name):
vdisk = self.ssh.lsvdisk(vol_name)
if vdisk:
resp = self.ssh.lsiogrp()
for iogrp in resp:
if iogrp['name'] == vdisk['IO_group_name']:
return int(iogrp['id'])
return None
def get_node_info(self):
"""Return dictionary containing information on system's nodes."""
nodes = {}
resp = self.ssh.lsnode()
for node_data in resp:
try:
if node_data['status'] != 'online':
continue
node = {}
node['id'] = node_data['id']
node['name'] = node_data['name']
node['IO_group'] = node_data['IO_group_id']
node['iscsi_name'] = node_data['iscsi_name']
node['WWNN'] = node_data['WWNN']
node['status'] = node_data['status']
node['WWPN'] = []
node['ipv4'] = []
node['ipv6'] = []
node['enabled_protocols'] = []
nodes[node['id']] = node
except KeyError:
self.handle_keyerror('lsnode', node_data)
return nodes
def add_iscsi_ip_addrs(self, storage_nodes):
"""Add iSCSI IP addresses to system node information."""
resp = self.ssh.lsportip()
for ip_data in resp:
try:
state = ip_data['state']
if ip_data['node_id'] in storage_nodes and (
state == 'configured' or state == 'online'):
node = storage_nodes[ip_data['node_id']]
if len(ip_data['IP_address']):
node['ipv4'].append(ip_data['IP_address'])
if len(ip_data['IP_address_6']):
node['ipv6'].append(ip_data['IP_address_6'])
except KeyError:
self.handle_keyerror('lsportip', ip_data)
def add_fc_wwpns(self, storage_nodes):
"""Add FC WWPNs to system node information."""
for key in storage_nodes:
node = storage_nodes[key]
wwpns = set(node['WWPN'])
resp = self.ssh.lsportfc(node_id=node['id'])
for port_info in resp:
if (port_info['type'] == 'fc' and
port_info['status'] == 'active'):
wwpns.add(port_info['WWPN'])
node['WWPN'] = list(wwpns)
LOG.info('WWPN on node %(node)s: %(wwpn)s.',
{'node': node['id'], 'wwpn': node['WWPN']})
def add_chap_secret_to_host(self, host_name):
"""Generate and store a randomly-generated CHAP secret for the host."""
chap_secret = utils.generate_password()
self.ssh.add_chap_secret(chap_secret, host_name)
return chap_secret
def get_chap_secret_for_host(self, host_name):
"""Generate and store a randomly-generated CHAP secret for the host."""
resp = self.ssh.lsiscsiauth()
host_found = False
for host_data in resp:
try:
if host_data['name'] == host_name:
host_found = True
if host_data['iscsi_auth_method'] == 'chap':
return host_data['iscsi_chap_secret']
except KeyError:
self.handle_keyerror('lsiscsiauth', host_data)
if not host_found:
msg = _('Failed to find host %s.') % host_name
raise exception.VolumeBackendAPIException(data=msg)
return None
def get_conn_fc_wwpns(self, host):
wwpns = set()
resp = self.ssh.lsfabric(host=host)
for wwpn in resp.select('local_wwpn'):
if wwpn is not None:
wwpns.add(wwpn)
return list(wwpns)
def get_host_from_connector(self, connector, volume_name=None,
iscsi=False):
"""Return the Storwize host described by the connector."""
LOG.debug('Enter: get_host_from_connector: %s.', connector)
# If we have FC information, we have a faster lookup option
host_name = None
if 'wwpns' in connector and not iscsi:
for wwpn in connector['wwpns']:
resp = self.ssh.lsfabric(wwpn=wwpn)
for wwpn_info in resp:
try:
if (wwpn_info['remote_wwpn'] and
wwpn_info['name'] and
wwpn_info['remote_wwpn'].lower() ==
wwpn.lower()):
host_name = wwpn_info['name']
break
except KeyError:
self.handle_keyerror('lsfabric', wwpn_info)
if host_name:
break
if host_name:
LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
return host_name
def update_host_list(host, host_list):
idx = host_list.index(host)
del host_list[idx]
host_list.insert(0, host)
# That didn't work, so try exhaustive search
hosts_info = self.ssh.lshost()
host_list = list(hosts_info.select('name'))
# If we have a "real" connector, we might be able to find the
# host entry with fewer queries if we move the host entries
# that contain the connector's host property value to the front
# of the list
if 'host' in connector:
# order host_list such that the host entries that
# contain the connector's host name are at the
# beginning of the list
for host in host_list:
if re.search(connector['host'], host):
update_host_list(host, host_list)
# If we have a volume name we have a potential fast path
# for finding the matching host for that volume.
# Add the host_names that have mappings for our volume to the
# head of the list of host names to search them first
if volume_name:
hosts_map_info = self.ssh.lsvdiskhostmap(volume_name)
hosts_map_info_list = list(hosts_map_info.select('host_name'))
# remove the fast path host names from the end of the list
# and move to the front so they are only searched for once.
for host in hosts_map_info_list:
update_host_list(host, host_list)
found = False
for name in host_list:
try:
resp = self.ssh.lshost(host=name)
except exception.VolumeBackendAPIException as ex:
LOG.debug("Exception message: %s", ex.msg)
if 'CMMVC5754E' in ex.msg:
LOG.debug("CMMVC5754E found in CLI exception.")
# CMMVC5754E: The specified object does not exist
# The host has been deleted while walking the list.
# This is a result of a host change on the SVC that
# is out of band to this request.
continue
# unexpected error so reraise it
with excutils.save_and_reraise_exception():
pass
if iscsi:
if 'initiator' in connector:
for iscsi in resp.select('iscsi_name'):
if iscsi == connector['initiator']:
host_name = name
found = True
break
elif 'wwpns' in connector and len(connector['wwpns']):
connector_wwpns = [str(x).lower() for x in connector['wwpns']]
for wwpn in resp.select('WWPN'):
if wwpn and wwpn.lower() in connector_wwpns:
host_name = name
found = True
break
if found:
break
LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
return host_name
def create_host(self, connector, iscsi=False):
"""Create a new host on the storage system.
We create a host name and associate it with the given connection
information. The host name will be a cleaned up version of the given
host name (at most 55 characters), plus a random 8-character suffix to
avoid collisions. The total length should be at most 63 characters.
"""
LOG.debug('Enter: create_host: host %s.', connector['host'])
# Before we start, make sure host name is a string and that we have at
# least one port.
host_name = connector['host']
if not isinstance(host_name, six.string_types):
msg = _('create_host: Host name is not unicode or string.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
ports = []
if iscsi:
if 'initiator' in connector:
ports.append(['initiator', '%s' % connector['initiator']])
else:
msg = _('create_host: No initiators supplied.')
else:
if 'wwpns' in connector:
for wwpn in connector['wwpns']:
ports.append(['wwpn', '%s' % wwpn])
else:
msg = _('create_host: No wwpns supplied.')
if not len(ports):
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# Build a host name for the Storwize host - first clean up the name
if isinstance(host_name, six.text_type):
host_name = unicodedata.normalize('NFKD', host_name).encode(
'ascii', 'replace').decode('ascii')
for num in range(0, 128):
ch = str(chr(num))
if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
host_name = host_name.replace(ch, '-')
        # Storwize doesn't like host names that don't start with a letter or _.
if not re.match('^[A-Za-z]', host_name):
host_name = '_' + host_name
# Add a random 8-character suffix to avoid collisions
rand_id = str(random.randint(0, 99999999)).zfill(8)
host_name = '%s-%s' % (host_name[:55], rand_id)
# Create a host with one port
port = ports.pop(0)
self.ssh.mkhost(host_name, port[0], port[1])
# Add any additional ports to the host
for port in ports:
self.ssh.addhostport(host_name, port[0], port[1])
LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.',
{'host': connector['host'], 'host_name': host_name})
return host_name
def delete_host(self, host_name):
self.ssh.rmhost(host_name)
def map_vol_to_host(self, volume_name, host_name, multihostmap):
"""Create a mapping between a volume to a host."""
LOG.debug('Enter: map_vol_to_host: volume %(volume_name)s to '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
# Check if this volume is already mapped to this host
result_lun = self.ssh.get_vdiskhostmapid(volume_name, host_name)
if result_lun is None:
result_lun = self.ssh.mkvdiskhostmap(host_name, volume_name, None,
multihostmap)
LOG.debug('Leave: map_vol_to_host: LUN %(result_lun)s, volume '
'%(volume_name)s, host %(host_name)s.',
{'result_lun': result_lun,
'volume_name': volume_name,
'host_name': host_name})
return int(result_lun)
def unmap_vol_from_host(self, volume_name, host_name):
"""Unmap the volume and delete the host if it has no more mappings."""
LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
# Check if the mapping exists
resp = self.ssh.lsvdiskhostmap(volume_name)
if not len(resp):
LOG.warning('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to any host found.',
{'vol_name': volume_name})
return host_name
if host_name is None:
if len(resp) > 1:
LOG.warning('unmap_vol_from_host: Multiple mappings of '
'volume %(vol_name)s found, no host '
'specified.', {'vol_name': volume_name})
return
else:
host_name = resp[0]['host_name']
else:
found = False
for h in resp.select('host_name'):
if h == host_name:
found = True
if not found:
LOG.warning('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to host %(host)s found.',
{'vol_name': volume_name, 'host': host_name})
return host_name
# We now know that the mapping exists
self.ssh.rmvdiskhostmap(host_name, volume_name)
LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
return host_name
def check_host_mapped_vols(self, host_name):
return self.ssh.lshostvdiskmap(host_name)
@staticmethod
def build_default_opts(config):
# Ignore capitalization
cluster_partner = config.storwize_svc_stretched_cluster_partner
opt = {'rsize': config.storwize_svc_vol_rsize,
'warning': config.storwize_svc_vol_warning,
'autoexpand': config.storwize_svc_vol_autoexpand,
'grainsize': config.storwize_svc_vol_grainsize,
'compression': config.storwize_svc_vol_compression,
'easytier': config.storwize_svc_vol_easytier,
'iogrp': config.storwize_svc_vol_iogrp,
'qos': None,
'stretched_cluster': cluster_partner,
'replication': False,
'nofmtdisk': config.storwize_svc_vol_nofmtdisk,
'mirror_pool': config.storwize_svc_mirror_pool,
'cycle_period_seconds': config.cycle_period_seconds}
return opt
@staticmethod
def check_vdisk_opts(state, opts):
# Check that grainsize is 32/64/128/256
if opts['grainsize'] not in [32, 64, 128, 256]:
raise exception.InvalidInput(
reason=_('Illegal value specified for '
'storwize_svc_vol_grainsize: set to either '
'32, 64, 128, or 256.'))
# Check that compression is supported
if opts['compression'] and not state['compression_enabled']:
raise exception.InvalidInput(
reason=_('System does not support compression.'))
# Check that rsize is set if compression is set
if opts['compression'] and opts['rsize'] == -1:
raise exception.InvalidInput(
reason=_('If compression is set to True, rsize must '
'also be set (not equal to -1).'))
        # Check that cycle_period_seconds is in the range 60-86400
if opts['cycle_period_seconds'] not in range(60, 86401):
raise exception.InvalidInput(
reason=_('cycle_period_seconds should be integer '
'between 60 and 86400.'))
iogs = StorwizeHelpers._get_valid_requested_io_groups(state, opts)
if len(iogs) == 0:
raise exception.InvalidInput(
reason=_('Given I/O group(s) %(iogrp)s not valid; available '
'I/O groups are %(avail)s.')
% {'iogrp': opts['iogrp'],
'avail': state['available_iogrps']})
if opts['nofmtdisk'] and opts['rsize'] != -1:
raise exception.InvalidInput(
reason=_('If nofmtdisk is set to True, rsize must '
'also be set to -1.'))
@staticmethod
def _get_valid_requested_io_groups(state, opts):
given_iogs = str(opts['iogrp'])
iog_list = given_iogs.split(',')
# convert to int
iog_list = list(map(int, iog_list))
LOG.debug("Requested iogroups %s", iog_list)
LOG.debug("Available iogroups %s", state['available_iogrps'])
filtiog = set(iog_list).intersection(state['available_iogrps'])
iog_list = list(filtiog)
LOG.debug("Filtered (valid) requested iogroups %s", iog_list)
return iog_list
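    # Illustrative example: with opts['iogrp'] = '0,2' and
    # state['available_iogrps'] = [0, 1], the intersection above leaves [0]
    # as the only valid requested I/O group.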
def _get_opts_from_specs(self, opts, specs):
qos = {}
for k, value in specs.items():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
# We generally do not look at capabilities in the driver, but
# replication is a special case where the user asks for
# a volume to be replicated, and we want both the scheduler and
# the driver to act on the value.
if ((not scope or scope == 'capabilities') and
key == 'replication'):
scope = None
key = 'replication'
words = value.split()
if not (words and len(words) == 2 and words[0] == '<is>'):
LOG.error('Replication must be specified as '
'\'<is> True\' or \'<is> False\'.')
del words[0]
value = words[0]
# Add the QoS.
if scope and scope == 'qos':
if key in self.svc_qos_keys.keys():
try:
type_fn = self.svc_qos_keys[key]['type']
value = type_fn(value)
qos[key] = value
except ValueError:
continue
# Any keys that the driver should look at should have the
# 'drivers' scope.
if scope and scope != 'drivers':
continue
if key in opts:
this_type = type(opts[key]).__name__
if this_type == 'int':
value = int(value)
elif this_type == 'bool':
value = strutils.bool_from_string(value)
opts[key] = value
if len(qos) != 0:
opts['qos'] = qos
return opts
def _get_qos_from_volume_metadata(self, volume_metadata):
"""Return the QoS information from the volume metadata."""
qos = {}
for i in volume_metadata:
k = i.get('key', None)
value = i.get('value', None)
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
# Add the QoS.
if scope and scope == 'qos':
if key in self.svc_qos_keys.keys():
try:
type_fn = self.svc_qos_keys[key]['type']
value = type_fn(value)
qos[key] = value
except ValueError:
continue
return qos
def _wait_for_a_condition(self, testmethod, timeout=None,
interval=INTERVAL_1_SEC,
raise_exception=False):
start_time = time.time()
if timeout is None:
timeout = DEFAULT_TIMEOUT
def _inner():
try:
testValue = testmethod()
except Exception as ex:
if raise_exception:
LOG.exception("_wait_for_a_condition: %s"
" execution failed.",
testmethod.__name__)
raise exception.VolumeBackendAPIException(data=ex)
else:
testValue = False
LOG.debug('Helper.'
'_wait_for_condition: %(method_name)s '
'execution failed for %(exception)s.',
{'method_name': testmethod.__name__,
'exception': ex.message})
if testValue:
raise loopingcall.LoopingCallDone()
if int(time.time()) - start_time > timeout:
msg = (_('CommandLineHelper._wait_for_condition: %s timeout.')
% testmethod.__name__)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
timer = loopingcall.FixedIntervalLoopingCall(_inner)
timer.start(interval=interval).wait()
def get_vdisk_params(self, config, state, type_id,
volume_type=None, volume_metadata=None):
"""Return the parameters for creating the vdisk.
Takes volume type and defaults from config options into account.
"""
opts = self.build_default_opts(config)
ctxt = context.get_admin_context()
if volume_type is None and type_id is not None:
volume_type = volume_types.get_volume_type(ctxt, type_id)
if volume_type:
qos_specs_id = volume_type.get('qos_specs_id')
specs = dict(volume_type).get('extra_specs')
# NOTE(vhou): We prefer the qos_specs association
# and over-ride any existing
# extra-specs settings if present
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
# Merge the qos_specs into extra_specs and qos_specs has higher
# priority than extra_specs if they have different values for
# the same key.
specs.update(kvs)
opts = self._get_opts_from_specs(opts, specs)
if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos
and volume_metadata):
qos = self._get_qos_from_volume_metadata(volume_metadata)
if len(qos) != 0:
opts['qos'] = qos
self.check_vdisk_opts(state, opts)
return opts
@staticmethod
def _get_vdisk_create_params(opts, add_copies=False):
easytier = 'on' if opts['easytier'] else 'off'
if opts['rsize'] == -1:
params = []
if opts['nofmtdisk']:
params.append('-nofmtdisk')
else:
params = ['-rsize', '%s%%' % str(opts['rsize']),
'-autoexpand', '-warning',
'%s%%' % str(opts['warning'])]
if not opts['autoexpand']:
params.remove('-autoexpand')
if opts['compression']:
params.append('-compressed')
else:
params.extend(['-grainsize', str(opts['grainsize'])])
if add_copies and opts['mirror_pool']:
params.extend(['-copies', '2'])
params.extend(['-easytier', easytier])
return params
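    # Illustrative example: with the option defaults above (rsize=2, warning=0,
    # autoexpand=True, grainsize=256, compression=False, easytier=True) and
    # add_copies=False, this returns ['-rsize', '2%', '-autoexpand',
    # '-warning', '0%', '-grainsize', '256', '-easytier', 'on'].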
def create_vdisk(self, name, size, units, pool, opts):
LOG.debug('Enter: create_vdisk: vdisk %s.', name)
mdiskgrp = pool
if opts['mirror_pool']:
if not self.is_pool_defined(opts['mirror_pool']):
raise exception.InvalidInput(
reason=_('The pool %s in which mirrored copy is stored '
'is invalid') % opts['mirror_pool'])
            # For a mirrored volume, SVC expects the mdiskgrp argument in
            # the form pool:mirror_pool
mdiskgrp = '%s:%s' % (pool, opts['mirror_pool'])
params = self._get_vdisk_create_params(
opts, add_copies=True if opts['mirror_pool'] else False)
self.ssh.mkvdisk(name, size, units, mdiskgrp, opts, params)
LOG.debug('Leave: _create_vdisk: volume %s.', name)
def get_vdisk_attributes(self, vdisk):
attrs = self.ssh.lsvdisk(vdisk)
return attrs
def is_vdisk_defined(self, vdisk_name):
"""Check if vdisk is defined."""
attrs = self.get_vdisk_attributes(vdisk_name)
return attrs is not None
def find_vdisk_copy_id(self, vdisk, pool):
resp = self.ssh.lsvdiskcopy(vdisk)
for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
if mdisk_grp == pool:
return copy_id
msg = _('Failed to find a vdisk copy in the expected pool.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def get_vdisk_copy_attrs(self, vdisk, copy_id):
return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
def get_vdisk_copies(self, vdisk):
copies = {'primary': None,
'secondary': None}
resp = self.ssh.lsvdiskcopy(vdisk)
for copy_id, status, sync, primary, mdisk_grp in (
resp.select('copy_id', 'status', 'sync',
'primary', 'mdisk_grp_name')):
copy = {'copy_id': copy_id,
'status': status,
'sync': sync,
'primary': primary,
'mdisk_grp_name': mdisk_grp,
'sync_progress': None}
if copy['sync'] != 'yes':
progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
copy['sync_progress'] = progress_info['progress']
if copy['primary'] == 'yes':
copies['primary'] = copy
else:
copies['secondary'] = copy
return copies
def _prepare_fc_map(self, fc_map_id, timeout):
self.ssh.prestartfcmap(fc_map_id)
mapping_ready = False
max_retries = (timeout // self.WAIT_TIME) + 1
for try_number in range(1, max_retries):
mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
if (mapping_attrs is None or
'status' not in mapping_attrs):
break
if mapping_attrs['status'] == 'prepared':
mapping_ready = True
break
elif mapping_attrs['status'] == 'stopped':
self.ssh.prestartfcmap(fc_map_id)
elif mapping_attrs['status'] != 'preparing':
                msg = (_('Unexpected mapping status %(status)s for mapping '
'%(id)s. Attributes: %(attr)s.')
% {'status': mapping_attrs['status'],
'id': fc_map_id,
'attr': mapping_attrs})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
greenthread.sleep(self.WAIT_TIME)
if not mapping_ready:
            msg = (_('Mapping %(id)s prepare failed to complete within the '
'allotted %(to)d seconds timeout. Terminating.')
% {'id': fc_map_id,
'to': timeout})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def start_fc_consistgrp(self, fc_consistgrp):
self.ssh.startfcconsistgrp(fc_consistgrp)
def create_fc_consistgrp(self, fc_consistgrp):
self.ssh.mkfcconsistgrp(fc_consistgrp)
def delete_fc_consistgrp(self, fc_consistgrp):
self.ssh.rmfcconsistgrp(fc_consistgrp)
def stop_fc_consistgrp(self, fc_consistgrp):
self.ssh.stopfcconsistgrp(fc_consistgrp)
def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state,
config, timeout):
model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
snapshots_model_update = []
try:
for snapshot in snapshots:
opts = self.get_vdisk_params(config, state,
snapshot['volume_type_id'])
volume = snapshot.volume
if not volume:
msg = (_("Can't get volume from snapshot: %(id)s")
% {"id": snapshot.id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
pool = utils.extract_host(volume.host, 'pool')
self.create_flashcopy_to_consistgrp(snapshot['volume_name'],
snapshot['name'],
fc_consistgrp,
config, opts, False,
pool=pool)
self.prepare_fc_consistgrp(fc_consistgrp, timeout)
self.start_fc_consistgrp(fc_consistgrp)
            # The storage has a limit of 128 FlashCopy consistency groups.
            # After starting the CG we delete it to stay under that limit;
            # Cinder itself maintains the CG-to-snapshots relationship.
self.delete_fc_consistgrp(fc_consistgrp)
except exception.VolumeBackendAPIException as err:
model_update['status'] = fields.GroupSnapshotStatus.ERROR
# Release cg
self.delete_fc_consistgrp(fc_consistgrp)
LOG.error("Failed to create CGSnapshot. "
"Exception: %s.", err)
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'],
'status': model_update['status']})
return model_update, snapshots_model_update
def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots):
"""Delete flashcopy maps and consistent group."""
model_update = {'status': fields.GroupSnapshotStatus.DELETED}
snapshots_model_update = []
try:
for snapshot in snapshots:
self.delete_vdisk(snapshot['name'], True)
except exception.VolumeBackendAPIException as err:
model_update['status'] = (
fields.GroupSnapshotStatus.ERROR_DELETING)
LOG.error("Failed to delete the snapshot %(snap)s of "
"CGSnapshot. Exception: %(exception)s.",
{'snap': snapshot['name'], 'exception': err})
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'],
'status': model_update['status']})
return model_update, snapshots_model_update
def prepare_fc_consistgrp(self, fc_consistgrp, timeout):
"""Prepare FC Consistency Group."""
self.ssh.prestartfcconsistgrp(fc_consistgrp)
def prepare_fc_consistgrp_success():
mapping_ready = False
mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp)
            if (mapping_attrs is None or
                    'status' not in mapping_attrs):
                # The consistency group attributes are not available yet;
                # report not ready rather than dereferencing None.
                return mapping_ready
if mapping_attrs['status'] == 'prepared':
mapping_ready = True
elif mapping_attrs['status'] == 'stopped':
self.ssh.prestartfcconsistgrp(fc_consistgrp)
elif mapping_attrs['status'] != 'preparing':
                msg = (_('Unexpected mapping status %(status)s for mapping '
'%(id)s. Attributes: %(attr)s.') %
{'status': mapping_attrs['status'],
'id': fc_consistgrp,
'attr': mapping_attrs})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return mapping_ready
self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout)
def create_cg_from_source(self, group, fc_consistgrp,
sources, targets, state,
config, timeout):
"""Create consistence group from source"""
LOG.debug('Enter: create_cg_from_source: cg %(cg)s'
' source %(source)s, target %(target)s',
{'cg': fc_consistgrp, 'source': sources, 'target': targets})
model_update = {'status': fields.GroupStatus.AVAILABLE}
ctxt = context.get_admin_context()
try:
for source, target in zip(sources, targets):
opts = self.get_vdisk_params(config, state,
source['volume_type_id'])
pool = utils.extract_host(target['host'], 'pool')
self.create_flashcopy_to_consistgrp(source['name'],
target['name'],
fc_consistgrp,
config, opts,
True, pool=pool)
self.prepare_fc_consistgrp(fc_consistgrp, timeout)
self.start_fc_consistgrp(fc_consistgrp)
self.delete_fc_consistgrp(fc_consistgrp)
volumes_model_update = self._get_volume_model_updates(
ctxt, targets, group['id'], model_update['status'])
except exception.VolumeBackendAPIException as err:
model_update['status'] = fields.GroupStatus.ERROR
volumes_model_update = self._get_volume_model_updates(
ctxt, targets, group['id'], model_update['status'])
with excutils.save_and_reraise_exception():
# Release cg
self.delete_fc_consistgrp(fc_consistgrp)
LOG.error("Failed to create CG from CGsnapshot. "
"Exception: %s", err)
return model_update, volumes_model_update
LOG.debug('Leave: create_cg_from_source.')
return model_update, volumes_model_update
def _get_volume_model_updates(self, ctxt, volumes, cgId,
status='available'):
"""Update the volume model's status and return it."""
volume_model_updates = []
LOG.info("Updating status for CG: %(id)s.",
{'id': cgId})
if volumes:
for volume in volumes:
volume_model_updates.append({'id': volume['id'],
'status': status})
else:
LOG.info("No volume found for CG: %(cg)s.",
{'cg': cgId})
return volume_model_updates
def run_flashcopy(self, source, target, timeout, copy_rate,
full_copy=True):
"""Create a FlashCopy mapping from the source to the target."""
LOG.debug('Enter: run_flashcopy: execute FlashCopy from source '
'%(source)s to target %(target)s.',
{'source': source, 'target': target})
fc_map_id = self.ssh.mkfcmap(source, target, full_copy, copy_rate)
self._prepare_fc_map(fc_map_id, timeout)
self.ssh.startfcmap(fc_map_id)
LOG.debug('Leave: run_flashcopy: FlashCopy started from '
'%(source)s to %(target)s.',
{'source': source, 'target': target})
def create_flashcopy_to_consistgrp(self, source, target, consistgrp,
config, opts, full_copy=False,
pool=None):
"""Create a FlashCopy mapping and add to consistent group."""
        LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
                  ' from source %(source)s to target %(target)s, '
                  'then add the flashcopy to %(cg)s.',
{'source': source, 'target': target, 'cg': consistgrp})
src_attrs = self.get_vdisk_attributes(source)
if src_attrs is None:
msg = (_('create_copy: Source vdisk %(src)s '
'does not exist.') % {'src': source})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
src_size = src_attrs['capacity']
# In case we need to use a specific pool
if not pool:
pool = src_attrs['mdisk_grp_name']
opts['iogrp'] = src_attrs['IO_group_id']
self.create_vdisk(target, src_size, 'b', pool, opts)
self.ssh.mkfcmap(source, target, full_copy,
config.storwize_svc_flashcopy_rate,
consistgrp=consistgrp)
LOG.debug('Leave: create_flashcopy_to_consistgrp: '
'FlashCopy started from %(source)s to %(target)s.',
{'source': source, 'target': target})
def _get_vdisk_fc_mappings(self, vdisk):
"""Return FlashCopy mappings that this vdisk is associated with."""
mapping_ids = []
resp = self.ssh.lsvdiskfcmappings(vdisk)
for id in resp.select('id'):
mapping_ids.append(id)
return mapping_ids
def _get_flashcopy_mapping_attributes(self, fc_map_id):
resp = self.ssh.lsfcmap(fc_map_id)
if not len(resp):
return None
return resp[0]
def _get_flashcopy_consistgrp_attr(self, fc_map_id):
resp = self.ssh.lsfcconsistgrp(fc_map_id)
if not len(resp):
return None
return resp[0]
def _check_vdisk_fc_mappings(self, name,
allow_snaps=True, allow_fctgt=False):
"""FlashCopy mapping check helper."""
LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s.', name)
mapping_ids = self._get_vdisk_fc_mappings(name)
wait_for_copy = False
for map_id in mapping_ids:
attrs = self._get_flashcopy_mapping_attributes(map_id)
# We should ignore GMCV flash copies
if not attrs or 'yes' == attrs['rc_controlled']:
continue
source = attrs['source_vdisk_name']
target = attrs['target_vdisk_name']
copy_rate = attrs['copy_rate']
status = attrs['status']
if allow_fctgt and target == name and status == 'copying':
self.ssh.stopfcmap(map_id)
attrs = self._get_flashcopy_mapping_attributes(map_id)
if attrs:
status = attrs['status']
if copy_rate == '0':
if source == name:
# Vdisk with snapshots. Return False if snapshot
# not allowed.
if not allow_snaps:
raise loopingcall.LoopingCallDone(retvalue=False)
self.ssh.chfcmap(map_id, copyrate='50', autodel='on')
wait_for_copy = True
else:
# A snapshot
if target != name:
msg = (_('Vdisk %(name)s not involved in '
'mapping %(src)s -> %(tgt)s.') %
{'name': name, 'src': source, 'tgt': target})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if status in ['copying', 'prepared']:
self.ssh.stopfcmap(map_id)
# Need to wait for the fcmap to change to
# stopped state before remove fcmap
wait_for_copy = True
elif status in ['stopping', 'preparing']:
wait_for_copy = True
else:
self.ssh.rmfcmap(map_id)
# Case 4: Copy in progress - wait and will autodelete
else:
if status == 'prepared':
self.ssh.stopfcmap(map_id)
self.ssh.rmfcmap(map_id)
elif status in ['idle_or_copied', 'stopped']:
# Prepare failed or stopped
self.ssh.rmfcmap(map_id)
else:
wait_for_copy = True
if not wait_for_copy or not len(mapping_ids):
raise loopingcall.LoopingCallDone(retvalue=True)
def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True,
allow_fctgt=False):
"""Ensure vdisk has no flashcopy mappings."""
timer = loopingcall.FixedIntervalLoopingCall(
self._check_vdisk_fc_mappings, name,
allow_snaps, allow_fctgt)
# Create a timer greenthread. The default volume service heart
# beat is every 10 seconds. The flashcopy usually takes hours
# before it finishes. Don't set the sleep interval shorter
# than the heartbeat. Otherwise volume service heartbeat
# will not be serviced.
LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s.',
name)
ret = timer.start(interval=self.check_fcmapping_interval).wait()
timer.stop()
return ret
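    # Illustrative sketch (not part of the upstream driver and never called
    # by it): the FixedIntervalLoopingCall pattern used above in miniature.
    # The worker raises LoopingCallDone with a return value once its
    # condition holds, and timer.start(...).wait() hands that value back.
    @staticmethod
    def _example_wait_for(condition_fn, interval=10):
        def _worker():
            if condition_fn():
                raise loopingcall.LoopingCallDone(retvalue=True)
        timer = loopingcall.FixedIntervalLoopingCall(_worker)
        ret = timer.start(interval=interval).wait()
        timer.stop()
        return ret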
def start_relationship(self, volume_name, primary=None):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name']:
self.ssh.startrcrelationship(vol_attrs['RC_name'], primary)
def stop_relationship(self, volume_name, access=False):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name']:
self.ssh.stoprcrelationship(vol_attrs['RC_name'], access=access)
def create_relationship(self, master, aux, system, asyncmirror,
cyclingmode=False, masterchange=None,
cycle_period_seconds=None):
        rc_id = None
        try:
rc_id = self.ssh.mkrcrelationship(master, aux, system,
asyncmirror, cyclingmode)
except exception.VolumeBackendAPIException as e:
            # CMMVC5959E is the code in Storwize storage, meaning that
# there is a relationship that already has this name on the
# master cluster.
            if 'CMMVC5959E' not in six.text_type(e):
# If there is no relation between the primary and the
# secondary back-end storage, the exception is raised.
raise
if rc_id:
            # We need to set up the master and aux change volumes for GMCV
            # before we can start the remote relationship. The aux change
            # volume must be set on the target site.
if cycle_period_seconds:
self.change_relationship_cycleperiod(master,
cycle_period_seconds)
if masterchange:
self.change_relationship_changevolume(master,
masterchange, True)
else:
self.start_relationship(master)
def change_relationship_changevolume(self, volume_name,
change_volume, master):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name'] and change_volume:
self.ssh.ch_rcrelationship_changevolume(vol_attrs['RC_name'],
change_volume, master)
def change_relationship_cycleperiod(self, volume_name,
cycle_period_seconds):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name'] and cycle_period_seconds:
self.ssh.ch_rcrelationship_cycleperiod(vol_attrs['RC_name'],
cycle_period_seconds)
def delete_relationship(self, volume_name):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name']:
self.ssh.rmrcrelationship(vol_attrs['RC_name'], True)
def get_relationship_info(self, volume_name):
vol_attrs = self.get_vdisk_attributes(volume_name)
if not vol_attrs or not vol_attrs['RC_name']:
LOG.info("Unable to get remote copy information for "
"volume %s", volume_name)
return
relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
return relationship[0] if len(relationship) > 0 else None
def delete_rc_volume(self, volume_name, target_vol=False):
vol_name = volume_name
if target_vol:
vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + volume_name
try:
rel_info = self.get_relationship_info(vol_name)
if rel_info:
self.delete_relationship(vol_name)
# Delete change volume
self.delete_vdisk(
storwize_const.REPLICA_CHG_VOL_PREFIX + vol_name, False)
self.delete_vdisk(vol_name, False)
except Exception as e:
            msg = (_('Unable to delete the volume %(vol)s. '
                     'Exception: %(err)s.') %
                   {'vol': vol_name, 'err': e})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
def switch_relationship(self, relationship, aux=True):
self.ssh.switchrelationship(relationship, aux)
def get_partnership_info(self, system_name):
partnership = self.ssh.lspartnership(system_name)
return partnership[0] if len(partnership) > 0 else None
def get_partnershipcandidate_info(self, system_name):
candidates = self.ssh.lspartnershipcandidate()
for candidate in candidates:
if system_name == candidate['name']:
return candidate
return None
def mkippartnership(self, ip_v4, bandwith=1000, copyrate=50):
self.ssh.mkippartnership(ip_v4, bandwith, copyrate)
def mkfcpartnership(self, system_name, bandwith=1000, copyrate=50):
self.ssh.mkfcpartnership(system_name, bandwith, copyrate)
def chpartnership(self, partnership_id):
self.ssh.chpartnership(partnership_id)
def delete_vdisk(self, vdisk, force):
"""Ensures that vdisk is not part of FC mapping and deletes it."""
LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk)
if not self.is_vdisk_defined(vdisk):
LOG.info('Tried to delete non-existent vdisk %s.', vdisk)
return
self.ensure_vdisk_no_fc_mappings(vdisk, allow_snaps=True,
allow_fctgt=True)
self.ssh.rmvdisk(vdisk, force=force)
LOG.debug('Leave: delete_vdisk: vdisk %s.', vdisk)
def create_copy(self, src, tgt, src_id, config, opts,
full_copy, pool=None):
"""Create a new snapshot using FlashCopy."""
LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.',
{'tgt': tgt, 'src': src})
src_attrs = self.get_vdisk_attributes(src)
if src_attrs is None:
msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
'does not exist.') % {'src': src, 'src_id': src_id})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
src_size = src_attrs['capacity']
# In case we need to use a specific pool
if not pool:
pool = src_attrs['mdisk_grp_name']
opts['iogrp'] = src_attrs['IO_group_id']
self.create_vdisk(tgt, src_size, 'b', pool, opts)
timeout = config.storwize_svc_flashcopy_timeout
try:
self.run_flashcopy(src, tgt, timeout,
config.storwize_svc_flashcopy_rate,
full_copy=full_copy)
except Exception:
with excutils.save_and_reraise_exception():
self.delete_vdisk(tgt, True)
        LOG.debug('Leave: create_copy: snapshot %(tgt)s from '
'vdisk %(src)s.',
{'tgt': tgt, 'src': src})
def extend_vdisk(self, vdisk, amount):
self.ssh.expandvdisksize(vdisk, amount)
def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config,
auto_delete=False):
"""Add a vdisk copy in the given pool."""
resp = self.ssh.lsvdiskcopy(vdisk)
if len(resp) > 1:
msg = (_('add_vdisk_copy failed: A copy of volume %s exists. '
'Adding another copy would exceed the limit of '
'2 copies.') % vdisk)
raise exception.VolumeDriverException(message=msg)
orig_copy_id = resp[0].get("copy_id", None)
if orig_copy_id is None:
msg = (_('add_vdisk_copy started without a vdisk copy in the '
'expected pool.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if volume_type is None:
opts = self.get_vdisk_params(config, state, None)
else:
opts = self.get_vdisk_params(config, state, volume_type['id'],
volume_type=volume_type)
params = self._get_vdisk_create_params(opts)
try:
new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params,
auto_delete)
except exception.VolumeBackendAPIException as e:
            msg = (_('Unable to add vdiskcopy for volume %(vol)s. '
                     'Exception: %(err)s.') %
                   {'vol': vdisk, 'err': e})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
return (orig_copy_id, new_copy_id)
def is_vdisk_copy_synced(self, vdisk, copy_id):
sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync']
if sync == 'yes':
return True
return False
def rm_vdisk_copy(self, vdisk, copy_id):
self.ssh.rmvdiskcopy(vdisk, copy_id)
def lsvdiskcopy(self, vdisk, copy_id=None):
return self.ssh.lsvdiskcopy(vdisk, copy_id)
@staticmethod
def can_migrate_to_host(host, state):
if 'location_info' not in host['capabilities']:
return None
info = host['capabilities']['location_info']
try:
(dest_type, dest_id, dest_pool) = info.split(':')
except ValueError:
return None
if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']):
return None
return dest_pool
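    # Illustrative sketch (not part of the upstream driver and never called
    # by it): the 'location_info' capability string can_migrate_to_host()
    # parses has the form '<driver>:<system_id>:<pool>'. The system id and
    # pool name below are made up.
    @classmethod
    def _example_can_migrate_to_host(cls):
        host = {'capabilities': {'location_info':
                                 'StorwizeSVCDriver:000002032100613E:pool2'}}
        state = {'system_id': '000002032100613E'}
        # Returns 'pool2' because both the driver name and system id match.
        return cls.can_migrate_to_host(host, state)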
def add_vdisk_qos(self, vdisk, qos):
"""Add the QoS configuration to the volume."""
for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
def update_vdisk_qos(self, vdisk, qos):
"""Update all the QoS in terms of a key and value.
svc_qos_keys saves all the supported QoS parameters. Going through
this dict, we set the new values to all the parameters. If QoS is
available in the QoS configuration, the value is taken from it;
if not, the value will be set to default.
"""
for key, value in self.svc_qos_keys.items():
param = value['param']
if key in qos.keys():
# If the value is set in QoS, take the value from
# the QoS configuration.
v = qos[key]
else:
# If not, set the value to default.
v = value['default']
self.ssh.chvdisk(vdisk, ['-' + param, str(v)])
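    # Illustrative sketch (not part of the upstream driver and never called
    # by it): mirrors the flattening update_vdisk_qos() performs, assuming
    # svc_qos_keys maps each QoS key to {'param': <chvdisk flag>,
    # 'default': <fallback value>}. The sample inputs are made up.
    @staticmethod
    def _example_qos_to_chvdisk_args(svc_qos_keys, qos):
        args = []
        for key, value in svc_qos_keys.items():
            # Take the QoS value when given, otherwise fall back to the
            # default, exactly as update_vdisk_qos() does.
            v = qos.get(key, value['default'])
            args.append(['-' + value['param'], str(v)])
        return args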
def disable_vdisk_qos(self, vdisk, qos):
"""Disable the QoS."""
for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
# Take the default value.
value = self.svc_qos_keys[key]['default']
self.ssh.chvdisk(vdisk, ['-' + param, value])
def change_vdisk_options(self, vdisk, changes, opts, state):
if 'warning' in opts:
opts['warning'] = '%s%%' % str(opts['warning'])
if 'easytier' in opts:
opts['easytier'] = 'on' if opts['easytier'] else 'off'
if 'autoexpand' in opts:
opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
for key in changes:
self.ssh.chvdisk(vdisk, ['-' + key, opts[key]])
def change_vdisk_iogrp(self, vdisk, state, iogrp):
if state['code_level'] < (6, 4, 0, 0):
LOG.debug('Ignore change IO group as storage code level is '
'%(code_level)s, below the required 6.4.0.0.',
{'code_level': state['code_level']})
else:
self.ssh.movevdisk(vdisk, str(iogrp[0]))
self.ssh.addvdiskaccess(vdisk, str(iogrp[0]))
self.ssh.rmvdiskaccess(vdisk, str(iogrp[1]))
def vdisk_by_uid(self, vdisk_uid):
"""Returns the properties of the vdisk with the specified UID.
Returns None if no such disk exists.
"""
vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid)
if len(vdisks) == 0:
return None
if len(vdisks) != 1:
msg = (_('Expected single vdisk returned from lsvdisk when '
'filtering on vdisk_UID. %(count)s were returned.') %
{'count': len(vdisks)})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
vdisk = vdisks.result[0]
return self.ssh.lsvdisk(vdisk['name'])
def is_vdisk_in_use(self, vdisk):
"""Returns True if the specified vdisk is mapped to at least 1 host."""
resp = self.ssh.lsvdiskhostmap(vdisk)
return len(resp) != 0
def rename_vdisk(self, vdisk, new_name):
self.ssh.chvdisk(vdisk, ['-name', new_name])
def change_vdisk_primary_copy(self, vdisk, copy_id):
self.ssh.chvdisk(vdisk, ['-primary', copy_id])
def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
self.ssh.migratevdisk(vdisk, dest_pool, copy_id)
class CLIResponse(object):
"""Parse SVC CLI output and generate iterable."""
def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True):
super(CLIResponse, self).__init__()
if ssh_cmd:
self.ssh_cmd = ' '.join(ssh_cmd)
else:
self.ssh_cmd = 'None'
self.raw = raw
self.delim = delim
self.with_header = with_header
self.result = self._parse()
def select(self, *keys):
for a in self.result:
vs = []
for k in keys:
v = a.get(k, None)
if isinstance(v, six.string_types) or v is None:
v = [v]
if isinstance(v, list):
vs.append(v)
for item in zip(*vs):
if len(item) == 1:
yield item[0]
else:
yield item
def __getitem__(self, key):
try:
return self.result[key]
except KeyError:
msg = (_('Did not find the expected key %(key)s in %(fun)s: '
'%(raw)s.') % {'key': key, 'fun': self.ssh_cmd,
'raw': self.raw})
raise exception.VolumeBackendAPIException(data=msg)
def __iter__(self):
for a in self.result:
yield a
def __len__(self):
return len(self.result)
def _parse(self):
def get_reader(content, delim):
for line in content.lstrip().splitlines():
line = line.strip()
if line:
yield line.split(delim)
else:
yield []
if isinstance(self.raw, six.string_types):
stdout, stderr = self.raw, ''
else:
stdout, stderr = self.raw
reader = get_reader(stdout, self.delim)
result = []
if self.with_header:
hds = tuple()
for row in reader:
hds = row
break
for row in reader:
cur = dict()
if len(hds) != len(row):
msg = (_('Unexpected CLI response: header/row mismatch. '
'header: %(header)s, row: %(row)s.')
% {'header': hds,
'row': row})
raise exception.VolumeBackendAPIException(data=msg)
for k, v in zip(hds, row):
CLIResponse.append_dict(cur, k, v)
result.append(cur)
else:
cur = dict()
for row in reader:
if row:
CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
elif cur: # start new section
result.append(cur)
cur = dict()
if cur:
result.append(cur)
return result
@staticmethod
def append_dict(dict_, key, value):
key, value = key.strip(), value.strip()
obj = dict_.get(key, None)
if obj is None:
dict_[key] = value
elif isinstance(obj, list):
obj.append(value)
dict_[key] = obj
else:
dict_[key] = [obj, value]
return dict_
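# Illustrative sketch (not part of the upstream driver and never called by
# it): demonstrates how CLIResponse parses '-delim !' CLI output. The sample
# output below is made up but follows the header/row shape the class expects.
def _example_cliresponse_usage():
    raw = ('id!name!capacity\n'
           '0!volume-1!10737418240\n'
           '1!volume-2!21474836480\n')
    resp = CLIResponse(raw, ssh_cmd=['lsvdisk', '-delim', '!'],
                       delim='!', with_header=True)
    # resp[0]['name'] == 'volume-1'
    # list(resp.select('id', 'name')) ==
    #     [('0', 'volume-1'), ('1', 'volume-2')]
    return list(resp.select('id', 'name'))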
class StorwizeSVCCommonDriver(san.SanDriver,
driver.ManageableVD,
driver.MigrateVD,
driver.CloneableImageVD):
"""IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers.
Version history:
.. code-block:: none
1.0 - Initial driver
1.1 - FC support, create_cloned_volume, volume type support,
get_volume_stats, minor bug fixes
1.2.0 - Added retype
1.2.1 - Code refactor, improved exception handling
1.2.2 - Fix bug #1274123 (races in host-related functions)
1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim
to lsfabric, clear unused data from connections, ensure
                matching WWPNs by comparing lower case)
1.2.4 - Fix bug #1278035 (async migration/retype)
1.2.5 - Added support for manage_existing (unmanage is inherited)
1.2.6 - Added QoS support in terms of I/O throttling rate
1.3.1 - Added support for volume replication
1.3.2 - Added support for consistency group
1.3.3 - Update driver to use ABC metaclasses
2.0 - Code refactor, split init file and placed shared methods
for FC and iSCSI within the StorwizeSVCCommonDriver class
2.1 - Added replication V2 support to the global/metro mirror
mode
2.1.1 - Update replication to version 2.1
"""
VERSION = "2.1.1"
VDISKCOPYOPS_INTERVAL = 600
DEFAULT_GR_SLEEP = random.randint(20, 500) / 100.0
def __init__(self, *args, **kwargs):
super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(storwize_svc_opts)
self._backend_name = self.configuration.safe_get('volume_backend_name')
self.active_ip = self.configuration.san_ip
self.inactive_ip = self.configuration.storwize_san_secondary_ip
self._master_backend_helpers = StorwizeHelpers(self._run_ssh)
self._aux_backend_helpers = None
self._helpers = self._master_backend_helpers
self._vdiskcopyops = {}
self._vdiskcopyops_loop = None
self.protocol = None
self._state = {'storage_nodes': {},
'enabled_protocols': set(),
'compression_enabled': False,
'available_iogrps': [],
'system_name': None,
'system_id': None,
'code_level': None,
}
self._active_backend_id = kwargs.get('active_backend_id')
        # This dictionary maps each replication target to its
        # replication manager object.
self.replica_manager = {}
# One driver can be configured with only one replication target
# to failover.
self._replica_target = {}
# This boolean is used to indicate whether replication is supported
# by this storage.
self._replica_enabled = False
# This list is used to save the supported replication modes.
self._supported_replica_types = []
# This is used to save the available pools in failed-over status
self._secondary_pools = None
        # Storwize cannot accept more than 3 new SSH connections within
        # 1 second, so slow down the initialization.
time.sleep(1)
def do_setup(self, ctxt):
"""Check that we have all configuration details from the storage."""
LOG.debug('enter: do_setup')
# v2.1 replication setup
self._get_storwize_config()
# Update the storwize state
self._update_storwize_state()
# Validate that the pool exists
self._validate_pools_exist()
# Build the list of in-progress vdisk copy operations
if ctxt is None:
admin_context = context.get_admin_context()
else:
admin_context = ctxt.elevated()
volumes = objects.VolumeList.get_all_by_host(admin_context, self.host)
for volume in volumes:
metadata = volume.admin_metadata
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
self._vdiskcopyops[volume['id']] = ops
# if vdiskcopy exists in database, start the looping call
if len(self._vdiskcopyops) >= 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
LOG.debug('leave: do_setup')
def _update_storwize_state(self):
# Get storage system name, id, and code level
self._state.update(self._helpers.get_system_info())
# Check if compression is supported
self._state['compression_enabled'] = (self._helpers.
compression_enabled())
# Get the available I/O groups
self._state['available_iogrps'] = (self._helpers.
get_available_io_groups())
# Get the iSCSI and FC names of the Storwize/SVC nodes
self._state['storage_nodes'] = self._helpers.get_node_info()
# Add the iSCSI IP addresses and WWPNs to the storage node info
self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
self._helpers.add_fc_wwpns(self._state['storage_nodes'])
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
for k, node in self._state['storage_nodes'].items():
if ((len(node['ipv4']) or len(node['ipv6']))
and len(node['iscsi_name'])):
node['enabled_protocols'].append('iSCSI')
self._state['enabled_protocols'].add('iSCSI')
if len(node['WWPN']):
node['enabled_protocols'].append('FC')
self._state['enabled_protocols'].add('FC')
if not len(node['enabled_protocols']):
to_delete.append(k)
for delkey in to_delete:
del self._state['storage_nodes'][delkey]
def _get_backend_pools(self):
if not self._active_backend_id:
return self.configuration.storwize_svc_volpool_name
elif not self._secondary_pools:
self._secondary_pools = [self._replica_target.get('pool_name')]
return self._secondary_pools
def _validate_pools_exist(self):
# Validate that the pool exists
pools = self._get_backend_pools()
for pool in pools:
if not self._helpers.is_pool_defined(pool):
reason = (_('Failed getting details for pool %s.') % pool)
raise exception.InvalidInput(reason=reason)
def check_for_setup_error(self):
"""Ensure that the flags are set properly."""
LOG.debug('enter: check_for_setup_error')
# Check that we have the system ID information
if self._state['system_name'] is None:
exception_msg = (_('Unable to determine system name.'))
raise exception.VolumeBackendAPIException(data=exception_msg)
if self._state['system_id'] is None:
exception_msg = (_('Unable to determine system id.'))
raise exception.VolumeBackendAPIException(data=exception_msg)
# Make sure we have at least one node configured
if not len(self._state['storage_nodes']):
msg = _('do_setup: No configured nodes.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if self.protocol not in self._state['enabled_protocols']:
# TODO(mc_nair): improve this error message by looking at
# self._state['enabled_protocols'] to tell user what driver to use
raise exception.InvalidInput(
reason=_('The storage device does not support %(prot)s. '
'Please configure the device to support %(prot)s or '
'switch to a driver using a different protocol.')
% {'prot': self.protocol})
required_flags = ['san_ip', 'san_ssh_port', 'san_login',
'storwize_svc_volpool_name']
for flag in required_flags:
if not self.configuration.safe_get(flag):
raise exception.InvalidInput(reason=_('%s is not set.') % flag)
# Ensure that either password or keyfile were set
if not (self.configuration.san_password or
self.configuration.san_private_key):
raise exception.InvalidInput(
reason=_('Password or SSH private key is required for '
'authentication: set either san_password or '
'san_private_key option.'))
opts = self._helpers.build_default_opts(self.configuration)
self._helpers.check_vdisk_opts(self._state, opts)
LOG.debug('leave: check_for_setup_error')
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
cinder_utils.check_ssh_injection(cmd_list)
command = ' '.join(cmd_list)
if not self.sshpool:
try:
self.sshpool = self._set_up_sshpool(self.active_ip)
except paramiko.SSHException:
LOG.warning('Unable to use san_ip to create SSHPool. Now '
'attempting to use storwize_san_secondary_ip '
'to create SSHPool.')
if self._toggle_ip():
self.sshpool = self._set_up_sshpool(self.active_ip)
else:
LOG.warning('Unable to create SSHPool using san_ip '
'and not able to use '
'storwize_san_secondary_ip since it is '
'not configured.')
raise
try:
return self._ssh_execute(self.sshpool, command,
check_exit_code, attempts)
except Exception:
            # Need to check if an SSHPool can be created with
            # storwize_san_secondary_ip before raising an error.
try:
if self._toggle_ip():
LOG.warning("Unable to execute SSH command with "
"%(inactive)s. Attempting to execute SSH "
"command with %(active)s.",
{'inactive': self.inactive_ip,
'active': self.active_ip})
self.sshpool = self._set_up_sshpool(self.active_ip)
return self._ssh_execute(self.sshpool, command,
check_exit_code, attempts)
else:
LOG.warning('Not able to use '
'storwize_san_secondary_ip since it is '
'not configured.')
raise
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error running SSH command: %s",
command)
def _set_up_sshpool(self, ip):
password = self.configuration.san_password
privatekey = self.configuration.san_private_key
min_size = self.configuration.ssh_min_pool_conn
max_size = self.configuration.ssh_max_pool_conn
sshpool = ssh_utils.SSHPool(
ip,
self.configuration.san_ssh_port,
self.configuration.ssh_conn_timeout,
self.configuration.san_login,
password=password,
privatekey=privatekey,
min_size=min_size,
max_size=max_size)
return sshpool
def _ssh_execute(self, sshpool, command,
                     check_exit_code=True, attempts=1):
try:
with sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
LOG.error('Error has occurred: %s', e)
last_exception = e
greenthread.sleep(self.DEFAULT_GR_SLEEP)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error running SSH command: %s", command)
def _toggle_ip(self):
# Change active_ip if storwize_san_secondary_ip is set.
if self.configuration.storwize_san_secondary_ip is None:
return False
self.inactive_ip, self.active_ip = self.active_ip, self.inactive_ip
LOG.info('Toggle active_ip from %(old)s to %(new)s.',
{'old': self.inactive_ip,
'new': self.active_ip})
return True
def ensure_export(self, ctxt, volume):
"""Check that the volume exists on the storage.
The system does not "export" volumes as a Linux iSCSI target does,
and therefore we just check that the volume exists on the storage.
"""
vol_name = self._get_target_vol(volume)
volume_defined = self._helpers.is_vdisk_defined(vol_name)
if not volume_defined:
LOG.error('ensure_export: Volume %s not found on storage.',
volume['name'])
def create_export(self, ctxt, volume, connector):
model_update = None
return model_update
def remove_export(self, ctxt, volume):
pass
def _get_vdisk_params(self, type_id, volume_type=None,
volume_metadata=None):
return self._helpers.get_vdisk_params(self.configuration,
self._state, type_id,
volume_type=volume_type,
volume_metadata=volume_metadata)
def create_volume(self, volume):
LOG.debug('enter: create_volume: volume %s', volume['name'])
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
pool = utils.extract_host(volume['host'], 'pool')
if opts['mirror_pool'] and rep_type:
reason = _('Create mirror volume with replication enabled is '
'not supported.')
raise exception.InvalidInput(reason=reason)
opts['iogrp'] = self._helpers.select_io_group(self._state, opts)
self._helpers.create_vdisk(volume['name'], str(volume['size']),
'gb', pool, opts)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
model_update = None
if rep_type:
replica_obj = self._get_replica_obj(rep_type)
replica_obj.volume_replication_setup(ctxt, volume)
model_update = {'replication_status':
fields.ReplicationStatus.ENABLED}
LOG.debug('leave: create_volume:\n volume: %(vol)s\n '
'model_update %(model_update)s',
{'vol': volume['name'],
'model_update': model_update})
return model_update
def delete_volume(self, volume):
LOG.debug('enter: delete_volume: volume %s', volume['name'])
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type:
if self._aux_backend_helpers:
self._aux_backend_helpers.delete_rc_volume(volume['name'],
target_vol=True)
if not self._active_backend_id:
self._master_backend_helpers.delete_rc_volume(volume['name'])
else:
# If it's in fail over state, also try to delete the volume
# in master backend
try:
self._master_backend_helpers.delete_rc_volume(
volume['name'])
except Exception as ex:
                    LOG.error('Failed to delete volume %(volume)s in the '
                              'master backend. Exception: %(err)s.',
{'volume': volume['name'],
'err': ex})
else:
if self._active_backend_id:
msg = (_('Error: delete non-replicate volume in failover mode'
' is not allowed.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
self._helpers.delete_vdisk(volume['name'], False)
if volume['id'] in self._vdiskcopyops:
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
LOG.debug('leave: delete_volume: volume %s', volume['name'])
def create_snapshot(self, snapshot):
ctxt = context.get_admin_context()
try:
# TODO(zhaochy): change to use snapshot.volume
source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
except Exception:
msg = (_('create_snapshot: get source volume failed.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
rep_type = self._get_volume_replicated_type(
ctxt, None, source_vol['volume_type_id'])
if rep_type == storwize_const.GMCV:
# GMCV volume will have problem to failback
# when it has flash copy relationship besides change volumes
msg = _('create_snapshot: Create snapshot to '
'gmcv replication volume is not allowed.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
pool = utils.extract_host(source_vol['host'], 'pool')
opts = self._get_vdisk_params(source_vol['volume_type_id'])
self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
snapshot['volume_id'], self.configuration,
opts, False, pool=pool)
def delete_snapshot(self, snapshot):
self._helpers.delete_vdisk(snapshot['name'], False)
def create_volume_from_snapshot(self, volume, snapshot):
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
pool = utils.extract_host(volume['host'], 'pool')
self._helpers.create_copy(snapshot['name'], volume['name'],
snapshot['id'], self.configuration,
opts, True, pool=pool)
        # The requested volume size usually equals the snapshot size, but
        # in some scenarios it may be bigger than the source volume size.
        # SVC does not support FlashCopy between two volumes of different
        # sizes, so create the volume with the snapshot size first and
        # then extend it to the target size.
if volume['size'] > snapshot['volume_size']:
# extend the new created target volume to expected size.
self._extend_volume_op(volume, volume['size'],
snapshot['volume_size'])
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type:
self._validate_replication_enabled()
replica_obj = self._get_replica_obj(rep_type)
replica_obj.volume_replication_setup(ctxt, volume)
return {'replication_status': fields.ReplicationStatus.ENABLED}
def create_cloned_volume(self, tgt_volume, src_volume):
"""Creates a clone of the specified volume."""
opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
volume_metadata=
tgt_volume.get('volume_metadata'))
pool = utils.extract_host(tgt_volume['host'], 'pool')
self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
src_volume['id'], self.configuration,
opts, True, pool=pool)
# The source volume size is equal to target volume size
# in most of the cases. But in some scenarios, the target
# volume size may be bigger than the source volume size.
# SVC does not support flashcopy between two volumes
# with two different sizes. So use source volume size to
# create target volume first and then extend target
# volume to original size.
if tgt_volume['size'] > src_volume['size']:
# extend the new created target volume to expected size.
self._extend_volume_op(tgt_volume, tgt_volume['size'],
src_volume['size'])
if opts['qos']:
self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, tgt_volume)
if rep_type:
self._validate_replication_enabled()
replica_obj = self._get_replica_obj(rep_type)
replica_obj.volume_replication_setup(ctxt, tgt_volume)
return {'replication_status': fields.ReplicationStatus.ENABLED}
def extend_volume(self, volume, new_size):
self._extend_volume_op(volume, new_size)
def _extend_volume_op(self, volume, new_size, old_size=None):
LOG.debug('enter: _extend_volume_op: volume %s', volume['id'])
volume_name = self._get_target_vol(volume)
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume_name,
allow_snaps=False)
if not ret:
msg = (_('_extend_volume_op: Extending a volume with snapshots is '
'not supported.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if old_size is None:
old_size = volume.size
extend_amt = int(new_size) - old_size
rel_info = self._helpers.get_relationship_info(volume_name)
if rel_info:
LOG.warning('_extend_volume_op: Extending a volume with '
'remote copy is not recommended.')
try:
rep_type = rel_info['copy_type']
cyclingmode = rel_info['cycling_mode']
self._master_backend_helpers.delete_relationship(
volume.name)
tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
volume.name)
self._master_backend_helpers.extend_vdisk(volume.name,
extend_amt)
self._aux_backend_helpers.extend_vdisk(tgt_vol, extend_amt)
tgt_sys = self._aux_backend_helpers.get_system_info()
if storwize_const.GMCV_MULTI == cyclingmode:
tgt_change_vol = (
storwize_const.REPLICA_CHG_VOL_PREFIX +
tgt_vol)
source_change_vol = (
storwize_const.REPLICA_CHG_VOL_PREFIX +
volume.name)
self._master_backend_helpers.extend_vdisk(
source_change_vol, extend_amt)
self._aux_backend_helpers.extend_vdisk(
tgt_change_vol, extend_amt)
src_change_opts = self._get_vdisk_params(
volume.volume_type_id)
cycle_period_seconds = src_change_opts.get(
'cycle_period_seconds')
self._master_backend_helpers.create_relationship(
volume.name, tgt_vol, tgt_sys.get('system_name'),
True, True, source_change_vol, cycle_period_seconds)
self._aux_backend_helpers.change_relationship_changevolume(
tgt_vol, tgt_change_vol, False)
self._master_backend_helpers.start_relationship(
volume.name)
else:
self._master_backend_helpers.create_relationship(
volume.name, tgt_vol, tgt_sys.get('system_name'),
True if storwize_const.GLOBAL == rep_type else False)
except Exception as e:
msg = (_('Failed to extend a volume with remote copy '
'%(volume)s. Exception: '
'%(err)s.') % {'volume': volume.id,
'err': e})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
self._helpers.extend_vdisk(volume_name, extend_amt)
LOG.debug('leave: _extend_volume_op: volume %s', volume.id)
def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False):
return self._helpers.add_vdisk_copy(volume, dest_pool,
vol_type, self._state,
self.configuration,
auto_delete=auto_delete)
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
            curr_ops_list.append(new_op)
            new_ops_list = curr_ops_list
else:
new_ops_list = [new_op]
new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
if volume['id'] in self._vdiskcopyops:
self._vdiskcopyops[volume['id']].append(new_op)
else:
self._vdiskcopyops[volume['id']] = [new_op]
# We added the first copy operation, so start the looping call
if len(self._vdiskcopyops) == 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
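    # Illustrative sketch (not part of the upstream driver and never called
    # by it): the round trip of the 'vdiskcopyops' admin-metadata string
    # used above, which encodes (orig_copy_id, new_copy_id) pairs as
    # 'orig:new' joined by ';'.
    @staticmethod
    def _example_vdiskcopyops_roundtrip():
        ops = [('0', '1'), ('2', '3')]
        encoded = ';'.join([':'.join(x) for x in ops])    # '0:1;2:3'
        decoded = [tuple(x.split(':')) for x in encoded.split(';')]
        return encoded, decoded == ops                    # ('0:1;2:3', True)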
def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
try:
self._vdiskcopyops[volume['id']].remove((orig_copy_id,
new_copy_id))
if not len(self._vdiskcopyops[volume['id']]):
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
except KeyError:
LOG.error('_rm_vdisk_copy_op: Volume %s does not have any '
'registered vdisk copy operations.', volume['id'])
return
except ValueError:
LOG.error('_rm_vdisk_copy_op: Volume %(vol)s does not have '
'the specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.',
{'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
return
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if not curr_ops:
LOG.error('_rm_vdisk_copy_op: Volume metadata %s does not '
'have any registered vdisk copy operations.',
volume['id'])
return
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
try:
curr_ops_list.remove((orig_copy_id, new_copy_id))
except ValueError:
LOG.error('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
'not have the specified vdisk copy operation: '
'orig=%(orig)s new=%(new)s.',
{'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
return
if len(curr_ops_list):
new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
else:
self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
'vdiskcopyops')
def _check_volume_copy_ops(self):
LOG.debug("Enter: update volume copy status.")
ctxt = context.get_admin_context()
copy_items = list(self._vdiskcopyops.items())
for vol_id, copy_ops in copy_items:
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
LOG.warning('Volume %s does not exist.', vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
continue
for copy_op in copy_ops:
try:
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
copy_op[1])
except Exception:
LOG.info('_check_volume_copy_ops: Volume %(vol)s does '
'not have the specified vdisk copy '
'operation: orig=%(orig)s new=%(new)s.',
{'vol': volume['id'], 'orig': copy_op[0],
'new': copy_op[1]})
else:
if synced:
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
copy_op[1])
LOG.debug("Exit: update volume copy status.")
# #### V2.1 replication methods #### #
def failover_host(self, context, volumes, secondary_id=None, groups=None):
LOG.debug('enter: failover_host: secondary_id=%(id)s',
{'id': secondary_id})
if not self._replica_enabled:
msg = _("Replication is not properly enabled on backend.")
LOG.error(msg)
raise exception.UnableToFailOver(reason=msg)
if storwize_const.FAILBACK_VALUE == secondary_id:
# In this case the administrator would like to fail back.
secondary_id, volumes_update = self._replication_failback(context,
volumes)
elif (secondary_id == self._replica_target['backend_id']
or secondary_id is None):
# In this case the administrator would like to fail over.
secondary_id, volumes_update = self._replication_failover(context,
volumes)
else:
msg = (_("Invalid secondary id %s.") % secondary_id)
LOG.error(msg)
raise exception.InvalidReplicationTarget(reason=msg)
LOG.debug('leave: failover_host: secondary_id=%(id)s',
{'id': secondary_id})
return secondary_id, volumes_update, []
def _replication_failback(self, ctxt, volumes):
"""Fail back all the volume on the secondary backend."""
volumes_update = []
if not self._active_backend_id:
LOG.info("Host has been failed back. doesn't need "
"to fail back again")
return None, volumes_update
try:
self._master_backend_helpers.get_system_info()
except Exception:
msg = (_("Unable to failback due to primary is not reachable."))
LOG.error(msg)
raise exception.UnableToFailOver(reason=msg)
unrep_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
# start synchronize from aux volume to master volume
self._sync_with_aux(ctxt, rep_volumes)
self._wait_replica_ready(ctxt, rep_volumes)
rep_volumes_update = self._failback_replica_volumes(ctxt,
rep_volumes)
volumes_update.extend(rep_volumes_update)
unrep_volumes_update = self._failover_unreplicated_volume(
unrep_volumes)
volumes_update.extend(unrep_volumes_update)
self._helpers = self._master_backend_helpers
self._active_backend_id = None
# Update the storwize state
self._update_storwize_state()
self._update_volume_stats()
return storwize_const.FAILBACK_VALUE, volumes_update
def _failback_replica_volumes(self, ctxt, rep_volumes):
LOG.debug('enter: _failback_replica_volumes')
volumes_update = []
for volume in rep_volumes:
rep_type = self._get_volume_replicated_type(ctxt, volume)
replica_obj = self._get_replica_obj(rep_type)
tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
rep_info = self._helpers.get_relationship_info(tgt_volume)
if not rep_info:
volumes_update.append(
{'volume_id': volume['id'],
'updates':
{'replication_status':
fields.ReplicationStatus.ERROR,
'status': 'error'}})
                LOG.error('_failback_replica_volumes: no rc-relationship '
'is established between master: %(master)s and '
'aux %(aux)s. Please re-establish the '
'relationship and synchronize the volumes on '
'backend storage.',
{'master': volume['name'], 'aux': tgt_volume})
continue
            LOG.debug('_failback_replica_volumes: vol=%(vol)s, master_vol='
                      '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
                      'primary=%(primary)s',
{'vol': volume['name'],
'master_vol': rep_info['master_vdisk_name'],
'aux_vol': rep_info['aux_vdisk_name'],
'state': rep_info['state'],
'primary': rep_info['primary']})
try:
model_updates = replica_obj.replication_failback(volume)
volumes_update.append(
{'volume_id': volume['id'],
'updates': model_updates})
except exception.VolumeDriverException:
LOG.error('Unable to fail back volume %(volume_id)s',
{'volume_id': volume.id})
volumes_update.append(
{'volume_id': volume['id'],
'updates': {'replication_status':
fields.ReplicationStatus.ERROR,
'status': 'error'}})
LOG.debug('leave: _failback_replica_volumes '
'volumes_update=%(volumes_update)s',
{'volumes_update': volumes_update})
return volumes_update
def _failover_unreplicated_volume(self, unreplicated_vols):
volumes_update = []
for vol in unreplicated_vols:
if vol.replication_driver_data:
rep_data = json.loads(vol.replication_driver_data)
update_status = rep_data['previous_status']
rep_data = ''
else:
update_status = 'error'
rep_data = json.dumps({'previous_status': vol.status})
volumes_update.append(
{'volume_id': vol.id,
'updates': {'status': update_status,
'replication_driver_data': rep_data}})
return volumes_update
def _sync_with_aux(self, ctxt, volumes):
LOG.debug('enter: _sync_with_aux ')
try:
rep_mgr = self._get_replica_mgr()
rep_mgr.establish_target_partnership()
except Exception as ex:
            LOG.warning('Failed to establish partnership in backend. '
                        'error=%(ex)s', {'ex': ex})
for volume in volumes:
tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
rep_info = self._helpers.get_relationship_info(tgt_volume)
if not rep_info:
                LOG.error('_sync_with_aux: no rc-relationship is '
'established between master: %(master)s and aux '
'%(aux)s. Please re-establish the relationship '
'and synchronize the volumes on backend '
'storage.', {'master': volume['name'],
'aux': tgt_volume})
continue
LOG.debug('_sync_with_aux: volume: %(volume)s rep_info:master_vol='
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
'primary=%(primary)s',
{'volume': volume['name'],
'master_vol': rep_info['master_vdisk_name'],
'aux_vol': rep_info['aux_vdisk_name'],
'state': rep_info['state'],
'primary': rep_info['primary']})
try:
if (rep_info['state'] not in
[storwize_const.REP_CONSIS_SYNC,
storwize_const.REP_CONSIS_COPYING]):
if rep_info['primary'] == 'master':
self._helpers.start_relationship(tgt_volume)
else:
self._helpers.start_relationship(tgt_volume,
primary='aux')
except Exception as ex:
                LOG.warning('Failed to copy data from aux to master. '
                            'master: %(master)s and aux %(aux)s. Please '
                            're-establish the relationship and synchronize'
                            ' the volumes on backend storage. error='
                            '%(ex)s', {'master': volume['name'],
                                       'aux': tgt_volume,
                                       'ex': ex})
LOG.debug('leave: _sync_with_aux.')
def _wait_replica_ready(self, ctxt, volumes):
for volume in volumes:
tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
try:
self._wait_replica_vol_ready(ctxt, tgt_volume)
except Exception as ex:
LOG.error('_wait_replica_ready: wait for volume:%(volume)s'
' remote copy synchronization failed due to '
'error:%(err)s.', {'volume': tgt_volume,
'err': ex})
def _wait_replica_vol_ready(self, ctxt, volume):
LOG.debug('enter: _wait_replica_vol_ready: volume=%(volume)s',
{'volume': volume})
def _replica_vol_ready():
rep_info = self._helpers.get_relationship_info(volume)
if not rep_info:
                msg = (_('_wait_replica_vol_ready: no rc-relationship '
                         'is established for volume:%(volume)s. Please '
                         're-establish the rc-relationship and '
                         'synchronize the volumes on backend storage.')
                       % {'volume': volume})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('_replica_vol_ready:volume: %(volume)s rep_info: '
'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, '
'state=%(state)s, primary=%(primary)s',
{'volume': volume,
'master_vol': rep_info['master_vdisk_name'],
'aux_vol': rep_info['aux_vdisk_name'],
'state': rep_info['state'],
'primary': rep_info['primary']})
if (rep_info['state'] in
[storwize_const.REP_CONSIS_SYNC,
storwize_const.REP_CONSIS_COPYING]):
return True
elif rep_info['state'] == storwize_const.REP_IDL_DISC:
                msg = (_('Wait synchronize failed. volume: %(volume)s')
                       % {'volume': volume})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return False
self._helpers._wait_for_a_condition(
_replica_vol_ready, timeout=storwize_const.DEFAULT_RC_TIMEOUT,
interval=storwize_const.DEFAULT_RC_INTERVAL,
raise_exception=True)
LOG.debug('leave: _wait_replica_vol_ready: volume=%(volume)s',
{'volume': volume})
def _replication_failover(self, ctxt, volumes):
volumes_update = []
if self._active_backend_id:
LOG.info("Host has been failed over to %s",
self._active_backend_id)
return self._active_backend_id, volumes_update
try:
self._aux_backend_helpers.get_system_info()
except Exception as ex:
msg = (_("Unable to failover due to replication target is not "
"reachable. error=%(ex)s"), {'error': ex})
LOG.error(msg)
raise exception.UnableToFailOver(reason=msg)
unrep_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
rep_volumes_update = self._failover_replica_volumes(ctxt, rep_volumes)
volumes_update.extend(rep_volumes_update)
unrep_volumes_update = self._failover_unreplicated_volume(
unrep_volumes)
volumes_update.extend(unrep_volumes_update)
self._helpers = self._aux_backend_helpers
self._active_backend_id = self._replica_target['backend_id']
self._secondary_pools = [self._replica_target['pool_name']]
# Update the storwize state
self._update_storwize_state()
self._update_volume_stats()
return self._active_backend_id, volumes_update
def _failover_replica_volumes(self, ctxt, rep_volumes):
LOG.debug('enter: _failover_replica_volumes')
volumes_update = []
for volume in rep_volumes:
rep_type = self._get_volume_replicated_type(ctxt, volume)
replica_obj = self._get_replica_obj(rep_type)
# Try do the fail-over.
try:
rep_info = self._aux_backend_helpers.get_relationship_info(
storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'])
if not rep_info:
volumes_update.append(
{'volume_id': volume['id'],
'updates':
{'replication_status':
fields.ReplicationStatus.FAILOVER_ERROR,
'status': 'error'}})
                    LOG.error('_failover_replica_volumes: no '
                              'rc-relationship is established for master:'
'%(master)s. Please re-establish the rc-'
'relationship and synchronize the volumes on'
' backend storage.',
{'master': volume['name']})
continue
LOG.debug('_failover_replica_volumes: vol=%(vol)s, '
'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, '
'state=%(state)s, primary=%(primary)s',
{'vol': volume['name'],
'master_vol': rep_info['master_vdisk_name'],
'aux_vol': rep_info['aux_vdisk_name'],
'state': rep_info['state'],
'primary': rep_info['primary']})
model_updates = replica_obj.failover_volume_host(ctxt, volume)
volumes_update.append(
{'volume_id': volume['id'],
'updates': model_updates})
except exception.VolumeDriverException:
LOG.error('Unable to failover to aux volume. Please make '
'sure that the aux volume is ready.')
volumes_update.append(
{'volume_id': volume['id'],
'updates': {'status': 'error',
'replication_status':
fields.ReplicationStatus.FAILOVER_ERROR}})
LOG.debug('leave: _failover_replica_volumes '
'volumes_update=%(volumes_update)s',
{'volumes_update': volumes_update})
return volumes_update
def _classify_volume(self, ctxt, volumes):
normal_volumes = []
replica_volumes = []
for v in volumes:
volume_type = self._get_volume_replicated_type(ctxt, v)
if volume_type and v['status'] == 'available':
replica_volumes.append(v)
else:
normal_volumes.append(v)
return normal_volumes, replica_volumes
def _get_replica_obj(self, rep_type):
replica_manager = self.replica_manager[
self._replica_target['backend_id']]
return replica_manager.get_replica_obj(rep_type)
def _get_replica_mgr(self):
replica_manager = self.replica_manager[
self._replica_target['backend_id']]
return replica_manager
def _get_target_vol(self, volume):
tgt_vol = volume['name']
if self._active_backend_id:
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type:
tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
volume['name'])
return tgt_vol
def _validate_replication_enabled(self):
if not self._replica_enabled:
msg = _("Replication is not properly configured on backend.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _get_specs_replicated_type(self, volume_type):
replication_type = None
extra_specs = volume_type.get("extra_specs", {})
rep_val = extra_specs.get('replication_enabled')
if rep_val == "<is> True":
replication_type = extra_specs.get('replication_type',
storwize_const.GLOBAL)
            # The format for replication_type in the extra spec is
            # "<in> global"; otherwise the code would not reach here.
if replication_type != storwize_const.GLOBAL:
# Pick up the replication type specified in the
# extra spec from the format like "<in> global".
replication_type = replication_type.split()[1]
if replication_type not in storwize_const.VALID_REP_TYPES:
msg = (_("Invalid replication type %s.") % replication_type)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return replication_type
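    # Illustrative sketch (not part of the upstream driver and never called
    # by it): mirrors the scoped extra-spec parsing done above for a
    # hypothetical volume type that requests metro mirror replication.
    @staticmethod
    def _example_parse_replication_specs():
        extra_specs = {'replication_enabled': '<is> True',
                       'replication_type': '<in> metro'}
        rep_type = None
        if extra_specs.get('replication_enabled') == '<is> True':
            rep_type = extra_specs.get('replication_type',
                                       storwize_const.GLOBAL)
            if rep_type != storwize_const.GLOBAL:
                # Strip the '<in> ' scoping prefix, leaving 'metro'.
                rep_type = rep_type.split()[1]
        return rep_type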
def _get_volume_replicated_type(self, ctxt, volume, vol_type_id=None):
replication_type = None
volume_type = None
volume_type_id = volume.volume_type_id if volume else vol_type_id
if volume_type_id:
volume_type = objects.VolumeType.get_by_name_or_id(
ctxt, volume_type_id)
if volume_type:
replication_type = self._get_specs_replicated_type(volume_type)
return replication_type
def _get_storwize_config(self):
self._do_replication_setup()
if self._active_backend_id and self._replica_target:
self._helpers = self._aux_backend_helpers
self._replica_enabled = (True if (self._helpers.replication_licensed()
and self._replica_target) else False)
if self._replica_enabled:
self._supported_replica_types = storwize_const.VALID_REP_TYPES
def _do_replication_setup(self):
rep_devs = self.configuration.safe_get('replication_device')
if not rep_devs:
return
if len(rep_devs) > 1:
raise exception.InvalidInput(
reason='Multiple replication devices are configured. '
                       'Currently only one replication_device is supported.')
required_flags = ['san_ip', 'backend_id', 'san_login',
'san_password', 'pool_name']
for flag in required_flags:
if flag not in rep_devs[0]:
raise exception.InvalidInput(
reason=_('%s is not set.') % flag)
rep_target = {}
rep_target['san_ip'] = rep_devs[0].get('san_ip')
rep_target['backend_id'] = rep_devs[0].get('backend_id')
rep_target['san_login'] = rep_devs[0].get('san_login')
rep_target['san_password'] = rep_devs[0].get('san_password')
rep_target['pool_name'] = rep_devs[0].get('pool_name')
        # Each replication target has a corresponding replication manager.
self._replication_initialize(rep_target)
def _replication_initialize(self, target):
rep_manager = storwize_rep.StorwizeSVCReplicationManager(
self, target, StorwizeHelpers)
if self._active_backend_id:
if self._active_backend_id != target['backend_id']:
msg = (_("Invalid secondary id %s.") % self._active_backend_id)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
# Setup partnership only in non-failover state
else:
try:
rep_manager.establish_target_partnership()
except exception.VolumeDriverException:
LOG.error('The replication src %(src)s has not '
'successfully established partnership with the '
'replica target %(tgt)s.',
{'src': self.configuration.san_ip,
'tgt': target['backend_id']})
self._aux_backend_helpers = rep_manager.get_target_helpers()
self.replica_manager[target['backend_id']] = rep_manager
self._replica_target = target
def migrate_volume(self, ctxt, volume, host):
"""Migrate directly if source and dest are managed by same storage.
We create a new vdisk copy in the desired pool, and add the original
vdisk copy to the admin_metadata of the volume to be deleted. The
deletion will occur using a periodic task once the new copy is synced.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host['host']})
false_ret = (False, None)
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return false_ret
ctxt = context.get_admin_context()
volume_type_id = volume['volume_type_id']
if volume_type_id is not None:
vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
else:
vol_type = None
resp = self._helpers.lsvdiskcopy(volume.name)
if len(resp) > 1:
copies = self._helpers.get_vdisk_copies(volume.name)
self._helpers.migratevdisk(volume.name, dest_pool,
copies['primary']['copy_id'])
else:
self.add_vdisk_copy(volume.name, dest_pool, vol_type,
auto_delete=True)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume.id, 'host': host['host']})
return (True, None)
def _verify_retype_params(self, volume, new_opts, old_opts, need_copy,
change_mirror, new_rep_type, old_rep_type):
        # Some volume parameters cannot be changed, or cannot be changed at
        # the same time, during a volume retype operation. This function
        # checks the retype parameters.
resp = self._helpers.lsvdiskcopy(volume.name)
if old_opts['mirror_pool'] and len(resp) == 1:
            msg = (_('Unable to retype: volume %s is a mirrored volume, but '
                     'it has only one copy in storage.') % volume.name)
raise exception.VolumeDriverException(message=msg)
if need_copy:
            # A mirror volume cannot have another volume copy added.
if len(resp) > 1:
msg = (_('Unable to retype: current action needs volume-copy. '
'A copy of volume %s exists. Adding another copy '
'would exceed the limit of 2 copies.') % volume.name)
raise exception.VolumeDriverException(message=msg)
if old_opts['mirror_pool'] or new_opts['mirror_pool']:
msg = (_('Unable to retype: current action needs volume-copy, '
                         'which is not allowed for mirror volume '
'%s.') % volume.name)
raise exception.VolumeDriverException(message=msg)
if change_mirror:
if (new_opts['mirror_pool'] and
not self._helpers.is_pool_defined(
new_opts['mirror_pool'])):
msg = (_('Unable to retype: The pool %s in which mirror copy '
'is stored is not valid') % new_opts['mirror_pool'])
raise exception.VolumeDriverException(message=msg)
# There are four options for rep_type: None, metro, global, gmcv
if new_rep_type or old_rep_type:
# If volume is replicated, can't copy
if need_copy or new_opts['mirror_pool'] or old_opts['mirror_pool']:
msg = (_('Unable to retype: current action needs volume-copy, '
                         'which is not allowed for replicated volumes. '
'Volume = %s') % volume.id)
raise exception.VolumeDriverException(message=msg)
if new_rep_type != old_rep_type:
old_io_grp = self._helpers.get_volume_io_group(volume.name)
if (old_io_grp not in
StorwizeHelpers._get_valid_requested_io_groups(
self._state, new_opts)):
msg = (_('Unable to retype: it is not allowed to change '
'replication type and io group at the same time.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if new_rep_type and old_rep_type:
msg = (_('Unable to retype: it is not allowed to change '
'%(old_rep_type)s volume to %(new_rep_type)s '
'volume.') %
{'old_rep_type': old_rep_type,
'new_rep_type': new_rep_type})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
elif storwize_const.GMCV == new_rep_type:
                # For gmcv, we may change cycle_period_seconds if needed.
previous_cps = old_opts.get('cycle_period_seconds')
new_cps = new_opts.get('cycle_period_seconds')
if previous_cps != new_cps:
self._helpers.change_relationship_cycleperiod(volume.name,
new_cps)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def retype_iogrp_property(volume, new, old):
if new != old:
self._helpers.change_vdisk_iogrp(volume['name'],
self._state, (new, old))
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
no_copy_keys = ['warning', 'autoexpand', 'easytier']
copy_keys = ['rsize', 'grainsize', 'compression']
all_keys = no_copy_keys + copy_keys
old_opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
                                          volume.get('volume_metadata'))
new_opts = self._get_vdisk_params(new_type['id'],
volume_type=new_type)
vdisk_changes = []
need_copy = False
change_mirror = False
for key in all_keys:
if old_opts[key] != new_opts[key]:
if key in copy_keys:
need_copy = True
break
elif key in no_copy_keys:
vdisk_changes.append(key)
if (utils.extract_host(volume['host'], 'pool') !=
utils.extract_host(host['host'], 'pool')):
need_copy = True
if old_opts['mirror_pool'] != new_opts['mirror_pool']:
change_mirror = True
# Check if retype affects volume replication
model_update = None
new_rep_type = self._get_specs_replicated_type(new_type)
old_rep_type = self._get_volume_replicated_type(ctxt, volume)
old_io_grp = self._helpers.get_volume_io_group(volume['name'])
new_io_grp = self._helpers.select_io_group(self._state, new_opts)
self._verify_retype_params(volume, new_opts, old_opts, need_copy,
change_mirror, new_rep_type, old_rep_type)
if need_copy:
self._check_volume_copy_ops()
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return False
retype_iogrp_property(volume,
new_io_grp, old_io_grp)
try:
self.add_vdisk_copy(volume['name'], dest_pool, new_type,
auto_delete=True)
except exception.VolumeDriverException:
# roll back changing iogrp property
retype_iogrp_property(volume, old_io_grp, new_io_grp)
                msg = (_('Unable to retype: A copy of volume %s exists. '
                         'Retyping would exceed the limit of 2 copies.') %
                       volume['id'])
raise exception.VolumeDriverException(message=msg)
else:
retype_iogrp_property(volume, new_io_grp, old_io_grp)
self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
new_opts, self._state)
if change_mirror:
copies = self._helpers.get_vdisk_copies(volume.name)
if not old_opts['mirror_pool'] and new_opts['mirror_pool']:
# retype from non mirror vol to mirror vol
self.add_vdisk_copy(volume['name'],
new_opts['mirror_pool'], new_type)
elif old_opts['mirror_pool'] and not new_opts['mirror_pool']:
# retype from mirror vol to non mirror vol
secondary = copies['secondary']
if secondary:
self._helpers.rm_vdisk_copy(
volume.name, secondary['copy_id'])
else:
# migrate the second copy to another pool.
self._helpers.migratevdisk(
volume.name, new_opts['mirror_pool'],
copies['secondary']['copy_id'])
if new_opts['qos']:
# Add the new QoS setting to the volume. If the volume has an
# old QoS setting, it will be overwritten.
self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
elif old_opts['qos']:
# If the old_opts contain QoS keys, disable them.
self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
# Delete replica if needed
if old_rep_type and not new_rep_type:
self._aux_backend_helpers.delete_rc_volume(volume['name'],
target_vol=True)
if storwize_const.GMCV == old_rep_type:
self._helpers.delete_vdisk(
storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'],
False)
model_update = {'replication_status':
fields.ReplicationStatus.DISABLED,
'replication_driver_data': None,
'replication_extended_status': None}
# Add replica if needed
if not old_rep_type and new_rep_type:
replica_obj = self._get_replica_obj(new_rep_type)
replica_obj.volume_replication_setup(ctxt, volume)
if storwize_const.GMCV == new_rep_type:
# Set cycle_period_seconds if needed
self._helpers.change_relationship_cycleperiod(
volume['name'],
new_opts.get('cycle_period_seconds'))
model_update = {'replication_status':
fields.ReplicationStatus.ENABLED}
        LOG.debug('exit: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True, model_update
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from Storwize for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
current_name = CONF.volume_name_template % new_volume['id']
original_volume_name = CONF.volume_name_template % volume['id']
try:
self._helpers.rename_vdisk(current_name, original_volume_name)
rep_type = self._get_volume_replicated_type(ctxt, new_volume)
if rep_type:
rel_info = self._helpers.get_relationship_info(current_name)
aux_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
original_volume_name)
self._aux_backend_helpers.rename_vdisk(
rel_info['aux_vdisk_name'], aux_vol)
except exception.VolumeBackendAPIException:
LOG.error('Unable to rename the logical volume '
'for volume: %s', volume['id'])
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
# If the back-end name(id) for the volume has been renamed,
# it is OK for the volume to keep the original name(id) and there is
# no need to use the column "_name_id" to establish the mapping
# relationship between the volume id and the back-end volume
# name(id).
# Set the key "_name_id" to None for a successful rename.
model_update = {'_name_id': None}
return model_update
def manage_existing(self, volume, ref):
"""Manages an existing vdisk.
Renames the vdisk to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated -
if we got here then we have a vdisk that isn't in use (or we don't
        care if it is in use).
"""
# Check that the reference is valid
vdisk = self._manage_input_check(ref)
vdisk_io_grp = self._helpers.get_volume_io_group(vdisk['name'])
if vdisk_io_grp not in self._state['available_iogrps']:
msg = (_("Failed to manage existing volume due to "
"the volume to be managed is not in a valid "
"I/O group."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
# Add replication check
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
vol_rep_type = None
rel_info = self._helpers.get_relationship_info(vdisk['name'])
copies = self._helpers.get_vdisk_copies(vdisk['name'])
if rel_info:
vol_rep_type = (
storwize_const.GMCV if
storwize_const.GMCV_MULTI == rel_info['cycling_mode']
else rel_info['copy_type'])
aux_info = self._aux_backend_helpers.get_system_info()
if rel_info['aux_cluster_id'] != aux_info['system_id']:
msg = (_("Failed to manage existing volume due to the aux "
"cluster for volume %(volume)s is %(aux_id)s. The "
"configured cluster id is %(cfg_id)s") %
{'volume': vdisk['name'],
'aux_id': rel_info['aux_cluster_id'],
'cfg_id': aux_info['system_id']})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if vol_rep_type != rep_type:
msg = (_("Failed to manage existing volume due to "
"the replication type of the volume to be managed is "
"mismatch with the provided replication type."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
elif storwize_const.GMCV == rep_type:
if volume['volume_type_id']:
rep_opts = self._get_vdisk_params(
volume['volume_type_id'],
volume_metadata=volume.get('volume_metadata'))
# Check cycle_period_seconds
rep_cps = six.text_type(rep_opts.get('cycle_period_seconds'))
if rel_info['cycle_period_seconds'] != rep_cps:
msg = (_("Failed to manage existing volume due to "
"the cycle_period_seconds %(vol_cps)s of "
"the volume to be managed is mismatch with "
"cycle_period_seconds %(type_cps)s in "
"the provided gmcv replication type.") %
{'vol_cps': rel_info['cycle_period_seconds'],
'type_cps': rep_cps})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if volume['volume_type_id']:
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
resp = self._helpers.lsvdiskcopy(vdisk['name'])
expected_copy_num = 2 if opts['mirror_pool'] else 1
if len(resp) != expected_copy_num:
msg = (_("Failed to manage existing volume due to mirror type "
"mismatch. Volume to be managed has %(resp_len)s "
"copies. mirror_pool of the chosen type is "
"%(mirror_pool)s.") %
{'resp_len': len(resp),
'mirror_pool': opts['mirror_pool']})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
            if (opts['mirror_pool'] and opts['mirror_pool'] !=
copies['secondary']['mdisk_grp_name']):
msg = (_("Failed to manage existing volume due to mirror pool "
"mismatch. The secondary pool of the volume to be "
"managed is %(sec_copy_pool)s. mirror_pool of the "
"chosen type is %(mirror_pool)s.") %
{'sec_copy_pool': copies['secondary']['mdisk_grp_name'],
'mirror_pool': opts['mirror_pool']})
raise exception.ManageExistingVolumeTypeMismatch(
reason=msg)
vdisk_copy = self._helpers.get_vdisk_copy_attrs(vdisk['name'], '0')
if vdisk_copy['autoexpand'] == 'on' and opts['rsize'] == -1:
msg = (_("Failed to manage existing volume due to "
"the volume to be managed is thin, but "
"the volume type chosen is thick."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if not vdisk_copy['autoexpand'] and opts['rsize'] != -1:
msg = (_("Failed to manage existing volume due to "
"the volume to be managed is thick, but "
"the volume type chosen is thin."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if (vdisk_copy['compressed_copy'] == 'no' and
opts['compression']):
msg = (_("Failed to manage existing volume due to the "
"volume to be managed is not compress, but "
"the volume type chosen is compress."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if (vdisk_copy['compressed_copy'] == 'yes' and
not opts['compression']):
msg = (_("Failed to manage existing volume due to the "
"volume to be managed is compress, but "
"the volume type chosen is not compress."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if (vdisk_io_grp not in
StorwizeHelpers._get_valid_requested_io_groups(
self._state, opts)):
msg = (_("Failed to manage existing volume due to "
"I/O group mismatch. The I/O group of the "
"volume to be managed is %(vdisk_iogrp)s. I/O group"
"of the chosen type is %(opt_iogrp)s.") %
{'vdisk_iogrp': vdisk['IO_group_name'],
'opt_iogrp': opts['iogrp']})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
pool = utils.extract_host(volume['host'], 'pool')
if copies['primary']['mdisk_grp_name'] != pool:
msg = (_("Failed to manage existing volume due to the "
"pool of the volume to be managed does not "
"match the backend pool. Pool of the "
"volume to be managed is %(vdisk_pool)s. Pool "
"of the backend is %(backend_pool)s.") %
{'vdisk_pool': copies['primary']['mdisk_grp_name'],
'backend_pool': pool})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
model_update = {}
self._helpers.rename_vdisk(vdisk['name'], volume['name'])
if vol_rep_type:
aux_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
self._aux_backend_helpers.rename_vdisk(rel_info['aux_vdisk_name'],
aux_vol)
if storwize_const.GMCV == vol_rep_type:
self._helpers.rename_vdisk(
rel_info['master_change_vdisk_name'],
storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'])
self._aux_backend_helpers.rename_vdisk(
rel_info['aux_change_vdisk_name'],
storwize_const.REPLICA_CHG_VOL_PREFIX + aux_vol)
model_update = {'replication_status':
fields.ReplicationStatus.ENABLED}
return model_update
def manage_existing_get_size(self, volume, ref):
"""Return size of an existing Vdisk for manage_existing.
existing_ref is a dictionary of the form:
{'source-id': <uid of disk>} or
{'source-name': <name of the disk>}
Optional elements are:
'manage_if_in_use': True/False (default is False)
If set to True, a volume will be managed even if it is currently
attached to a host system.
"""
# Check that the reference is valid
vdisk = self._manage_input_check(ref)
# Check if the disk is in use, if we need to.
manage_if_in_use = ref.get('manage_if_in_use', False)
if (not manage_if_in_use and
self._helpers.is_vdisk_in_use(vdisk['name'])):
reason = _('The specified vdisk is mapped to a host.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
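        # 'capacity' is expected to be reported in bytes here; the size is
        # rounded up to whole GiB (e.g. 10737418240 bytes -> 10).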
return int(math.ceil(float(vdisk['capacity']) / units.Gi))
def unmanage(self, volume):
"""Remove the specified volume from Cinder management."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If we haven't gotten stats yet or 'refresh' is True,
        update the stats first.
"""
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
# Add CG capability to generic volume groups
def create_group(self, context, group):
"""Creates a group.
:param context: the context of the caller.
:param group: the group object.
:returns: model_update
"""
LOG.debug("Creating group.")
model_update = {'status': fields.GroupStatus.AVAILABLE}
for vol_type_id in group.volume_type_ids:
replication_type = self._get_volume_replicated_type(
context, None, vol_type_id)
if replication_type:
# An unsupported configuration
LOG.error('Unable to create group: create group with '
'replication volume type is not supported.')
model_update = {'status': fields.GroupStatus.ERROR}
return model_update
if utils.is_group_a_cg_snapshot_type(group):
return {'status': fields.GroupStatus.AVAILABLE}
# we'll rely on the generic group implementation if it is not a
# consistency group request.
raise NotImplementedError()
def delete_group(self, context, group, volumes):
"""Deletes a group.
:param context: the context of the caller.
:param group: the group object.
:param volumes: a list of volume objects in the group.
:returns: model_update, volumes_model_update
"""
LOG.debug("Deleting group.")
if not utils.is_group_a_cg_snapshot_type(group):
# we'll rely on the generic group implementation if it is
# not a consistency group request.
raise NotImplementedError()
model_update = {'status': fields.GroupStatus.DELETED}
volumes_model_update = []
for volume in volumes:
try:
self._helpers.delete_vdisk(volume['name'], True)
volumes_model_update.append(
{'id': volume['id'], 'status': 'deleted'})
except exception.VolumeBackendAPIException as err:
model_update['status'] = (
fields.GroupStatus.ERROR_DELETING)
LOG.error("Failed to delete the volume %(vol)s of CG. "
"Exception: %(exception)s.",
{'vol': volume['name'], 'exception': err})
volumes_model_update.append(
{'id': volume['id'],
'status': fields.GroupStatus.ERROR_DELETING})
return model_update, volumes_model_update
def update_group(self, context, group, add_volumes=None,
remove_volumes=None):
"""Updates a group.
:param context: the context of the caller.
:param group: the group object.
:param add_volumes: a list of volume objects to be added.
:param remove_volumes: a list of volume objects to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
"""
LOG.debug("Updating group.")
if utils.is_group_a_cg_snapshot_type(group):
return None, None, None
# we'll rely on the generic group implementation if it is not a
# consistency group request.
raise NotImplementedError()
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of Volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
LOG.debug('Enter: create_group_from_src.')
if not utils.is_group_a_cg_snapshot_type(group):
# we'll rely on the generic volume groups implementation if it is
# not a consistency group request.
raise NotImplementedError()
if group_snapshot and snapshots:
cg_name = 'cg-' + group_snapshot.id
sources = snapshots
elif source_group and source_vols:
cg_name = 'cg-' + source_group.id
sources = source_vols
else:
error_msg = _("create_group_from_src must be creating from a "
"group snapshot, or a source group.")
raise exception.InvalidInput(reason=error_msg)
LOG.debug('create_group_from_src: cg_name %(cg_name)s'
' %(sources)s', {'cg_name': cg_name, 'sources': sources})
self._helpers.create_fc_consistgrp(cg_name)
timeout = self.configuration.storwize_svc_flashcopy_timeout
model_update, snapshots_model = (
self._helpers.create_cg_from_source(group,
cg_name,
sources,
volumes,
self._state,
self.configuration,
timeout))
LOG.debug("Leave: create_group_from_src.")
return model_update, snapshots_model
def create_group_snapshot(self, context, group_snapshot, snapshots):
"""Creates a group_snapshot.
:param context: the context of the caller.
:param group_snapshot: the GroupSnapshot object to be created.
:param snapshots: a list of Snapshot objects in the group_snapshot.
:returns: model_update, snapshots_model_update
"""
if not utils.is_group_a_cg_snapshot_type(group_snapshot):
# we'll rely on the generic group implementation if it is not a
# consistency group request.
raise NotImplementedError()
# Use group_snapshot id as cg name
cg_name = 'cg_snap-' + group_snapshot.id
# Create new cg as cg_snapshot
self._helpers.create_fc_consistgrp(cg_name)
timeout = self.configuration.storwize_svc_flashcopy_timeout
model_update, snapshots_model = (
self._helpers.run_consistgrp_snapshots(cg_name,
snapshots,
self._state,
self.configuration,
timeout))
return model_update, snapshots_model
def delete_group_snapshot(self, context, group_snapshot, snapshots):
"""Deletes a group_snapshot.
:param context: the context of the caller.
:param group_snapshot: the GroupSnapshot object to be deleted.
:param snapshots: a list of snapshot objects in the group_snapshot.
:returns: model_update, snapshots_model_update
"""
if not utils.is_group_a_cg_snapshot_type(group_snapshot):
# we'll rely on the generic group implementation if it is not a
# consistency group request.
raise NotImplementedError()
cgsnapshot_id = group_snapshot.id
cg_name = 'cg_snap-' + cgsnapshot_id
model_update, snapshots_model = (
self._helpers.delete_consistgrp_snapshots(cg_name,
snapshots))
return model_update, snapshots_model
def get_pool(self, volume):
attr = self._helpers.get_vdisk_attributes(volume['name'])
if attr is None:
msg = (_('get_pool: Failed to get attributes for volume '
'%s') % volume['name'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return attr['mdisk_grp_name']
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
data = {}
data['vendor_name'] = 'IBM'
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.protocol
data['pools'] = []
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = (backend_name or
self._state['system_name'])
data['pools'] = [self._build_pool_stats(pool)
for pool in
self._get_backend_pools()]
if self._replica_enabled:
data['replication'] = self._replica_enabled
data['replication_enabled'] = self._replica_enabled
data['replication_targets'] = self._get_replication_targets()
self._stats = data
def _build_pool_stats(self, pool):
"""Build pool status"""
QoS_support = True
pool_stats = {}
try:
pool_data = self._helpers.get_pool_attrs(pool)
if pool_data:
easy_tier = pool_data['easy_tier'] in ['on', 'auto']
total_capacity_gb = float(pool_data['capacity']) / units.Gi
free_capacity_gb = float(pool_data['free_capacity']) / units.Gi
allocated_capacity_gb = (float(pool_data['used_capacity']) /
units.Gi)
provisioned_capacity_gb = float(
pool_data['virtual_capacity']) / units.Gi
rsize = self.configuration.safe_get(
'storwize_svc_vol_rsize')
# rsize of -1 or 100 means fully allocate the mdisk
use_thick_provisioning = rsize == -1 or rsize == 100
over_sub_ratio = self.configuration.safe_get(
'max_over_subscription_ratio')
location_info = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
{'sys_id': self._state['system_id'],
'pool': pool_data['name']})
multiattach = (self.configuration.
storwize_svc_multihostmap_enabled)
pool_stats = {
'pool_name': pool_data['name'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'allocated_capacity_gb': allocated_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
'compression_support': self._state['compression_enabled'],
'reserved_percentage':
self.configuration.reserved_percentage,
'QoS_support': QoS_support,
'consistencygroup_support': True,
'location_info': location_info,
'easytier_support': easy_tier,
'multiattach': multiattach,
'thin_provisioning_support': not use_thick_provisioning,
'thick_provisioning_support': use_thick_provisioning,
'max_over_subscription_ratio': over_sub_ratio,
'consistent_group_snapshot_enabled': True,
}
if self._replica_enabled:
pool_stats.update({
'replication_enabled': self._replica_enabled,
'replication_type': self._supported_replica_types,
'replication_targets': self._get_replication_targets(),
'replication_count': len(self._get_replication_targets())
})
except exception.VolumeBackendAPIException:
msg = _('Failed getting details for pool %s.') % pool
raise exception.VolumeBackendAPIException(data=msg)
return pool_stats
def _get_replication_targets(self):
return [self._replica_target['backend_id']]
def _manage_input_check(self, ref):
"""Verify the input of manage function."""
# Check that the reference is valid
if 'source-name' in ref:
manage_source = ref['source-name']
vdisk = self._helpers.get_vdisk_attributes(manage_source)
elif 'source-id' in ref:
manage_source = ref['source-id']
vdisk = self._helpers.vdisk_by_uid(manage_source)
else:
reason = _('Reference must contain source-id or '
'source-name element.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
if vdisk is None:
reason = (_('No vdisk with the UID specified by ref %s.')
% manage_source)
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return vdisk
|
eharney/cinder
|
cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
|
Python
|
apache-2.0
| 179,526 | 0.000045 |
#!/usr/bin/python3
#
# Copyright (C) 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.utils.log"""
import os
import unittest
import logging
import tempfile
import shutil
import threading
from io import FileIO, StringIO
from ganeti import constants
from ganeti import errors
from ganeti import compat
from ganeti import utils
import testutils
class TestLogHandler(unittest.TestCase):
def testNormal(self):
tmpfile = tempfile.NamedTemporaryFile()
handler = utils.log._ReopenableLogHandler(tmpfile.name)
handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
logger = logging.Logger("TestLogger")
logger.addHandler(handler)
self.assertEqual(len(logger.handlers), 1)
logger.error("Test message ERROR")
logger.info("Test message INFO")
logger.removeHandler(handler)
self.assertFalse(logger.handlers)
handler.close()
self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 2)
def testReopen(self):
tmpfile = tempfile.NamedTemporaryFile()
tmpfile2 = tempfile.NamedTemporaryFile()
handler = utils.log._ReopenableLogHandler(tmpfile.name)
self.assertFalse(utils.ReadFile(tmpfile.name))
self.assertFalse(utils.ReadFile(tmpfile2.name))
logger = logging.Logger("TestLoggerReopen")
logger.addHandler(handler)
for _ in range(3):
logger.error("Test message ERROR")
handler.flush()
self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 3)
before_id = utils.GetFileID(tmpfile.name)
handler.RequestReopen()
self.assertTrue(handler._reopen)
self.assertTrue(utils.VerifyFileID(utils.GetFileID(tmpfile.name),
before_id))
# Rename only after requesting reopen
os.rename(tmpfile.name, tmpfile2.name)
assert not os.path.exists(tmpfile.name)
# Write another message, should reopen
for _ in range(4):
logger.info("Test message INFO")
# Flag must be reset
self.assertFalse(handler._reopen)
self.assertFalse(utils.VerifyFileID(utils.GetFileID(tmpfile.name),
before_id))
logger.removeHandler(handler)
self.assertFalse(logger.handlers)
handler.close()
self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 4)
self.assertEqual(len(utils.ReadFile(tmpfile2.name).splitlines()), 3)
def testConsole(self):
temp_file = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8")
failing_file = self._FailingFile(os.devnull, "w")
for (console, check) in [(None, False),
(temp_file, True),
(failing_file, False)]:
# Create a handler which will fail when handling errors
cls = utils.log._LogErrorsToConsole(self._FailingHandler)
# Instantiate handler with file which will fail when writing,
# provoking a write to the console
failing_output = self._FailingFile(os.devnull)
handler = cls(console, failing_output)
logger = logging.Logger("TestLogger")
logger.addHandler(handler)
self.assertEqual(len(logger.handlers), 1)
# Provoke write
logger.error("Test message ERROR")
# Take everything apart
logger.removeHandler(handler)
self.assertFalse(logger.handlers)
handler.close()
failing_output.close()
if console and check:
console.flush()
# Check console output
consout = utils.ReadFile(console.name)
self.assertTrue("Cannot log message" in consout)
self.assertTrue("Test message ERROR" in consout)
temp_file.close()
failing_file.close()
class _FailingFile(FileIO):
def write(self, _):
raise Exception
class _FailingHandler(logging.StreamHandler):
def handleError(self, _):
raise Exception
class TestSetupLogging(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testSimple(self):
logfile = utils.PathJoin(self.tmpdir, "basic.log")
logger = logging.Logger("TestLogger")
self.assertTrue(callable(utils.SetupLogging(logfile, "test",
console_logging=False,
syslog=constants.SYSLOG_NO,
stderr_logging=False,
multithreaded=False,
root_logger=logger)))
self.assertEqual(utils.ReadFile(logfile), "")
logger.error("This is a test")
# Ensure SetupLogging used custom logger
logging.error("This message should not show up in the test log file")
self.assertTrue(utils.ReadFile(logfile).endswith("This is a test\n"))
def testReopen(self):
logfile = utils.PathJoin(self.tmpdir, "reopen.log")
logfile2 = utils.PathJoin(self.tmpdir, "reopen.log.OLD")
logger = logging.Logger("TestLogger")
reopen_fn = utils.SetupLogging(logfile, "test",
console_logging=False,
syslog=constants.SYSLOG_NO,
stderr_logging=False,
multithreaded=False,
root_logger=logger)
self.assertTrue(callable(reopen_fn))
self.assertEqual(utils.ReadFile(logfile), "")
logger.error("This is a test")
self.assertTrue(utils.ReadFile(logfile).endswith("This is a test\n"))
os.rename(logfile, logfile2)
assert not os.path.exists(logfile)
# Notify logger to reopen on the next message
reopen_fn()
assert not os.path.exists(logfile)
# Provoke actual reopen
logger.error("First message")
self.assertTrue(utils.ReadFile(logfile).endswith("First message\n"))
self.assertTrue(utils.ReadFile(logfile2).endswith("This is a test\n"))
class TestSetupToolLogging(unittest.TestCase):
def test(self):
error_name = logging.getLevelName(logging.ERROR)
warn_name = logging.getLevelName(logging.WARNING)
info_name = logging.getLevelName(logging.INFO)
debug_name = logging.getLevelName(logging.DEBUG)
for debug in [False, True]:
for verbose in [False, True]:
logger = logging.Logger("TestLogger")
buf = StringIO()
utils.SetupToolLogging(debug, verbose, _root_logger=logger, _stream=buf)
logger.error("level=error")
logger.warning("level=warning")
logger.info("level=info")
logger.debug("level=debug")
lines = buf.getvalue().splitlines()
self.assertTrue(compat.all(line.count(":") == 3 for line in lines))
messages = [line.split(":", 3)[-1].strip() for line in lines]
if debug:
self.assertEqual(messages, [
"%s level=error" % error_name,
"%s level=warning" % warn_name,
"%s level=info" % info_name,
"%s level=debug" % debug_name,
])
elif verbose:
self.assertEqual(messages, [
"%s level=error" % error_name,
"%s level=warning" % warn_name,
"%s level=info" % info_name,
])
else:
self.assertEqual(messages, [
"level=error",
"level=warning",
])
def testThreadName(self):
thread_name = threading.currentThread().getName()
for enable_threadname in [False, True]:
logger = logging.Logger("TestLogger")
buf = StringIO()
utils.SetupToolLogging(True, True, threadname=enable_threadname,
_root_logger=logger, _stream=buf)
logger.debug("test134042376")
lines = buf.getvalue().splitlines()
self.assertEqual(len(lines), 1)
if enable_threadname:
self.assertTrue((" %s " % thread_name) in lines[0])
else:
self.assertTrue(thread_name not in lines[0])
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
ganeti/ganeti
|
test/py/ganeti.utils.log_unittest.py
|
Python
|
bsd-2-clause
| 9,296 | 0.005271 |
# Copyright 2016, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import googleapiclient.discovery
import generate_wrapped_rsa_key
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
def test_main():
generate_wrapped_rsa_key.main(None)
def test_create_disk():
compute = googleapiclient.discovery.build('compute', 'beta')
# Generate the key.
key_bytes = os.urandom(32)
google_public_key = generate_wrapped_rsa_key.get_google_public_cert_key()
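    # Wrap (encrypt) the random 256-bit key with Google's public RSA
    # certificate; the wrapped key is what the API accepts as
    # 'rsaEncryptedKey' below.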
wrapped_rsa_key = generate_wrapped_rsa_key.wrap_rsa_key(
google_public_key, key_bytes)
disk_name = 'new-encrypted-disk-{}'.format(uuid.uuid4().hex)
try:
# Create the disk, if the encryption key is invalid, this will raise.
compute.disks().insert(
project=PROJECT,
zone='us-central1-f',
body={
'name': disk_name,
'diskEncryptionKey': {
'rsaEncryptedKey': wrapped_rsa_key.decode('utf-8')
}
}).execute()
finally:
# Delete the disk.
compute.disks().delete(
project=PROJECT,
zone='us-central1-f',
disk=disk_name).execute()
|
GoogleCloudPlatform/python-docs-samples
|
compute/encryption/generate_wrapped_rsa_key_test.py
|
Python
|
apache-2.0
| 1,721 | 0 |
#
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import sys
import inspect
import unittest
from mock import patch
from mock import MagicMock
from . import get_driver
from . import get_driver_class
from . import get_driver_names
from .driverbase import VirtDeployDriverBase
if sys.version_info[0] == 3: # pragma: no cover
builtin_import = 'builtins.__import__'
else: # pragma: no cover
builtin_import = '__builtin__.__import__'
def try_import(spec):
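    # Wrap the given __import__ implementation: any module that fails to
    # import is replaced with a MagicMock so missing optional driver
    # dependencies do not break these tests.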
def fake_import(name, globals={}, locals={}, fromlist=[], level=0):
try:
return spec(name, globals, locals, fromlist, level)
except ImportError:
return MagicMock()
return fake_import
class TestVirtDeployDriverBase(unittest.TestCase):
def _get_driver_methods(self):
return inspect.getmembers(VirtDeployDriverBase, inspect.ismethod)
def _get_driver_class(self, name):
with patch(builtin_import, spec=True, new_callable=try_import):
return get_driver_class(name)
def _get_driver(self, name):
with patch(builtin_import, spec=True, new_callable=try_import):
return get_driver(name)
def test_base_not_implemented(self):
driver = VirtDeployDriverBase()
for name, method in self._get_driver_methods():
spec = inspect.getargspec(method)
with self.assertRaises(NotImplementedError) as cm:
getattr(driver, name)(*(None,) * (len(spec.args) - 1))
self.assertEqual(cm.exception.args[0], name)
def test_drivers_interface(self):
for driver_name in get_driver_names():
driver = self._get_driver_class(driver_name)
for name, method in self._get_driver_methods():
driver_method = getattr(driver, name)
self.assertNotEqual(driver_method, method)
self.assertEqual(inspect.getargspec(method),
inspect.getargspec(driver_method))
def test_get_drivers(self):
for driver_name in get_driver_names():
driver = self._get_driver(driver_name)
self.assertTrue(isinstance(driver, VirtDeployDriverBase))
|
jaryn/virt-deploy
|
virtdeploy/test_driverbase.py
|
Python
|
gpl-2.0
| 2,986 | 0 |
import os
import logging
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import \
QWidget, QSplitter, QTreeWidget, QTreeWidgetItem, QMenu, \
QTableWidgetItem
from sheetwidget import SheetWidget
from commonwidgets import *
class SourcesWidget(QSplitter):
updateRequested = pyqtSignal(name='updateRequested')
def __init__(self):
super().__init__(Qt.Horizontal)
self.tree = QTreeWidget()
self.blank = QWidget()
self.addWidget(self.tree)
self.addWidget(self.blank)
self.tree.header().hide()
self.tree.itemSelectionChanged.connect(self.treeItemSelectionChanged)
self.tree.itemChanged.connect(lambda item, col: self.updateRequested.emit())
self.tree.setContextMenuPolicy(Qt.CustomContextMenu)
self.tree.customContextMenuRequested.connect(self.treeContextMenuRequested)
self.fileMenu = QMenu()
self.fileMenu.addAction('&Remove file').triggered.connect(
lambda: self.removeFile(self.fileMenu.target))
self.sheets = []
def treeItemSelectionChanged(self):
items = self.tree.selectedItems()
if len(items) == 0:
self.replaceWidget(1, self.blank)
return
item = items[0]
sw = item.data(0, Qt.UserRole)[0]
if not isinstance(sw, SheetWidget):
self.replaceWidget(1, self.blank)
return
self.replaceWidget(1, sw)
def topLevelItemForFilename(self, filename):
for i in range(self.tree.topLevelItemCount()):
item = self.tree.topLevelItem(i)
if item.data(0, Qt.UserRole)[0] == filename:
return item
return None
def addFile(self, filename, checked, expanded, sheets):
fitem = self.topLevelItemForFilename(filename)
if fitem is not None:
      self.tree.takeTopLevelItem(self.tree.indexOfTopLevelItem(fitem))
fitem = QTreeWidgetItem([os.path.basename(filename)])
fitem.setData(0, Qt.UserRole, (filename,))
fitem.setFlags((fitem.flags() | Qt.ItemIsUserCheckable) & ~Qt.ItemIsSelectable)
fitem.setCheckState(0, Qt.Checked if checked else Qt.Unchecked)
self.tree.addTopLevelItem(fitem)
fitem.setExpanded(expanded)
for sheet, checked in sheets:
self.addSheet(fitem, sheet, checked)
def addSheet(self, fitem, sheet, checked):
def copyInput(fitem, target, toall):
key = {
'x': 'xFormula',
'y': 'yFormula',
'xrange': 'xRange'
}[target]
val = getattr(sw.sheet, key).strValue()
if toall:
fitems = []
for i in range(self.tree.topLevelItemCount()):
fitems.append(self.tree.topLevelItem(i))
else:
fitems = [fitem]
for fitem in fitems:
for j in range(fitem.childCount()):
sitem = fitem.child(j)
sw_ = sitem.data(0, Qt.UserRole)[0]
if sw_ != sw:
getattr(sw_.sheet, key).setStrValue(val)
sw = SheetWidget(sheet)
sw.copyInputRequested.connect(lambda *a: copyInput(fitem, *a))
item = QTreeWidgetItem([sheet.name])
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setData(0, Qt.UserRole, (sw,))
item.setCheckState(0, Qt.Checked if checked else Qt.Unchecked)
fitem.addChild(item)
def removeAllFiles(self):
while self.tree.topLevelItemCount() > 0:
self.tree.takeTopLevelItem(0)
def files(self):
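    """Return the current tree contents as a list of dicts.
    Each entry looks like {'filename': str, 'enabled': bool, 'expanded': bool,
    'sheets': [(SheetWidget, checked), ...]}; files without sheets are skipped.
    """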
files = []
for i in range(self.tree.topLevelItemCount()):
fitem = self.tree.topLevelItem(i)
filename = fitem.data(0, Qt.UserRole)[0]
sheets = []
for j in range(fitem.childCount()):
sitem = fitem.child(j)
sw = sitem.data(0, Qt.UserRole)[0]
sheets.append((sw, sitem.checkState(0) == Qt.Checked))
if len(sheets) > 0:
files.append({
'filename': filename,
'enabled': fitem.checkState(0) == Qt.Checked,
'expanded': fitem.isExpanded(),
'sheets': sheets
})
return files
def enabledSheetWidgets(self):
return sum([[sw for sw, c in f['sheets'] if c] for f in self.files() if f['enabled']], [])
def siblingSheetWidgets(self, sheetwidget):
for i in range(self.tree.topLevelItemCount()):
fitem = self.tree.topLevelItem(i)
widgets = []
hit = False
for j in range(fitem.childCount()):
sitem = fitem.child(j)
sw = sitem.data(0, Qt.UserRole)[0]
if sitem.checkState(0) == Qt.Checked: widgets.append(sw)
if sw == sheetwidget: hit = True
      if hit:
        return widgets
return []
def removeFile(self, item):
idx = self.tree.indexOfTopLevelItem(item)
if idx >= 0:
self.tree.takeTopLevelItem(idx)
def treeContextMenuRequested(self, pos):
item = self.tree.itemAt(pos)
if self.tree.indexOfTopLevelItem(item) >= 0:
self.fileMenu.target = item
self.fileMenu.exec_(QCursor.pos())
|
takumak/tuna
|
src/sourceswidget.py
|
Python
|
mit
| 4,761 | 0.010292 |
# projectParams.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
STUDENT_CODE_DEFAULT = 'multiAgents.py'
PROJECT_TEST_CLASSES = 'multiagentTestClasses.py'
PROJECT_NAME = 'Project 2: Multiagent search'
BONUS_PIC = False
|
jpszerzp/sample_AI
|
projectParams.py
|
Python
|
apache-2.0
| 818 | 0.002445 |
"""
This module holds models related to benefits features and configurations
"""
from django import forms
from django.db import models
from django.db.models import UniqueConstraint
from django.urls import reverse
from polymorphic.models import PolymorphicModel
from sponsors.models.assets import ImgAsset, TextAsset, FileAsset, ResponseAsset, Response
from sponsors.models.enums import (
PublisherChoices,
LogoPlacementChoices,
AssetsRelatedTo,
)
from sponsors.models.managers import BenefitFeatureQuerySet
########################################
# Benefit features abstract classes
class BaseLogoPlacement(models.Model):
publisher = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in PublisherChoices],
verbose_name="Publisher",
help_text="On which site should the logo be displayed?"
)
logo_place = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in LogoPlacementChoices],
verbose_name="Logo Placement",
help_text="Where the logo should be placed?"
)
link_to_sponsors_page = models.BooleanField(
default=False,
help_text="Override URL in placement to the PSF Sponsors Page, rather than the sponsor landing page url.",
)
describe_as_sponsor = models.BooleanField(
default=False,
help_text='Override description with "SPONSOR_NAME is a SPONSOR_LEVEL sponsor of the Python Software Foundation".',
)
class Meta:
abstract = True
class BaseTieredQuantity(models.Model):
package = models.ForeignKey("sponsors.SponsorshipPackage", on_delete=models.CASCADE)
quantity = models.PositiveIntegerField()
class Meta:
abstract = True
class BaseEmailTargetable(models.Model):
class Meta:
abstract = True
class BaseAsset(models.Model):
ASSET_CLASS = None
related_to = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in AssetsRelatedTo],
verbose_name="Related To",
help_text="To which instance (Sponsor or Sponsorship) should this asset relate to."
)
internal_name = models.CharField(
max_length=128,
verbose_name="Internal Name",
help_text="Unique name used internally to control if the sponsor/sponsorship already has the asset",
unique=False,
db_index=True,
)
label = models.CharField(
max_length=256,
help_text="What's the title used to display the input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
class Meta:
abstract = True
class BaseRequiredAsset(BaseAsset):
due_date = models.DateField(default=None, null=True, blank=True)
class Meta:
abstract = True
class BaseProvidedAsset(BaseAsset):
shared = models.BooleanField(
        default=False,
)
def shared_value(self):
return None
class Meta:
abstract = True
class AssetConfigurationMixin:
"""
    This class should be used to implement asset configurations.
    It's a mixin that extends benefit feature creation to also
    create the related asset models.
"""
def create_benefit_feature(self, sponsor_benefit, **kwargs):
if not self.ASSET_CLASS:
raise NotImplementedError(
"Subclasses of AssetConfigurationMixin must define an ASSET_CLASS attribute.")
# Super: BenefitFeatureConfiguration.create_benefit_feature
benefit_feature = super().create_benefit_feature(sponsor_benefit, **kwargs)
content_object = sponsor_benefit.sponsorship
if self.related_to == AssetsRelatedTo.SPONSOR.value:
content_object = sponsor_benefit.sponsorship.sponsor
asset_qs = content_object.assets.filter(internal_name=self.internal_name)
if not asset_qs.exists():
asset = self.ASSET_CLASS(
content_object=content_object, internal_name=self.internal_name,
)
asset.save()
return benefit_feature
class Meta:
abstract = True
class BaseRequiredImgAsset(BaseRequiredAsset):
ASSET_CLASS = ImgAsset
min_width = models.PositiveIntegerField()
max_width = models.PositiveIntegerField()
min_height = models.PositiveIntegerField()
max_height = models.PositiveIntegerField()
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseRequiredTextAsset(BaseRequiredAsset):
ASSET_CLASS = TextAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the text input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
max_length = models.IntegerField(
default=None,
help_text="Limit to length of the input, empty means unlimited",
null=True,
blank=True,
)
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseRequiredResponseAsset(BaseRequiredAsset):
ASSET_CLASS = ResponseAsset
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseProvidedTextAsset(BaseProvidedAsset):
ASSET_CLASS = TextAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the text input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
class Meta(BaseProvidedAsset.Meta):
abstract = True
class BaseProvidedFileAsset(BaseProvidedAsset):
ASSET_CLASS = FileAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the file to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the file should be used",
default="",
blank=True
)
shared_file = models.FileField(blank=True, null=True)
def shared_value(self):
return self.shared_file
class Meta(BaseProvidedAsset.Meta):
abstract = True
class AssetMixin:
def __related_asset(self):
"""
This method exists to avoid FK relationships between the GenericAsset
        and required asset objects. This decouples the asset setup from the
        real asset value in a way that, if the first gets deleted, the second
        can still be reused.
"""
object = self.sponsor_benefit.sponsorship
if self.related_to == AssetsRelatedTo.SPONSOR.value:
object = self.sponsor_benefit.sponsorship.sponsor
return object.assets.get(internal_name=self.internal_name)
@property
def value(self):
asset = self.__related_asset()
return asset.value
@value.setter
def value(self, value):
asset = self.__related_asset()
asset.value = value
asset.save()
@property
def user_edit_url(self):
url = reverse("users:update_sponsorship_assets", args=[self.sponsor_benefit.sponsorship.pk])
return url + f"?required_asset={self.pk}"
@property
def user_view_url(self):
url = reverse("users:view_provided_sponsorship_assets", args=[self.sponsor_benefit.sponsorship.pk])
return url + f"?provided_asset={self.pk}"
class RequiredAssetMixin(AssetMixin):
"""
This class should be used to implement required assets.
It's a mixin to get the information submitted by the user
and which is stored in the related asset class.
"""
pass
class ProvidedAssetMixin(AssetMixin):
"""
This class should be used to implement provided assets.
It's a mixin to get the information submitted by the staff
and which is stored in the related asset class.
"""
@AssetMixin.value.getter
def value(self):
if hasattr(self, 'shared') and self.shared:
return self.shared_value()
return super().value
######################################################
# SponsorshipBenefit features configuration models
class BenefitFeatureConfiguration(PolymorphicModel):
"""
Base class for sponsorship benefits configuration.
"""
benefit = models.ForeignKey("sponsors.SponsorshipBenefit", on_delete=models.CASCADE)
class Meta:
verbose_name = "Benefit Feature Configuration"
verbose_name_plural = "Benefit Feature Configurations"
@property
def benefit_feature_class(self):
"""
Return a subclass of BenefitFeature related to this configuration.
Every configuration subclass must implement this property
"""
raise NotImplementedError
def get_benefit_feature_kwargs(self, **kwargs):
"""
Return kwargs dict to initialize the benefit feature.
If the benefit should not be created, return None instead.
"""
# Get all fields from benefit feature configuration base model
base_fields = set(BenefitFeatureConfiguration._meta.get_fields())
# Get only the fields from the abstract base feature model
benefit_fields = set(self._meta.get_fields()) - base_fields
# Configure the related benefit feature using values from the configuration
for field in benefit_fields:
# Skip the OneToOne rel from the base class to BenefitFeatureConfiguration base class
# since this field only exists in child models
if BenefitFeatureConfiguration is getattr(field, 'related_model', None):
continue
kwargs[field.name] = getattr(self, field.name)
return kwargs
def get_benefit_feature(self, **kwargs):
"""
Returns an instance of a configured type of BenefitFeature
"""
BenefitFeatureClass = self.benefit_feature_class
kwargs = self.get_benefit_feature_kwargs(**kwargs)
if kwargs is None:
return None
return BenefitFeatureClass(**kwargs)
def display_modifier(self, name, **kwargs):
return name
def create_benefit_feature(self, sponsor_benefit, **kwargs):
"""
This methods persists a benefit feature from the configuration
"""
feature = self.get_benefit_feature(sponsor_benefit=sponsor_benefit, **kwargs)
if feature is not None:
feature.save()
return feature
class LogoPlacementConfiguration(BaseLogoPlacement, BenefitFeatureConfiguration):
"""
Configuration to control how sponsor logo should be placed
"""
class Meta(BaseLogoPlacement.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Logo Placement Configuration"
verbose_name_plural = "Logo Placement Configurations"
@property
def benefit_feature_class(self):
return LogoPlacement
def __str__(self):
return f"Logo Configuration for {self.get_publisher_display()} at {self.get_logo_place_display()}"
class TieredQuantityConfiguration(BaseTieredQuantity, BenefitFeatureConfiguration):
"""
Configuration for tiered quantities among packages
"""
class Meta(BaseTieredQuantity.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Tiered Benefit Configuration"
verbose_name_plural = "Tiered Benefit Configurations"
@property
def benefit_feature_class(self):
return TieredQuantity
def get_benefit_feature_kwargs(self, **kwargs):
if kwargs["sponsor_benefit"].sponsorship.package == self.package:
return super().get_benefit_feature_kwargs(**kwargs)
return None
def __str__(self):
return f"Tiered Quantity Configuration for {self.benefit} and {self.package} ({self.quantity})"
def display_modifier(self, name, **kwargs):
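        # Illustrative example (hypothetical benefit name): with quantity=3,
        # display_modifier("Job board listing", package=self.package) returns
        # "Job board listing (3)"; any other package leaves the name as is.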
if kwargs.get("package") != self.package:
return name
return f"{name} ({self.quantity})"
class EmailTargetableConfiguration(BaseEmailTargetable, BenefitFeatureConfiguration):
"""
    Configuration for email targetable benefits
"""
    class Meta(BaseEmailTargetable.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Email Targetable Configuration"
verbose_name_plural = "Email Targetable Configurations"
@property
def benefit_feature_class(self):
return EmailTargetable
def __str__(self):
return f"Email targeatable configuration"
class RequiredImgAssetConfiguration(AssetConfigurationMixin, BaseRequiredImgAsset, BenefitFeatureConfiguration):
class Meta(BaseRequiredImgAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Require Image Configuration"
verbose_name_plural = "Require Image Configurations"
constraints = [UniqueConstraint(fields=["internal_name"], name="uniq_img_asset_cfg")]
def __str__(self):
return f"Require image configuration"
@property
def benefit_feature_class(self):
return RequiredImgAsset
class RequiredTextAssetConfiguration(AssetConfigurationMixin, BaseRequiredTextAsset,
BenefitFeatureConfiguration):
class Meta(BaseRequiredTextAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Require Text Configuration"
verbose_name_plural = "Require Text Configurations"
constraints = [UniqueConstraint(fields=["internal_name"], name="uniq_text_asset_cfg")]
def __str__(self):
return f"Require text configuration"
@property
def benefit_feature_class(self):
return RequiredTextAsset
class RequiredResponseAssetConfiguration(
AssetConfigurationMixin, BaseRequiredResponseAsset, BenefitFeatureConfiguration
):
class Meta(BaseRequiredResponseAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Require Response Configuration"
verbose_name_plural = "Require Response Configurations"
constraints = [
UniqueConstraint(fields=["internal_name"], name="uniq_response_asset_cfg")
]
def __str__(self):
return f"Require response configuration"
@property
def benefit_feature_class(self):
return RequiredResponseAsset
class ProvidedTextAssetConfiguration(
AssetConfigurationMixin, BaseProvidedTextAsset, BenefitFeatureConfiguration
):
class Meta(BaseProvidedTextAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Provided Text Configuration"
verbose_name_plural = "Provided Text Configurations"
constraints = [UniqueConstraint(fields=["internal_name"], name="uniq_provided_text_asset_cfg")]
def __str__(self):
return f"Provided text configuration"
@property
def benefit_feature_class(self):
return ProvidedTextAsset
class ProvidedFileAssetConfiguration(AssetConfigurationMixin, BaseProvidedFileAsset,
BenefitFeatureConfiguration):
class Meta(BaseProvidedFileAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = "Provided File Configuration"
verbose_name_plural = "Provided File Configurations"
constraints = [UniqueConstraint(fields=["internal_name"], name="uniq_provided_file_asset_cfg")]
def __str__(self):
return f"Provided File configuration"
@property
def benefit_feature_class(self):
return ProvidedFileAsset
####################################
# SponsorBenefit features models
class BenefitFeature(PolymorphicModel):
"""
Base class for sponsor benefits features.
"""
objects = BenefitFeatureQuerySet.as_manager()
sponsor_benefit = models.ForeignKey("sponsors.SponsorBenefit", on_delete=models.CASCADE)
class Meta:
verbose_name = "Benefit Feature"
verbose_name_plural = "Benefit Features"
def display_modifier(self, name, **kwargs):
return name
class LogoPlacement(BaseLogoPlacement, BenefitFeature):
"""
Logo Placement feature for sponsor benefits
"""
class Meta(BaseLogoPlacement.Meta, BenefitFeature.Meta):
verbose_name = "Logo Placement"
verbose_name_plural = "Logo Placement"
def __str__(self):
return f"Logo for {self.get_publisher_display()} at {self.get_logo_place_display()}"
class TieredQuantity(BaseTieredQuantity, BenefitFeature):
"""
Tiered Quantity feature for sponsor benefits
"""
class Meta(BaseTieredQuantity.Meta, BenefitFeature.Meta):
verbose_name = "Tiered Quantity"
verbose_name_plural = "Tiered Quantities"
def display_modifier(self, name, **kwargs):
return f"{name} ({self.quantity})"
def __str__(self):
return f"{self.quantity} of {self.sponsor_benefit} for {self.package}"
class EmailTargetable(BaseEmailTargetable, BenefitFeature):
"""
    For email targetable benefits
"""
    class Meta(BaseEmailTargetable.Meta, BenefitFeature.Meta):
verbose_name = "Email Targetable Benefit"
verbose_name_plural = "Email Targetable Benefits"
def __str__(self):
return f"Email targeatable"
class RequiredImgAsset(RequiredAssetMixin, BaseRequiredImgAsset, BenefitFeature):
class Meta(BaseRequiredImgAsset.Meta, BenefitFeature.Meta):
verbose_name = "Require Image"
verbose_name_plural = "Require Images"
def __str__(self):
return f"Require image"
def as_form_field(self, **kwargs):
help_text = kwargs.pop("help_text", self.help_text)
label = kwargs.pop("label", self.label)
required = kwargs.pop("required", False)
return forms.ImageField(required=required, help_text=help_text, label=label, widget=forms.ClearableFileInput, **kwargs)
class RequiredTextAsset(RequiredAssetMixin, BaseRequiredTextAsset, BenefitFeature):
class Meta(BaseRequiredTextAsset.Meta, BenefitFeature.Meta):
verbose_name = "Require Text"
verbose_name_plural = "Require Texts"
def __str__(self):
return f"Require text"
def as_form_field(self, **kwargs):
help_text = kwargs.pop("help_text", self.help_text)
label = kwargs.pop("label", self.label)
required = kwargs.pop("required", False)
max_length = self.max_length
widget = forms.TextInput
if max_length is None or max_length > 256:
widget = forms.Textarea
return forms.CharField(required=required, help_text=help_text, label=label, widget=widget, **kwargs)
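# Illustrative sketch (not part of the original module): required-asset features
# expose as_form_field() so a fulfillment form can be assembled dynamically from
# whatever a sponsorship needs. It assumes the base asset classes expose an
# `internal_name` field (as ProvidedTextAsset.__str__ and the configuration
# constraints suggest); the helper itself is hypothetical and never called here.
def _sketch_collect_form_fields(required_assets):
    # e.g. the RequiredTextAsset / RequiredImgAsset features of a sponsorship;
    # short text assets become a TextInput, longer ones fall back to a Textarea.
    return {asset.internal_name: asset.as_form_field() for asset in required_assets}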
class RequiredResponseAsset(RequiredAssetMixin, BaseRequiredResponseAsset, BenefitFeature):
    class Meta(BaseRequiredResponseAsset.Meta, BenefitFeature.Meta):
verbose_name = "Require Response"
verbose_name_plural = "Required Responses"
def __str__(self):
return f"Require response"
def as_form_field(self, **kwargs):
help_text = kwargs.pop("help_text", self.help_text)
label = kwargs.pop("label", self.label)
required = kwargs.pop("required", False)
return forms.ChoiceField(required=required, choices=Response.choices(), widget=forms.RadioSelect, help_text=help_text, label=label, **kwargs)
class ProvidedTextAsset(ProvidedAssetMixin, BaseProvidedTextAsset, BenefitFeature):
class Meta(BaseProvidedTextAsset.Meta, BenefitFeature.Meta):
verbose_name = "Provided Text"
verbose_name_plural = "Provided Texts"
def __str__(self):
return f"Provided text {self.internal_name}"
class ProvidedFileAsset(ProvidedAssetMixin, BaseProvidedFileAsset, BenefitFeature):
class Meta(BaseProvidedFileAsset.Meta, BenefitFeature.Meta):
verbose_name = "Provided File"
verbose_name_plural = "Provided Files"
def __str__(self):
return f"Provided file"
|
manhhomienbienthuy/pythondotorg
|
sponsors/models/benefits.py
|
Python
|
apache-2.0
| 20,095 | 0.002787 |
""" EPYNET Classes """
from . import epanet2
from .objectcollection import ObjectCollection
from .baseobject import BaseObject, lazy_property
from .pattern import Pattern
class Node(BaseObject):
""" Base EPANET Node class """
static_properties = {'elevation': epanet2.EN_ELEVATION}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE}
def __init__(self, uid, network):
super(Node, self).__init__(uid, network)
self.links = ObjectCollection()
def get_index(self, uid):
if not self._index:
self._index = self.network().ep.ENgetnodeindex(uid)
return self._index
def set_object_value(self, code, value):
return self.network().ep.ENsetnodevalue(self.index, code, value)
def get_object_value(self, code):
return self.network().ep.ENgetnodevalue(self.index, code)
@property
def index(self):
return self.get_index(self.uid)
@lazy_property
def coordinates(self):
return self.network().ep.ENgetcoord(self.index)
# extra functionality
@lazy_property
def upstream_links(self):
""" return a list of upstream links """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3):
links[link.uid] = link
return links
@lazy_property
def downstream_links(self):
""" return a list of downstream nodes """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
            if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < -1e-3):
links[link.uid] = link
return links
    @lazy_property
    def inflow(self):
        """ calculates all the water flowing into the node """
        inflow = 0
        for link in self.upstream_links:
            inflow += abs(link.flow)
        return inflow
    @lazy_property
    def outflow(self):
        """ calculates all the water flowing out of the node """
        outflow = 0
        for link in self.downstream_links:
            outflow += abs(link.flow)
        return outflow
class Reservoir(Node):
""" EPANET Reservoir Class """
node_type = "Reservoir"
class Junction(Node):
""" EPANET Junction Class """
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND}
node_type = "Junction"
@property
def pattern(self):
pattern_index = int(self.get_property(epanet2.EN_PATTERN))
uid = self.network().ep.ENgetpatternid(pattern_index)
return Pattern(uid, self.network())
@pattern.setter
def pattern(self, value):
if isinstance(value, int):
pattern_index = value
elif isinstance(value, str):
pattern_index = self.network().ep.ENgetpatternindex(value)
else:
pattern_index = value.index
self.network().solved = False
self.set_object_value(epanet2.EN_PATTERN, pattern_index)
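# Illustrative sketch (not part of the original module): the pattern property
# above accepts an index, a pattern uid string, or a Pattern instance, and
# always reads back as a Pattern object. The arguments are placeholders.
def _sketch_assign_pattern(junction, pattern):
    junction.pattern = pattern      # int index, str uid, or Pattern instance
    return junction.pattern.uid     # the getter always returns a Pattern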
class Tank(Node):
""" EPANET Tank Class """
node_type = "Tank"
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND,
'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM,
'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL,
'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE,
'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL}
|
VitensTC/epynet
|
epynet/node.py
|
Python
|
apache-2.0
| 4,030 | 0.00397 |
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from units.compat import mock
class AuditLogTests(ModuleTestCase):
REQUIRED_PARAMS = {'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1'}
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request'
MAX_RECORDS_MAXIMUM = 50000
MAX_RECORDS_MINIMUM = 100
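    # REQ_FUNC is the dotted path of the module's `request` helper; the tests
    # below patch it with mock.patch so no real web-services call is made.
    # MAX_RECORDS_MINIMUM/MAXIMUM mirror the boundaries the module enforces.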
def _set_args(self, **kwargs):
module_args = self.REQUIRED_PARAMS.copy()
if kwargs is not None:
module_args.update(kwargs)
set_module_args(module_args)
def test_max_records_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM)
for max_records in max_records_set:
initial["max_records"] = max_records
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.max_records == max_records)
def test_max_records_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1)
for max_records in max_records_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"):
initial["max_records"] = max_records
self._set_args(**initial)
AuditLog()
def test_threshold_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (60, 75, 90)
for threshold in threshold_set:
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.threshold == threshold)
def test_threshold_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (59, 91)
for threshold in threshold_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"):
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
AuditLog()
def test_is_proxy_pass(self):
"""Verify that True is returned when proxy is used to communicate with storage."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"api_url": "https://10.1.1.10/devmgr/v2"}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
self.assertTrue(audit_log.is_proxy())
def test_is_proxy_fail(self):
"""Verify that AnsibleJsonFail exception is thrown when exception occurs."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.is_proxy()
def test_get_configuration_pass(self):
"""Validate get configuration does not throw exception when normal request is returned."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
expected = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
body = audit_log.get_configuration()
self.assertTrue(body == expected)
def test_get_configuration_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.get_configuration()
def test_build_configuration_pass(self):
"""Validate configuration changes will force an update."""
response = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
changes = [{"max_records": 50000},
{"log_level": "all"},
{"full_policy": "preventSystemAccess"},
{"threshold": 75}]
for change in changes:
initial_with_changes = initial.copy()
initial_with_changes.update(change)
self._set_args(**initial_with_changes)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, response)):
update = audit_log.build_configuration()
self.assertTrue(update)
def test_delete_log_messages_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.delete_log_messages()
def test_update_configuration_delete_pass(self):
"""Verify 422 and force successfully returns True."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": True}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, side_effect=[(200, body),
(422, {u"invalidFieldsIfKnown": None,
u"errorMessage": u"Configuration change...",
u"localizedMessage": u"Configuration change...",
u"retcode": u"auditLogImmediateFullCondition",
u"codeType": u"devicemgrerror"}),
(200, None),
(200, None)]):
self.assertTrue(audit_log.update_configuration())
def test_update_configuration_delete_skip_fail(self):
"""Verify 422 and no force results in AnsibleJsonFail exception."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": False}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}),
(200, None), (200, None)]):
audit_log.update_configuration()
|
alxgu/ansible
|
test/units/modules/storage/netapp/test_netapp_e_auditlog.py
|
Python
|
gpl-3.0
| 10,758 | 0.003346 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007, 2008 Andrew Resch <andrewresch@gmail.com>
# Copyright (C) 2011 Pedro Algarvio <pedro@algarvio.me>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import logging
import os
from hashlib import sha1 as sha
import gtk
import pygtk
import deluge.common
import deluge.component as component
from deluge.configmanager import ConfigManager, get_config_dir
from deluge.error import AuthManagerError, NotAuthorizedError
from deluge.ui.client import client
from deluge.ui.gtkui.common import associate_magnet_links, get_deluge_icon
from deluge.ui.gtkui.dialogs import AccountDialog, ErrorDialog, InformationDialog, YesNoDialog
from deluge.ui.gtkui.path_chooser import PathChooser
pygtk.require('2.0')
log = logging.getLogger(__name__)
ACCOUNTS_USERNAME, ACCOUNTS_LEVEL, ACCOUNTS_PASSWORD = range(3)
COLOR_MISSING, COLOR_WAITING, COLOR_DOWNLOADING, COLOR_COMPLETED = range(4)
COLOR_STATES = {
"missing": COLOR_MISSING,
"waiting": COLOR_WAITING,
"downloading": COLOR_DOWNLOADING,
"completed": COLOR_COMPLETED
}
class Preferences(component.Component):
def __init__(self):
component.Component.__init__(self, "Preferences")
self.builder = gtk.Builder()
self.builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "preferences_dialog.ui")
))
self.pref_dialog = self.builder.get_object("pref_dialog")
self.pref_dialog.set_transient_for(component.get("MainWindow").window)
self.pref_dialog.set_icon(get_deluge_icon())
self.treeview = self.builder.get_object("treeview")
self.notebook = self.builder.get_object("notebook")
self.gtkui_config = ConfigManager("gtkui.conf")
self.window_open = False
self.load_pref_dialog_state()
self.builder.get_object("image_magnet").set_from_file(
deluge.common.get_pixmap("magnet.png"))
# Hide the unused associate magnet button on OSX see: #2420
if deluge.common.osx_check():
self.builder.get_object("button_associate_magnet").hide()
# Setup the liststore for the categories (tab pages)
self.liststore = gtk.ListStore(int, str)
self.treeview.set_model(self.liststore)
render = gtk.CellRendererText()
column = gtk.TreeViewColumn(_("Categories"), render, text=1)
self.treeview.append_column(column)
# Add the default categories
i = 0
for category in (_("Interface"), _("Downloads"), _("Bandwidth"), _("Queue"), _("Network"),
_("Proxy"), _("Cache"), _("Other"), _("Daemon"), _("Plugins"), "_separator_"):
self.liststore.append([i, category])
i += 1
def set_separator(model, iter, data=None):
if "_separator_" == model.get_value(iter, 1):
return True
self.treeview.set_row_separator_func(set_separator)
# Setup accounts tab lisview
self.accounts_levels_mapping = None
self.accounts_authlevel = self.builder.get_object("accounts_authlevel")
self.accounts_liststore = gtk.ListStore(str, str, str, int)
self.accounts_liststore.set_sort_column_id(ACCOUNTS_USERNAME, gtk.SORT_ASCENDING)
self.accounts_listview = self.builder.get_object("accounts_listview")
self.accounts_listview.append_column(
gtk.TreeViewColumn(
_("Username"), gtk.CellRendererText(), text=ACCOUNTS_USERNAME
)
)
self.accounts_listview.append_column(
gtk.TreeViewColumn(
_("Level"), gtk.CellRendererText(), text=ACCOUNTS_LEVEL
)
)
password_column = gtk.TreeViewColumn(
'password', gtk.CellRendererText(), text=ACCOUNTS_PASSWORD
)
self.accounts_listview.append_column(password_column)
password_column.set_visible(False)
self.accounts_listview.set_model(self.accounts_liststore)
self.accounts_listview.get_selection().connect(
"changed", self._on_accounts_selection_changed
)
self.accounts_frame = self.builder.get_object("AccountsFrame")
# Setup plugin tab listview
# The third entry is for holding translated plugin names
self.plugin_liststore = gtk.ListStore(str, bool, str)
self.plugin_liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
self.plugin_listview = self.builder.get_object("plugin_listview")
self.plugin_listview.set_model(self.plugin_liststore)
render = gtk.CellRendererToggle()
render.connect("toggled", self.on_plugin_toggled)
render.set_property("activatable", True)
self.plugin_listview.append_column(
gtk.TreeViewColumn(_("Enabled"), render, active=1))
self.plugin_listview.append_column(
gtk.TreeViewColumn(_("Plugin"), gtk.CellRendererText(), text=2))
# Connect to the 'changed' event of TreeViewSelection to get selection
# changes.
self.treeview.get_selection().connect(
"changed", self.on_selection_changed
)
self.plugin_listview.get_selection().connect(
"changed", self.on_plugin_selection_changed
)
self.builder.connect_signals({
"on_pref_dialog_delete_event": self.on_pref_dialog_delete_event,
"on_button_ok_clicked": self.on_button_ok_clicked,
"on_button_apply_clicked": self.on_button_apply_clicked,
"on_button_cancel_clicked": self.on_button_cancel_clicked,
"on_toggle": self.on_toggle,
"on_test_port_clicked": self.on_test_port_clicked,
"on_button_plugin_install_clicked": self._on_button_plugin_install_clicked,
"on_button_rescan_plugins_clicked": self._on_button_rescan_plugins_clicked,
"on_button_find_plugins_clicked": self._on_button_find_plugins_clicked,
"on_button_cache_refresh_clicked": self._on_button_cache_refresh_clicked,
"on_combo_encryption_changed": self._on_combo_encryption_changed,
"on_combo_proxy_type_changed": self._on_combo_proxy_type_changed,
"on_button_associate_magnet_clicked": self._on_button_associate_magnet_clicked,
"on_accounts_add_clicked": self._on_accounts_add_clicked,
"on_accounts_delete_clicked": self._on_accounts_delete_clicked,
"on_accounts_edit_clicked": self._on_accounts_edit_clicked,
"on_piecesbar_toggle_toggled": self._on_piecesbar_toggle_toggled,
"on_completed_color_set": self._on_completed_color_set,
"on_revert_color_completed_clicked": self._on_revert_color_completed_clicked,
"on_downloading_color_set": self._on_downloading_color_set,
"on_revert_color_downloading_clicked": self._on_revert_color_downloading_clicked,
"on_waiting_color_set": self._on_waiting_color_set,
"on_revert_color_waiting_clicked": self._on_revert_color_waiting_clicked,
"on_missing_color_set": self._on_missing_color_set,
"on_revert_color_missing_clicked": self._on_revert_color_missing_clicked,
"on_pref_dialog_configure_event": self.on_pref_dialog_configure_event,
"on_checkbutton_language_toggled": self._on_checkbutton_language_toggled,
})
if not deluge.common.osx_check() and not deluge.common.windows_check():
try:
import appindicator
assert appindicator # silence pyflakes
except ImportError:
pass
else:
self.builder.get_object("alignment_tray_type").set_visible(True)
from deluge.ui.gtkui.gtkui import DEFAULT_PREFS
self.COLOR_DEFAULTS = {}
for key in ("missing", "waiting", "downloading", "completed"):
self.COLOR_DEFAULTS[key] = DEFAULT_PREFS["pieces_color_%s" % key][:]
del DEFAULT_PREFS
# These get updated by requests done to the core
self.all_plugins = []
self.enabled_plugins = []
self.setup_path_choosers()
self.load_languages()
def setup_path_choosers(self):
self.download_location_hbox = self.builder.get_object("hbox_download_to_path_chooser")
self.download_location_path_chooser = PathChooser("download_location_paths_list")
self.download_location_hbox.add(self.download_location_path_chooser)
self.download_location_hbox.show_all()
self.move_completed_hbox = self.builder.get_object("hbox_move_completed_to_path_chooser")
self.move_completed_path_chooser = PathChooser("move_completed_paths_list")
self.move_completed_hbox.add(self.move_completed_path_chooser)
self.move_completed_hbox.show_all()
self.copy_torrents_to_hbox = self.builder.get_object("hbox_copy_torrent_files_path_chooser")
self.copy_torrent_files_path_chooser = PathChooser("copy_torrent_files_to_paths_list")
self.copy_torrents_to_hbox.add(self.copy_torrent_files_path_chooser)
self.copy_torrents_to_hbox.show_all()
def load_languages(self):
from deluge.ui import languages # Import here so that gettext has been setup first
translations_path = deluge.common.get_translations_path()
for root, dirs, files in os.walk(translations_path):
# Get the dirs
break
self.language_combo = self.builder.get_object("combobox_language")
self.language_checkbox = self.builder.get_object("checkbutton_language")
lang_model = self.language_combo.get_model()
index = -1
for i, lang_code in enumerate(sorted(dirs)):
name = "%s (Language name missing)" % lang_code
if lang_code in languages.LANGUAGES:
name = languages.LANGUAGES[lang_code]
lang_model.append([lang_code, name])
if self.gtkui_config["language"] == lang_code:
index = i
if self.gtkui_config["language"] is None:
self.language_checkbox.set_active(True)
self.language_combo.set_visible(False)
elif index != -1:
self.language_combo.set_active(index)
def __del__(self):
del self.gtkui_config
def add_page(self, name, widget):
"""Add a another page to the notebook"""
# Create a header and scrolled window for the preferences tab
parent = widget.get_parent()
if parent:
parent.remove(widget)
vbox = gtk.VBox()
label = gtk.Label()
label.set_use_markup(True)
label.set_markup("<b><i><big>" + name + "</big></i></b>")
label.set_alignment(0.00, 0.50)
label.set_padding(10, 10)
vbox.pack_start(label, False, True, 0)
sep = gtk.HSeparator()
vbox.pack_start(sep, False, True, 0)
align = gtk.Alignment()
align.set_padding(5, 0, 0, 0)
align.set(0, 0, 1, 1)
align.add(widget)
vbox.pack_start(align, True, True, 0)
scrolled = gtk.ScrolledWindow()
viewport = gtk.Viewport()
viewport.set_shadow_type(gtk.SHADOW_NONE)
viewport.add(vbox)
scrolled.add(viewport)
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolled.show_all()
# Add this page to the notebook
index = self.notebook.append_page(scrolled)
self.liststore.append([index, name])
return name
def remove_page(self, name):
"""Removes a page from the notebook"""
self.page_num_to_remove = None
self.iter_to_remove = None
def check_row(model, path, iter, user_data):
row_name = model.get_value(iter, 1)
if row_name == user_data:
# This is the row we need to remove
self.page_num_to_remove = model.get_value(iter, 0)
self.iter_to_remove = iter
return
self.liststore.foreach(check_row, name)
# Remove the page and row
if self.page_num_to_remove is not None:
self.notebook.remove_page(self.page_num_to_remove)
if self.iter_to_remove is not None:
self.liststore.remove(self.iter_to_remove)
# We need to re-adjust the index values for the remaining pages
for i, (index, name) in enumerate(self.liststore):
self.liststore[i][0] = i
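    # Illustrative sketch, not part of the original class: add_page()/remove_page()
    # are the hooks a GtkUI plugin would typically use (usually reached via
    # component.get("Preferences")) to expose its own preferences tab. The widget
    # below is a minimal placeholder and this method is never called.
    def _sketch_plugin_prefs_tab(self):
        widget = gtk.Label("Example plugin settings")
        name = self.add_page(_("Example Plugin"), widget)
        # ...and later, e.g. when the plugin is disabled:
        self.remove_page(name)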
def show(self, page=None):
"""Page should be the string in the left list.. ie, 'Network' or
'Bandwidth'"""
self.window_open = True
if page is not None:
for (index, string) in self.liststore:
if page == string:
self.treeview.get_selection().select_path(index)
break
component.get("PluginManager").run_on_show_prefs()
# Update the preferences dialog to reflect current config settings
self.core_config = {}
if client.connected():
self._get_accounts_tab_data()
def _on_get_config(config):
self.core_config = config
client.core.get_available_plugins().addCallback(_on_get_available_plugins)
def _on_get_available_plugins(plugins):
self.all_plugins = plugins
client.core.get_enabled_plugins().addCallback(_on_get_enabled_plugins)
def _on_get_enabled_plugins(plugins):
self.enabled_plugins = plugins
client.core.get_listen_port().addCallback(_on_get_listen_port)
def _on_get_listen_port(port):
self.active_port = port
client.core.get_cache_status().addCallback(_on_get_cache_status)
def _on_get_cache_status(status):
self.cache_status = status
self._show()
# This starts a series of client.core requests prior to showing the window
client.core.get_config().addCallback(_on_get_config)
else:
self._show()
def start(self):
if self.window_open:
self.show()
def stop(self):
self.core_config = None
if self.window_open:
self._show()
def _show(self):
self.is_connected = self.core_config != {} and self.core_config is not None
core_widgets = {
"chk_move_completed": ("active", "move_completed"),
"chk_copy_torrent_file": ("active", "copy_torrent_file"),
"chk_del_copy_torrent_file": ("active", "del_copy_torrent_file"),
"chk_pre_allocation": ("active", "pre_allocate_storage"),
"chk_prioritize_first_last_pieces": ("active", "prioritize_first_last_pieces"),
"chk_sequential_download": ("active", "sequential_download"),
"chk_add_paused": ("active", "add_paused"),
"active_port_label": ("text", lambda: str(self.active_port)),
"spin_port_min": ("value", lambda: self.core_config["listen_ports"][0]),
"spin_port_max": ("value", lambda: self.core_config["listen_ports"][1]),
"chk_random_port": ("active", "random_port"),
"spin_outgoing_port_min": ("value", lambda: self.core_config["outgoing_ports"][0]),
"spin_outgoing_port_max": ("value", lambda: self.core_config["outgoing_ports"][1]),
"chk_random_outgoing_ports": ("active", "random_outgoing_ports"),
"entry_interface": ("text", "listen_interface"),
"entry_peer_tos": ("text", "peer_tos"),
"chk_dht": ("active", "dht"),
"chk_upnp": ("active", "upnp"),
"chk_natpmp": ("active", "natpmp"),
"chk_utpex": ("active", "utpex"),
"chk_lt_tex": ("active", "lt_tex"),
"chk_lsd": ("active", "lsd"),
"chk_new_releases": ("active", "new_release_check"),
"chk_send_info": ("active", "send_info"),
"entry_geoip": ("text", "geoip_db_location"),
"combo_encin": ("active", "enc_in_policy"),
"combo_encout": ("active", "enc_out_policy"),
"combo_enclevel": ("active", "enc_level"),
"spin_max_connections_global": ("value", "max_connections_global"),
"spin_max_download": ("value", "max_download_speed"),
"spin_max_upload": ("value", "max_upload_speed"),
"spin_max_upload_slots_global": ("value", "max_upload_slots_global"),
"spin_max_half_open_connections": ("value", "max_connections_per_second"),
"spin_max_connections_per_second": ("value", "max_connections_per_second"),
"chk_ignore_limits_on_local_network": ("active", "ignore_limits_on_local_network"),
"chk_rate_limit_ip_overhead": ("active", "rate_limit_ip_overhead"),
"chk_anonymous_mode": ("active", "anonymous_mode"),
"spin_max_connections_per_torrent": ("value", "max_connections_per_torrent"),
"spin_max_upload_slots_per_torrent": ("value", "max_upload_slots_per_torrent"),
"spin_max_download_per_torrent": ("value", "max_download_speed_per_torrent"),
"spin_max_upload_per_torrent": ("value", "max_upload_speed_per_torrent"),
"spin_daemon_port": ("value", "daemon_port"),
"chk_allow_remote_connections": ("active", "allow_remote"),
"spin_active": ("value", "max_active_limit"),
"spin_seeding": ("value", "max_active_seeding"),
"spin_downloading": ("value", "max_active_downloading"),
"chk_dont_count_slow_torrents": ("active", "dont_count_slow_torrents"),
"chk_auto_manage_prefer_seeds": ("active", "auto_manage_prefer_seeds"),
"chk_queue_new_top": ("active", "queue_new_to_top"),
"spin_share_ratio_limit": ("value", "share_ratio_limit"),
"spin_seed_time_ratio_limit": ("value", "seed_time_ratio_limit"),
"spin_seed_time_limit": ("value", "seed_time_limit"),
"chk_seed_ratio": ("active", "stop_seed_at_ratio"),
"spin_share_ratio": ("value", "stop_seed_ratio"),
"chk_remove_ratio": ("active", "remove_seed_at_ratio"),
"spin_cache_size": ("value", "cache_size"),
"spin_cache_expiry": ("value", "cache_expiry"),
"combo_proxy_type": ("active", lambda: self.core_config["proxy"]["type"]),
"entry_proxy_user": ("text", lambda: self.core_config["proxy"]["username"]),
"entry_proxy_pass": ("text", lambda: self.core_config["proxy"]["password"]),
"entry_proxy_host": ("text", lambda: self.core_config["proxy"]["hostname"]),
"spin_proxy_port": ("value", lambda: self.core_config["proxy"]["port"]),
"chk_proxy_host_resolve": ("active", lambda: self.core_config["proxy"]["proxy_hostnames"]),
"chk_proxy_peer_conn": ("active", lambda: self.core_config["proxy"]["proxy_peer_connections"]),
"entry_i2p_host": ("text", lambda: self.core_config["i2p_proxy"]["hostname"]),
"spin_i2p_port": ("value", lambda: self.core_config["i2p_proxy"]["port"]),
"accounts_add": (None, None),
"accounts_listview": (None, None),
"button_cache_refresh": (None, None),
"button_plugin_install": (None, None),
"button_rescan_plugins": (None, None),
"button_find_plugins": (None, None),
"button_testport": (None, None),
"plugin_listview": (None, None),
}
core_widgets[self.download_location_path_chooser] = ("path_chooser", "download_location")
core_widgets[self.move_completed_path_chooser] = ("path_chooser", "move_completed_path")
core_widgets[self.copy_torrent_files_path_chooser] = ("path_chooser", "torrentfiles_location")
# Update the widgets accordingly
for key in core_widgets.keys():
modifier = core_widgets[key][0]
if type(key) is str:
widget = self.builder.get_object(key)
else:
widget = key
widget.set_sensitive(self.is_connected)
if self.is_connected:
value = core_widgets[key][1]
from types import FunctionType
if type(value) is FunctionType:
value = value()
elif type(value) is str:
value = self.core_config[value]
elif modifier:
value = {"active": False, "not_active": False, "value": 0, "text": "", "path_chooser": ""}[modifier]
if modifier == "active":
widget.set_active(value)
elif modifier == "not_active":
widget.set_active(not value)
elif modifier == "value":
widget.set_value(float(value))
elif modifier == "text":
if value is None:
value = ""
widget.set_text(value)
elif modifier == "path_chooser":
widget.set_text(value, cursor_end=False, default_text=True)
if self.is_connected:
for key in core_widgets.keys():
if type(key) is str:
widget = self.builder.get_object(key)
else:
widget = key
# Update the toggle status if necessary
self.on_toggle(widget)
# Downloads tab #
self.builder.get_object("chk_show_dialog").set_active(
self.gtkui_config["interactive_add"])
self.builder.get_object("chk_focus_dialog").set_active(
self.gtkui_config["focus_add_dialog"])
# Interface tab #
self.builder.get_object("chk_use_tray").set_active(
self.gtkui_config["enable_system_tray"])
self.builder.get_object("chk_min_on_close").set_active(
self.gtkui_config["close_to_tray"])
self.builder.get_object("chk_start_in_tray").set_active(
self.gtkui_config["start_in_tray"])
self.builder.get_object("radio_appind").set_active(
self.gtkui_config["enable_appindicator"])
self.builder.get_object("chk_lock_tray").set_active(
self.gtkui_config["lock_tray"])
self.builder.get_object("radio_classic").set_active(
self.gtkui_config["classic_mode"])
self.builder.get_object("radio_thinclient").set_active(
not self.gtkui_config["classic_mode"])
self.builder.get_object("chk_show_rate_in_title").set_active(
self.gtkui_config["show_rate_in_title"])
self.builder.get_object("chk_focus_main_window_on_add").set_active(
self.gtkui_config["focus_main_window_on_add"])
self.builder.get_object("piecesbar_toggle").set_active(
self.gtkui_config["show_piecesbar"]
)
self.__set_color("completed", from_config=True)
self.__set_color("downloading", from_config=True)
self.__set_color("waiting", from_config=True)
self.__set_color("missing", from_config=True)
# Other tab #
self.builder.get_object("chk_show_new_releases").set_active(
self.gtkui_config["show_new_releases"])
# Cache tab #
if client.connected():
self.__update_cache_status()
# Plugins tab #
all_plugins = self.all_plugins
enabled_plugins = self.enabled_plugins
# Clear the existing list so we don't duplicate entries.
self.plugin_liststore.clear()
# Iterate through the lists and add them to the liststore
for plugin in all_plugins:
if plugin in enabled_plugins:
enabled = True
else:
enabled = False
row = self.plugin_liststore.append()
self.plugin_liststore.set_value(row, 0, plugin)
self.plugin_liststore.set_value(row, 1, enabled)
self.plugin_liststore.set_value(row, 2, _(plugin))
# Now show the dialog
self.pref_dialog.show()
def set_config(self, hide=False):
"""
Sets all altered config values in the core.
:param hide: bool, if True, will not re-show the dialog and will hide it instead
"""
classic_mode_was_set = self.gtkui_config["classic_mode"]
# Get the values from the dialog
new_core_config = {}
new_gtkui_config = {}
# Downloads tab #
new_gtkui_config["interactive_add"] = \
self.builder.get_object("chk_show_dialog").get_active()
new_gtkui_config["focus_add_dialog"] = \
self.builder.get_object("chk_focus_dialog").get_active()
for state in ("missing", "waiting", "downloading", "completed"):
color = self.builder.get_object("%s_color" % state).get_color()
new_gtkui_config["pieces_color_%s" % state] = [
color.red, color.green, color.blue
]
new_core_config["copy_torrent_file"] = \
self.builder.get_object("chk_copy_torrent_file").get_active()
new_core_config["del_copy_torrent_file"] = \
self.builder.get_object("chk_del_copy_torrent_file").get_active()
new_core_config["move_completed"] = \
self.builder.get_object("chk_move_completed").get_active()
new_core_config["download_location"] = self.download_location_path_chooser.get_text()
new_core_config["move_completed_path"] = self.move_completed_path_chooser.get_text()
new_core_config["torrentfiles_location"] = self.copy_torrent_files_path_chooser.get_text()
new_core_config["prioritize_first_last_pieces"] = \
self.builder.get_object("chk_prioritize_first_last_pieces").get_active()
new_core_config["sequential_download"] = \
self.builder.get_object("chk_sequential_download").get_active()
new_core_config["add_paused"] = self.builder.get_object("chk_add_paused").get_active()
new_core_config["pre_allocate_storage"] = self.builder.get_object("chk_pre_allocation").get_active()
# Network tab #
listen_ports = (
self.builder.get_object("spin_port_min").get_value_as_int(),
self.builder.get_object("spin_port_max").get_value_as_int()
)
new_core_config["listen_ports"] = listen_ports
new_core_config["random_port"] = \
self.builder.get_object("chk_random_port").get_active()
outgoing_ports = (
self.builder.get_object("spin_outgoing_port_min").get_value_as_int(),
self.builder.get_object("spin_outgoing_port_max").get_value_as_int()
)
new_core_config["outgoing_ports"] = outgoing_ports
new_core_config["random_outgoing_ports"] = \
self.builder.get_object("chk_random_outgoing_ports").get_active()
incoming_address = self.builder.get_object("entry_interface").get_text().strip()
if deluge.common.is_ip(incoming_address) or not incoming_address:
new_core_config["listen_interface"] = incoming_address
new_core_config["peer_tos"] = self.builder.get_object("entry_peer_tos").get_text()
new_core_config["dht"] = self.builder.get_object("chk_dht").get_active()
new_core_config["upnp"] = self.builder.get_object("chk_upnp").get_active()
new_core_config["natpmp"] = \
self.builder.get_object("chk_natpmp").get_active()
new_core_config["utpex"] = \
self.builder.get_object("chk_utpex").get_active()
new_core_config["lt_tex"] = \
self.builder.get_object("chk_lt_tex").get_active()
new_core_config["lsd"] = \
self.builder.get_object("chk_lsd").get_active()
new_core_config["enc_in_policy"] = \
self.builder.get_object("combo_encin").get_active()
new_core_config["enc_out_policy"] = \
self.builder.get_object("combo_encout").get_active()
new_core_config["enc_level"] = \
self.builder.get_object("combo_enclevel").get_active()
# Bandwidth tab #
new_core_config["max_connections_global"] = \
self.builder.get_object(
"spin_max_connections_global").get_value_as_int()
new_core_config["max_download_speed"] = \
self.builder.get_object("spin_max_download").get_value()
new_core_config["max_upload_speed"] = \
self.builder.get_object("spin_max_upload").get_value()
new_core_config["max_upload_slots_global"] = \
self.builder.get_object(
"spin_max_upload_slots_global").get_value_as_int()
new_core_config["max_half_open_connections"] = \
self.builder.get_object("spin_max_half_open_connections").get_value_as_int()
new_core_config["max_connections_per_second"] = \
self.builder.get_object(
"spin_max_connections_per_second").get_value_as_int()
new_core_config["max_connections_per_torrent"] = \
self.builder.get_object(
"spin_max_connections_per_torrent").get_value_as_int()
new_core_config["max_upload_slots_per_torrent"] = \
self.builder.get_object(
"spin_max_upload_slots_per_torrent").get_value_as_int()
new_core_config["max_upload_speed_per_torrent"] = \
self.builder.get_object(
"spin_max_upload_per_torrent").get_value()
new_core_config["max_download_speed_per_torrent"] = \
self.builder.get_object(
"spin_max_download_per_torrent").get_value()
new_core_config["ignore_limits_on_local_network"] = \
self.builder.get_object("chk_ignore_limits_on_local_network").get_active()
new_core_config["rate_limit_ip_overhead"] = \
self.builder.get_object("chk_rate_limit_ip_overhead").get_active()
# Interface tab #
new_gtkui_config["enable_system_tray"] = \
self.builder.get_object("chk_use_tray").get_active()
new_gtkui_config["close_to_tray"] = \
self.builder.get_object("chk_min_on_close").get_active()
new_gtkui_config["start_in_tray"] = \
self.builder.get_object("chk_start_in_tray").get_active()
new_gtkui_config["enable_appindicator"] = \
self.builder.get_object("radio_appind").get_active()
new_gtkui_config["lock_tray"] = \
self.builder.get_object("chk_lock_tray").get_active()
passhex = sha(self.builder.get_object("txt_tray_password").get_text()).hexdigest()
if passhex != "c07eb5a8c0dc7bb81c217b67f11c3b7a5e95ffd7":
new_gtkui_config["tray_password"] = passhex
new_gtkui_in_classic_mode = self.builder.get_object("radio_classic").get_active()
new_gtkui_config["classic_mode"] = new_gtkui_in_classic_mode
new_gtkui_config["show_rate_in_title"] = \
self.builder.get_object("chk_show_rate_in_title").get_active()
new_gtkui_config["focus_main_window_on_add"] = \
self.builder.get_object("chk_focus_main_window_on_add").get_active()
# Other tab #
new_gtkui_config["show_new_releases"] = \
self.builder.get_object("chk_show_new_releases").get_active()
new_core_config["send_info"] = \
self.builder.get_object("chk_send_info").get_active()
new_core_config["geoip_db_location"] = \
self.builder.get_object("entry_geoip").get_text()
# Daemon tab #
new_core_config["daemon_port"] = \
self.builder.get_object("spin_daemon_port").get_value_as_int()
new_core_config["allow_remote"] = \
self.builder.get_object("chk_allow_remote_connections").get_active()
new_core_config["new_release_check"] = \
self.builder.get_object("chk_new_releases").get_active()
# Proxy tab #
new_core_config["proxy"] = {}
new_core_config["proxy"]["type"] = self.builder.get_object("combo_proxy_type").get_active()
new_core_config["proxy"]["username"] = self.builder.get_object("entry_proxy_user").get_text()
new_core_config["proxy"]["password"] = self.builder.get_object("entry_proxy_pass").get_text()
new_core_config["proxy"]["hostname"] = self.builder.get_object("entry_proxy_host").get_text()
new_core_config["proxy"]["port"] = self.builder.get_object("spin_proxy_port").get_value_as_int()
new_core_config["proxy"]["proxy_hostnames"] = self.builder.get_object("chk_proxy_host_resolve").get_active()
new_core_config["proxy"]["proxy_peer_connections"] = self.builder.get_object(
"chk_proxy_peer_conn").get_active()
new_core_config["i2p_proxy"] = {}
new_core_config["i2p_proxy"]["hostname"] = self.builder.get_object("entry_i2p_host").get_text()
new_core_config["i2p_proxy"]["port"] = self.builder.get_object("spin_i2p_port").get_value_as_int()
new_core_config["anonymous_mode"] = self.builder.get_object("chk_anonymous_mode").get_active()
# Queue tab #
new_core_config["queue_new_to_top"] = \
self.builder.get_object("chk_queue_new_top").get_active()
new_core_config["max_active_seeding"] = \
self.builder.get_object("spin_seeding").get_value_as_int()
new_core_config["max_active_downloading"] = \
self.builder.get_object("spin_downloading").get_value_as_int()
new_core_config["max_active_limit"] = \
self.builder.get_object("spin_active").get_value_as_int()
new_core_config["dont_count_slow_torrents"] = \
self.builder.get_object("chk_dont_count_slow_torrents").get_active()
new_core_config["auto_manage_prefer_seeds"] = \
self.builder.get_object("chk_auto_manage_prefer_seeds").get_active()
new_core_config["stop_seed_at_ratio"] = \
self.builder.get_object("chk_seed_ratio").get_active()
new_core_config["remove_seed_at_ratio"] = \
self.builder.get_object("chk_remove_ratio").get_active()
new_core_config["stop_seed_ratio"] = \
self.builder.get_object("spin_share_ratio").get_value()
new_core_config["share_ratio_limit"] = \
self.builder.get_object("spin_share_ratio_limit").get_value()
new_core_config["seed_time_ratio_limit"] = \
self.builder.get_object("spin_seed_time_ratio_limit").get_value()
new_core_config["seed_time_limit"] = \
self.builder.get_object("spin_seed_time_limit").get_value()
# Cache tab #
new_core_config["cache_size"] = \
self.builder.get_object("spin_cache_size").get_value_as_int()
new_core_config["cache_expiry"] = \
self.builder.get_object("spin_cache_expiry").get_value_as_int()
# Run plugin hook to apply preferences
component.get("PluginManager").run_on_apply_prefs()
        # Language
if self.language_checkbox.get_active():
new_gtkui_config["language"] = None
else:
active = self.language_combo.get_active()
if active == -1:
dialog = InformationDialog(
_("Attention"),
_("You must choose a language")
)
dialog.run()
return
else:
model = self.language_combo.get_model()
new_gtkui_config["language"] = model.get(model.get_iter(active), 0)[0]
if new_gtkui_config["language"] != self.gtkui_config["language"]:
dialog = InformationDialog(
_("Attention"),
_("You must now restart the deluge UI for the changes to take effect.")
)
dialog.run()
# GtkUI
for key in new_gtkui_config.keys():
# The values do not match so this needs to be updated
if self.gtkui_config[key] != new_gtkui_config[key]:
self.gtkui_config[key] = new_gtkui_config[key]
# Core
if client.connected():
# Only do this if we're connected to a daemon
config_to_set = {}
for key in new_core_config.keys():
# The values do not match so this needs to be updated
if self.core_config[key] != new_core_config[key]:
config_to_set[key] = new_core_config[key]
if config_to_set:
# Set each changed config value in the core
client.core.set_config(config_to_set)
client.force_call(True)
# Update the configuration
self.core_config.update(config_to_set)
if hide:
self.hide()
else:
# Re-show the dialog to make sure everything has been updated
self.show()
if classic_mode_was_set != new_gtkui_in_classic_mode:
def on_response(response):
if response == gtk.RESPONSE_YES:
shutdown_daemon = (not client.is_classicmode() and
client.connected() and
client.is_localhost())
component.get("MainWindow").quit(shutdown=shutdown_daemon)
else:
self.gtkui_config["classic_mode"] = not new_gtkui_in_classic_mode
self.builder.get_object("radio_classic").set_active(self.gtkui_config["classic_mode"])
self.builder.get_object("radio_thinclient").set_active(not self.gtkui_config["classic_mode"])
dialog = YesNoDialog(
_("Switching client mode..."),
_("Your current session will be stopped. Do you wish to continue?")
)
dialog.run().addCallback(on_response)
def hide(self):
self.window_open = False
self.builder.get_object("port_img").hide()
self.pref_dialog.hide()
def __update_cache_status(self):
# Updates the cache status labels with the info in the dict
for widget_name in ('label_cache_blocks_written', 'label_cache_writes', 'label_cache_write_hit_ratio',
'label_cache_blocks_read', 'label_cache_blocks_read_hit', 'label_cache_read_hit_ratio',
'label_cache_reads', 'label_cache_cache_size', 'label_cache_read_cache_size'):
widget = self.builder.get_object(widget_name)
key = widget_name[len("label_cache_"):]
value = self.cache_status[key]
if type(value) == float:
value = "%.2f" % value
else:
value = str(value)
widget.set_text(value)
def _on_button_cache_refresh_clicked(self, widget):
def on_get_cache_status(status):
self.cache_status = status
self.__update_cache_status()
client.core.get_cache_status().addCallback(on_get_cache_status)
def on_pref_dialog_delete_event(self, widget, event):
self.hide()
return True
def load_pref_dialog_state(self):
w = self.gtkui_config["pref_dialog_width"]
h = self.gtkui_config["pref_dialog_height"]
if w is not None and h is not None:
self.pref_dialog.resize(w, h)
def on_pref_dialog_configure_event(self, widget, event):
self.gtkui_config["pref_dialog_width"] = event.width
self.gtkui_config["pref_dialog_height"] = event.height
def on_toggle(self, widget):
"""Handles widget sensitivity based on radio/check button values."""
try:
value = widget.get_active()
except:
return
path_choosers = {"download_location_path_chooser": self.download_location_path_chooser,
"move_completed_path_chooser": self.move_completed_path_chooser,
"torrentfiles_location_path_chooser": self.copy_torrent_files_path_chooser
}
dependents = {
"chk_show_dialog": {"chk_focus_dialog": True},
"chk_random_port": {"spin_port_min": False,
"spin_port_max": False},
"chk_random_outgoing_ports": {"spin_outgoing_port_min": False,
"spin_outgoing_port_max": False},
"chk_use_tray": {"chk_min_on_close": True,
"chk_start_in_tray": True,
"alignment_tray_type": True,
"chk_lock_tray": True},
"chk_lock_tray": {"txt_tray_password": True,
"password_label": True},
"radio_open_folder_custom": {"combo_file_manager": False,
"txt_open_folder_location": True},
"chk_move_completed": {"move_completed_path_chooser": True},
"chk_copy_torrent_file": {"torrentfiles_location_path_chooser": True,
"chk_del_copy_torrent_file": True},
"chk_seed_ratio": {"spin_share_ratio": True,
"chk_remove_ratio": True}
}
def update_dependent_widgets(name, value):
dependency = dependents[name]
for dep in dependency.keys():
if dep in path_choosers:
depwidget = path_choosers[dep]
else:
depwidget = self.builder.get_object(dep)
sensitive = [not value, value][dependency[dep]]
depwidget.set_sensitive(sensitive and self.is_connected)
if dep in dependents:
update_dependent_widgets(dep, depwidget.get_active() and sensitive)
for key in dependents.keys():
if widget != self.builder.get_object(key):
continue
update_dependent_widgets(key, value)
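    # Illustrative note, not part of the original class: the `dependents` mapping
    # in on_toggle() cascades sensitivity, e.g. unchecking "chk_use_tray" disables
    # "chk_lock_tray", which in turn disables its own dependents ("txt_tray_password"
    # and "password_label"). A hypothetical new dependency would be wired the same way:
    #
    #     dependents["chk_new_option"] = {"spin_new_option_value": True}
    #
    # where True means "sensitive while the controlling widget is active" and False
    # inverts the relationship (as with the random-port checkboxes).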
def on_button_ok_clicked(self, data):
log.debug("on_button_ok_clicked")
self.set_config(hide=True)
return True
def on_button_apply_clicked(self, data):
log.debug("on_button_apply_clicked")
self.set_config()
def on_button_cancel_clicked(self, data):
log.debug("on_button_cancel_clicked")
self.hide()
return True
def on_selection_changed(self, treeselection):
# Show the correct notebook page based on what row is selected.
(model, row) = treeselection.get_selected()
try:
if model.get_value(row, 1) == _("Daemon"):
                # Update the accounts-related widgets as well
if client.connected():
self._get_accounts_tab_data()
self.notebook.set_current_page(model.get_value(row, 0))
except TypeError:
pass
def on_test_port_clicked(self, data):
log.debug("on_test_port_clicked")
def on_get_test(status):
if status:
self.builder.get_object("port_img").set_from_stock(gtk.STOCK_YES, 4)
self.builder.get_object("port_img").show()
else:
self.builder.get_object("port_img").set_from_stock(gtk.STOCK_DIALOG_WARNING, 4)
self.builder.get_object("port_img").show()
client.core.test_listen_port().addCallback(on_get_test)
# XXX: Consider using gtk.Spinner() instead of the loading gif
# It requires gtk.ver > 2.12
self.builder.get_object("port_img").set_from_file(
deluge.common.get_pixmap('loading.gif')
)
self.builder.get_object("port_img").show()
client.force_call()
def on_plugin_toggled(self, renderer, path):
log.debug("on_plugin_toggled")
row = self.plugin_liststore.get_iter_from_string(path)
name = self.plugin_liststore.get_value(row, 0)
value = self.plugin_liststore.get_value(row, 1)
self.plugin_liststore.set_value(row, 1, not value)
if not value:
client.core.enable_plugin(name)
else:
client.core.disable_plugin(name)
def on_plugin_selection_changed(self, treeselection):
log.debug("on_plugin_selection_changed")
(model, itr) = treeselection.get_selected()
if not itr:
return
name = model[itr][0]
plugin_info = component.get("PluginManager").get_plugin_info(name)
self.builder.get_object("label_plugin_author").set_text(plugin_info["Author"])
self.builder.get_object("label_plugin_version").set_text(plugin_info["Version"])
self.builder.get_object("label_plugin_email").set_text(plugin_info["Author-email"])
self.builder.get_object("label_plugin_homepage").set_text(plugin_info["Home-page"])
self.builder.get_object("label_plugin_details").set_text(plugin_info["Description"])
def _on_button_plugin_install_clicked(self, widget):
log.debug("_on_button_plugin_install_clicked")
chooser = gtk.FileChooserDialog(
_("Select the Plugin"),
self.pref_dialog,
gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN,
gtk.RESPONSE_OK)
)
chooser.set_transient_for(self.pref_dialog)
chooser.set_select_multiple(False)
chooser.set_property("skip-taskbar-hint", True)
file_filter = gtk.FileFilter()
file_filter.set_name(_("Plugin Eggs"))
file_filter.add_pattern("*." + "egg")
chooser.add_filter(file_filter)
# Run the dialog
response = chooser.run()
if response == gtk.RESPONSE_OK:
filepath = chooser.get_filename()
else:
chooser.destroy()
return
import base64
import shutil
import os.path
filename = os.path.split(filepath)[1]
shutil.copyfile(
filepath,
os.path.join(get_config_dir(), "plugins", filename))
component.get("PluginManager").scan_for_plugins()
if not client.is_localhost():
# We need to send this plugin to the daemon
filedump = base64.encodestring(open(filepath, "rb").read())
client.core.upload_plugin(filename, filedump)
client.core.rescan_plugins()
chooser.destroy()
# We need to re-show the preferences dialog to show the new plugins
self.show()
def _on_button_rescan_plugins_clicked(self, widget):
component.get("PluginManager").scan_for_plugins()
if client.connected():
client.core.rescan_plugins()
self.show()
def _on_button_find_plugins_clicked(self, widget):
deluge.common.open_url_in_browser("http://dev.deluge-torrent.org/wiki/Plugins")
def _on_combo_encryption_changed(self, widget):
combo_encin = self.builder.get_object("combo_encin").get_active()
combo_encout = self.builder.get_object("combo_encout").get_active()
combo_enclevel = self.builder.get_object("combo_enclevel")
# If incoming and outgoing both set to disabled, disable level combobox
if combo_encin == 2 and combo_encout == 2:
combo_enclevel.set_sensitive(False)
elif self.is_connected:
combo_enclevel.set_sensitive(True)
def _on_combo_proxy_type_changed(self, widget):
proxy_type = self.builder.get_object("combo_proxy_type").get_active()
hides = []
shows = []
# 0:"None"
if proxy_type == 0:
hides.extend(["entry_proxy_pass", "entry_proxy_user", "entry_proxy_host", "spin_proxy_port",
"label_proxy_pass", "label_proxy_user", "label_proxy_host", "label_proxy_port",
"chk_proxy_host_resolve", "chk_proxy_peer_conn"])
# 1:"Socks4", 2:"Socks5", 4:"HTTP"
elif proxy_type in (1, 2, 4):
if proxy_type in (2, 4):
shows.extend(["chk_proxy_host_resolve"])
hides.extend(["entry_proxy_pass", "entry_proxy_user", "label_proxy_pass", "label_proxy_user"])
shows.extend(["entry_proxy_host", "spin_proxy_port", "label_proxy_host",
"label_proxy_port", "chk_proxy_peer_conn"])
# 3:"Socks5 Auth", 5:"HTTP Auth"
elif proxy_type in (3, 5):
shows.extend(["entry_proxy_pass", "entry_proxy_user", "entry_proxy_host", "spin_proxy_port",
"label_proxy_pass", "label_proxy_user", "label_proxy_host", "label_proxy_port",
"chk_proxy_host_resolve", "chk_proxy_peer_conn"])
for hide_entry in hides:
self.builder.get_object(hide_entry).hide()
for show_entry in shows:
self.builder.get_object(show_entry).show()
def _on_button_associate_magnet_clicked(self, widget):
associate_magnet_links(True)
def _get_accounts_tab_data(self):
def on_ok(accounts):
self.accounts_frame.show()
self._on_get_known_accounts(accounts)
def on_fail(failure):
if failure.type == NotAuthorizedError:
self.accounts_frame.hide()
else:
ErrorDialog(
_("Server Side Error"),
_("An error ocurred on the server"),
parent=self.pref_dialog, details=failure.getErrorMessage()
).run()
client.core.get_known_accounts().addCallback(on_ok).addErrback(on_fail)
def _on_get_known_accounts(self, known_accounts):
known_accounts_to_log = []
for account in known_accounts:
account_to_log = {}
for key, value in account.copy().iteritems():
if key == 'password':
value = '*' * len(value)
account_to_log[key] = value
known_accounts_to_log.append(account_to_log)
log.debug("_on_known_accounts: %s", known_accounts_to_log)
self.accounts_liststore.clear()
for account in known_accounts:
iter = self.accounts_liststore.append()
self.accounts_liststore.set_value(
iter, ACCOUNTS_USERNAME, account['username']
)
self.accounts_liststore.set_value(
iter, ACCOUNTS_LEVEL, account['authlevel']
)
self.accounts_liststore.set_value(
iter, ACCOUNTS_PASSWORD, account['password']
)
def _on_accounts_selection_changed(self, treeselection):
log.debug("_on_accounts_selection_changed")
(model, itr) = treeselection.get_selected()
if not itr:
return
username = model[itr][0]
if username:
self.builder.get_object("accounts_edit").set_sensitive(True)
self.builder.get_object("accounts_delete").set_sensitive(True)
else:
self.builder.get_object("accounts_edit").set_sensitive(False)
self.builder.get_object("accounts_delete").set_sensitive(False)
def _on_accounts_add_clicked(self, widget):
dialog = AccountDialog(
levels_mapping=client.auth_levels_mapping,
parent=self.pref_dialog
)
def dialog_finished(response_id):
username = dialog.get_username()
password = dialog.get_password()
authlevel = dialog.get_authlevel()
def add_ok(rv):
iter = self.accounts_liststore.append()
self.accounts_liststore.set_value(
iter, ACCOUNTS_USERNAME, username
)
self.accounts_liststore.set_value(
iter, ACCOUNTS_LEVEL, authlevel
)
self.accounts_liststore.set_value(
iter, ACCOUNTS_PASSWORD, password
)
def add_fail(failure):
if failure.type == AuthManagerError:
ErrorDialog(
_("Error Adding Account"),
_("Authentication failed"),
parent=self.pref_dialog, details=failure.getErrorMessage()
).run()
else:
ErrorDialog(
_("Error Adding Account"),
_("An error ocurred while adding account"),
parent=self.pref_dialog, details=failure.getErrorMessage()
).run()
if response_id == gtk.RESPONSE_OK:
client.core.create_account(
username, password, authlevel
).addCallback(add_ok).addErrback(add_fail)
dialog.run().addCallback(dialog_finished)
def _on_accounts_edit_clicked(self, widget):
(model, itr) = self.accounts_listview.get_selection().get_selected()
if not itr:
return
dialog = AccountDialog(
model[itr][ACCOUNTS_USERNAME],
model[itr][ACCOUNTS_PASSWORD],
model[itr][ACCOUNTS_LEVEL],
levels_mapping=client.auth_levels_mapping,
parent=self.pref_dialog
)
def dialog_finished(response_id):
def update_ok(rc):
                model.set_value(itr, ACCOUNTS_PASSWORD, dialog.get_password())
model.set_value(itr, ACCOUNTS_LEVEL, dialog.get_authlevel())
def update_fail(failure):
ErrorDialog(
_("Error Updating Account"),
_("An error ocurred while updating account"),
parent=self.pref_dialog, details=failure.getErrorMessage()
).run()
if response_id == gtk.RESPONSE_OK:
client.core.update_account(
dialog.get_username(),
dialog.get_password(),
dialog.get_authlevel()
).addCallback(update_ok).addErrback(update_fail)
dialog.run().addCallback(dialog_finished)
def _on_accounts_delete_clicked(self, widget):
(model, itr) = self.accounts_listview.get_selection().get_selected()
if not itr:
return
username = model[itr][0]
header = _("Remove Account")
text = _("Are you sure you wan't do remove the account with the "
"username \"%(username)s\"?" % dict(username=username))
dialog = YesNoDialog(header, text, parent=self.pref_dialog)
def dialog_finished(response_id):
def remove_ok(rc):
model.remove(itr)
def remove_fail(failure):
if failure.type == AuthManagerError:
ErrorDialog(
_("Error Removing Account"),
_("Auhentication failed"),
parent=self.pref_dialog, details=failure.getErrorMessage()
).run()
else:
ErrorDialog(
_("Error Removing Account"),
_("An error ocurred while removing account"),
parent=self.pref_dialog, details=failure.getErrorMessage()
).run()
if response_id == gtk.RESPONSE_YES:
client.core.remove_account(
username
).addCallback(remove_ok).addErrback(remove_fail)
dialog.run().addCallback(dialog_finished)
def _on_piecesbar_toggle_toggled(self, widget):
self.gtkui_config['show_piecesbar'] = widget.get_active()
colors_widget = self.builder.get_object("piecebar_colors_expander")
colors_widget.set_visible(widget.get_active())
def _on_checkbutton_language_toggled(self, widget):
self.language_combo.set_visible(not self.language_checkbox.get_active())
def _on_completed_color_set(self, widget):
self.__set_color("completed")
def _on_revert_color_completed_clicked(self, widget):
self.__revert_color("completed")
def _on_downloading_color_set(self, widget):
self.__set_color("downloading")
def _on_revert_color_downloading_clicked(self, widget):
self.__revert_color("downloading")
def _on_waiting_color_set(self, widget):
self.__set_color("waiting")
def _on_revert_color_waiting_clicked(self, widget):
self.__revert_color("waiting")
def _on_missing_color_set(self, widget):
self.__set_color("missing")
def _on_revert_color_missing_clicked(self, widget):
self.__revert_color("missing")
def __set_color(self, state, from_config=False):
if from_config:
color = gtk.gdk.Color(*self.gtkui_config["pieces_color_%s" % state])
log.debug("Setting %r color state from config to %s", state,
(color.red, color.green, color.blue))
self.builder.get_object("%s_color" % state).set_color(color)
else:
color = self.builder.get_object("%s_color" % state).get_color()
log.debug("Setting %r color state to %s", state,
(color.red, color.green, color.blue))
self.gtkui_config["pieces_color_%s" % state] = [
color.red, color.green, color.blue
]
self.gtkui_config.save()
self.gtkui_config.apply_set_functions("pieces_colors")
self.builder.get_object("revert_color_%s" % state).set_sensitive(
[color.red, color.green, color.blue] != self.COLOR_DEFAULTS[state]
)
def __revert_color(self, state, from_config=False):
log.debug("Reverting %r color state", state)
self.builder.get_object("%s_color" % state).set_color(
gtk.gdk.Color(*self.COLOR_DEFAULTS[state])
)
self.builder.get_object("revert_color_%s" % state).set_sensitive(False)
self.gtkui_config.apply_set_functions("pieces_colors")
|
bendykst/deluge
|
deluge/ui/gtkui/preferences.py
|
Python
|
gpl-3.0
| 58,154 | 0.002167 |
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from django.views.generic.base import View, TemplateView
from django.views import generic
from django.template import RequestContext, loader, Context
from django.utils import simplejson
from settings import CLARUS_DBCONN_STRING
import psycopg2, sys, pprint, json
from datetime import datetime
from django.views.decorators.csrf import ensure_csrf_cookie
import logging
from array import *
# logger = logging.getLogger('print')
class LoadCanvas(View):
template_name= "index.html"
def get(self, request, *args, **kwargs):
c = {}
c.update(csrf(request))
return render_to_response(self.template_name, c)
class FetchObservations(View):
# template_name = "timeline/timelines.html"
obsType = array('i'); # 575
startTime = '' ;
stationID ='';
#@route ('/observe', method='POST')
def post(self, request, *args, **kwargs):
try:
json_data = simplejson.loads(request.body)
print 'Raw Data: "%s"' % request.body
# print json_data
self.startTime = str(json_data['startTime'])
# self.startTime = '2013-07-09 00:00:00';
# print self.startTime
self.stationID = json_data['stationID']
stationList = ",".join([str(x) for x in self.stationID])
# print stationList
self.obsType = json_data['obsType']
# print self.obsType
conn_string = CLARUS_DBCONN_STRING # get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(conn_string) # conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
r = [];
# print self.obsType[0]
# print self.obsType[1]
for obs in self.obsType:
# execute our Query
data = {}
obsStr = str(obs)
cursor.execute("SELECT "+
"TO_CHAR( (date_trunc('hour', tstamp) + INTERVAL '15 min' * ROUND(date_part('minute', tstamp) / 15.0)), 'YYYY-MM-DD HH24:MI' ) AS tstamp, "+
"AVG( metric_value ) AS metric_value " +
"FROM clarus.observation, clarus.sensor "+
"WHERE clarus.observation.sensor_id=clarus.sensor.sensor_id "+
"AND station_id IN (" + stationList + ") AND observation_type = " + obsStr + " "+
"AND tstamp >= (timestamp '"+self.startTime+"' - INTERVAL '1 week') AND tstamp < timestamp '"+self.startTime+"' " +
"GROUP BY date_trunc('hour', tstamp) + INTERVAL '15 min' * ROUND(date_part('minute', tstamp) / 15.0) "+
"ORDER BY tstamp asc" );
data['rows'] = [dict((cursor.description[i][0], value)
for i, value in enumerate(row)) for row in cursor.fetchall()]
# this query is no longer needed as the metadata is all loaded separately
#cursor.execute("SELECT name, description, metric_abbreviation "+
# "FROM clarus.observation_type_lkp "+
# "WHERE clarus.observation_type_lkp.observation_type= "+ obs +"");
#data['title'] = ([dict((cursor.description[i][0], value)
# for i, value in enumerate(row)) for row in cursor.fetchall()])
data['title'] = obs
r.append(data);
cursor.connection.close();
# now process it
json_output = simplejson.dumps(r)
return HttpResponse(json_output, content_type="application/json")
except:
return HttpResponse("<h1>Error in running query</h1>")
# logger.error('Getting observation data failed')
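# A hedged sketch of the JSON body this view expects (field names taken from the
# parsing code above; the station ids and observation types are made-up values):
#
#   {
#       "startTime": "2013-07-09 00:00:00",
#       "stationID": [204, 318],
#       "obsType": [575, 581]
#   }
#
# The response is a JSON list with one {"rows": [...], "title": <obsType>} entry
# per requested observation type.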
|
awalin/rwis
|
views.py
|
Python
|
lgpl-3.0
| 4,395 | 0.020023 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from airflow.models import DAG
from airflow.operators.bash import BashOperator
DEFAULT_DATE = datetime(2016, 1, 1)
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE,
}
dag = DAG(dag_id='test_heartbeat_failed_fast', default_args=args)
task = BashOperator(task_id='test_heartbeat_failed_fast_op', bash_command='sleep 7', dag=dag)
|
airbnb/airflow
|
tests/dags/test_heartbeat_failed_fast.py
|
Python
|
apache-2.0
| 1,163 | 0.00086 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import node
LEADER = 1
ROUTER = 2
ED2 = 3
SED2 = 4
MTDS = [SED2, ED2]
class Cert_7_1_4_BorderRouterAsRouter(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1,5):
self.nodes[i] = node.Node(i, (i in MTDS), simulator=self.simulator)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].add_whitelist(self.nodes[ED2].get_addr64())
self.nodes[ROUTER].add_whitelist(self.nodes[SED2].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_router_selection_jitter(1)
self.nodes[ED2].set_panid(0xface)
self.nodes[ED2].set_mode('rsn')
self.nodes[ED2].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[ED2].enable_whitelist()
self.nodes[SED2].set_panid(0xface)
self.nodes[SED2].set_mode('s')
self.nodes[SED2].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[SED2].enable_whitelist()
self.nodes[SED2].set_timeout(config.DEFAULT_CHILD_TIMEOUT)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
node.destroy()
self.simulator.stop()
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED2].get_state(), 'child')
self.nodes[SED2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED2].get_state(), 'child')
self.nodes[ROUTER].add_prefix('2001:2:0:1::/64', 'paros')
self.nodes[ROUTER].add_prefix('2001:2:0:2::/64', 'paro')
self.nodes[ROUTER].register_netdata()
self.simulator.go(5)
addrs = self.nodes[ED2].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
addrs = self.nodes[SED2].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
|
gandreello/openthread
|
tests/scripts/thread-cert/Cert_7_1_04_BorderRouterAsRouter.py
|
Python
|
bsd-3-clause
| 4,738 | 0.000633 |
# -*- coding: utf-8 -*-
from rest_framework.routers import (Route,
DynamicDetailRoute,
SimpleRouter,
DynamicListRoute)
from app.api.account.views import AccountViewSet
from app.api.podcast.views import PodcastViewSet, EpisodeViewSet
class CustomRouter(SimpleRouter):
"""
    A SimpleRouter variant that wires up list/create and detail routes plus the
    dynamically generated @list_route and @detail_route endpoints below.
"""
routes = [
Route(
url=r'^{prefix}{trailing_slash}$',
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'}
),
# Dynamically generated list routes.
# Generated using @list_route decorator
# on methods of the viewset.
DynamicListRoute(
url=r'^{prefix}/{methodnamehyphen}{trailing_slash}$',
name='{basename}-{methodnamehyphen}',
initkwargs={}
),
# Detail route.
Route(
url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
),
# Dynamically generated detail routes.
# Generated using @detail_route decorator on methods of the viewset.
DynamicDetailRoute(
url=r'^{prefix}/{lookup}/{methodnamehyphen}{trailing_slash}$',
name='{basename}-{methodnamehyphen}',
initkwargs={}
),
]
router = CustomRouter()
router.register(r'accounts', AccountViewSet)
router.register(r'podcasts', PodcastViewSet)
router.register(r'episodes', EpisodeViewSet)
urlpatterns = router.urls
|
Podcastor/podcastor-backend
|
src/app/api/urls.py
|
Python
|
gpl-2.0
| 1,923 | 0 |
from ..trezor.qt_generic import QtPlugin
from keepkey import KeepKeyPlugin
class Plugin(KeepKeyPlugin, QtPlugin):
icon_paired = ":icons/keepkey.png"
icon_unpaired = ":icons/keepkey_unpaired.png"
@classmethod
def pin_matrix_widget_class(self):
from keepkeylib.qt.pinmatrix import PinMatrixWidget
return PinMatrixWidget
|
pknight007/electrum-vtc
|
plugins/keepkey/vtc.py
|
Python
|
mit
| 353 | 0 |
__author__ = 'Jason Grundstad'
from django.conf import settings
from pyvirtualdisplay import Display
from selenium import webdriver
from bs4 import BeautifulSoup
import json
MD_ANDERSON_URL = 'https://pct.mdanderson.org/#/home'
MD_ANDERSON_OUTFILE = settings.LINKS_OUT + 'mdanderson.json'
def scrape_mdanderson():
"""
    Scrape the rendered mdanderson page for gene names and write a .json
    file of links to MD_ANDERSON_OUTFILE.
"""
gene_list = dict()
d = Display(visible=0, size=(800,600)) # requires xvfb for headless mode
d.start()
driver = webdriver.Firefox()
driver.get(MD_ANDERSON_URL)
soup = BeautifulSoup(driver.page_source, 'html.parser')
for a_tag in soup.find_all("a", {'class':'ng-binding'}):
gene_list[a_tag.text] = "{}/{}?section=Overview".format(
MD_ANDERSON_URL,
a_tag.text)
gene_list_json = json.dumps(gene_list)
with open(MD_ANDERSON_OUTFILE, 'w') as f:
json.dump(gene_list_json, f)
def main():
scrape_mdanderson()
if __name__ == '__main__':
main()
|
djf604/viewer
|
util/link_out_scraper.py
|
Python
|
apache-2.0
| 1,060 | 0.003774 |
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import pymongo
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
# put your mongodb username and password
# "mongodb://username:password@staff.mongohq.com:someport/mongodb_name"
# following is obtained from https://app.mongohq.com/username/mongo/mongodbname/admin
MONGOHQ_URL = "mongodb://avi:test@paulo.mongohq.com:10065/testme"
class Application(tornado.web.Application):
def __init__(self):
handlers = [(r"/(\w+)", WordHandler)]
# conn = pymongo.Connection("localhost", 27017)
# self.db = conn["definitions"]
conn = pymongo.Connection(MONGOHQ_URL)
self.db = conn.testme
tornado.web.Application.__init__(self, handlers, debug=True)
class WordHandler(tornado.web.RequestHandler):
def get(self, word):
coll = self.application.db.words
word_doc = coll.find_one({"word": word})
if word_doc:
del word_doc["_id"]
self.write(word_doc)
else:
self.set_status(404)
def post(self, word):
definition = self.get_argument("definition")
coll = self.application.db.words
word_doc = coll.find_one({"word": word})
if word_doc:
word_doc['definition'] = definition
coll.save(word_doc)
else:
word_doc = {'word': word, 'definition': definition}
coll.insert(word_doc)
del word_doc["_id"]
self.write(word_doc)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
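# A hedged usage sketch (assumes the server is running locally on the default
# port 8000 and that the MongoDB connection above succeeds; the word "oarlock"
# is only an example):
#
#   curl http://localhost:8000/oarlock
#   curl -d definition="a device that holds an oar in place" http://localhost:8000/oarlock
#
# POST creates or updates the definition of the word in the URL; GET returns the
# stored document as JSON, or a 404 status if the word is unknown.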
|
avinassh/learning-tornado
|
tornado-book/databases/definitions_readwrite.py
|
Python
|
mit
| 1,628 | 0.023956 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
import numpy as np
from .rnn_base import RNNBase
from .utils import SymbolTable
class TextRNN(RNNBase):
"""TextRNN for strings of text."""
def _preprocess_data(self, candidates, extend=False):
"""Convert candidate sentences to lookup sequences
:param candidates: candidates to process
:param extend: extend symbol table for tokens (train), or lookup (test)?
"""
if not hasattr(self, 'word_dict'):
self.word_dict = SymbolTable()
data, ends = [], []
for candidate in candidates:
toks = candidate.get_contexts()[0].text.split()
# Either extend word table or retrieve from it
f = self.word_dict.get if extend else self.word_dict.lookup
data.append(np.array(list(map(f, toks))))
ends.append(len(toks))
return data, ends
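# A hedged illustration of what _preprocess_data returns (hypothetical candidate
# texts; the exact integer ids depend on SymbolTable, which is assumed to number
# new tokens consecutively and to reserve a low id for unknown tokens on lookup):
#
#   candidate texts -> ["the cat sat", "the dog"]
#   data            -> [array([2, 3, 4]), array([2, 5])]
#   ends            -> [3, 2]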
|
HazyResearch/snorkel
|
snorkel/learning/tensorflow/rnn/text_rnn.py
|
Python
|
apache-2.0
| 1,044 | 0.001916 |
"""
WSGI config for ngs project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "ngs.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ngs.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
eco32i/ngs
|
ngs/wsgi.py
|
Python
|
mit
| 1,410 | 0.000709 |
from setuptools import setup
setup(
name='pymail365',
version='0.1',
description='A python client for sending mail using Microsoft Office 365 rest service.',
long_description=open('README.rst').read(),
author='Mikko Hellsing',
author_email='mikko@aino.se',
license='BSD',
url='https://github.com/aino/pymail365',
packages=['pymail365'],
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
|
aino/pymail365
|
setup.py
|
Python
|
bsd-3-clause
| 688 | 0.001453 |
# -*- coding: utf-8 -*-
from i3pystatus import Module
from threading import Thread
import i3ipc
class WindowTitle(Module):
"""
Display the current window title with async update.
Uses asynchronous update via i3 IPC events.
    Provides an instant title update only when it is required.
    Fork from window_tile_async of py3status by Anon1234 https://github.com/Anon1234
    Requires the PyPI package `i3ipc`.
    .. rubric:: Available formatters
* `{title}` — title of current focused window
* `{class_name}` - name of application class
@author jok
@license BSD
"""
settings = (
("format", "format string."),
("always_show", "do not hide the title when it can be already visible"),
("empty_title", "string that will be shown instead of the title when the title is hidden"),
("max_width", "maximum width of title"),
("color", "text color"),
)
format = "{title}"
always_show = False
empty_title = ""
max_width = 79
color = "#FFFFFF"
def init(self):
self.title = self.empty_title
self.output = {
"full_text": self.title,
"color": self.color,
}
# we are listening to i3 events in a separate thread
t = Thread(target=self._loop)
t.daemon = True
t.start()
def get_title(self, conn):
tree = conn.get_tree()
w = tree.find_focused()
p = w.parent
# don't show window title when the window already has means
# to display it
if (not self.always_show
and (w.border == "normal"
or w.type == "workspace"
or (p.layout in ("stacked", "tabbed") and len(p.nodes) > 1))):
return self.empty_title
else:
title = w.name
class_name = w.window_class
if len(title) > self.max_width:
title = title[:self.max_width - 1] + "…"
return self.format.format(title=title, class_name=class_name)
def update_title(self, conn, e):
# catch only focused window title updates
title_changed = hasattr(e, "container") and e.container.focused
# check if we need to update title due to changes
# in the workspace layout
layout_changed = (
hasattr(e, "binding")
and (e.binding.command.startswith("layout")
or e.binding.command.startswith("move container")
or e.binding.command.startswith("border"))
)
if title_changed or layout_changed:
self.title = self.get_title(conn)
self.update_display()
def clear_title(self, *args):
self.title = self.empty_title
self.update_display()
def update_display(self):
self.output = {
"full_text": self.title,
"color": self.color,
}
def _loop(self):
conn = i3ipc.Connection()
self.title = self.get_title(conn) # set title on startup
self.update_display()
# The order of following callbacks is important!
# clears the title on empty ws
conn.on('workspace::focus', self.clear_title)
# clears the title when the last window on ws was closed
conn.on("window::close", self.clear_title)
# listens for events which can trigger the title update
conn.on("window::title", self.update_title)
conn.on("window::focus", self.update_title)
conn.main() # run the event loop
|
drwahl/i3pystatus
|
i3pystatus/window_title.py
|
Python
|
mit
| 3,544 | 0.000847 |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfgen/pdfimages.py
__version__=''' $Id$ '''
__doc__="""
Image functionality sliced out of canvas.py for generalization
"""
import os
import string
from types import StringType
import reportlab
from reportlab import rl_config
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase import pdfdoc
from reportlab.lib.utils import fp_str, getStringIO
from reportlab.lib.utils import import_zlib, haveImages
from reportlab.lib.boxstuff import aspectRatioFix
class PDFImage:
"""Wrapper around different "image sources". You can make images
from a PIL Image object, a filename (in which case it uses PIL),
an image we previously cached (optimisation, hardly used these
days) or a JPEG (which PDF supports natively)."""
def __init__(self, image, x,y, width=None, height=None, caching=0):
self.image = image
self.x = x
self.y = y
self.width = width
self.height = height
self.filename = None
self.imageCaching = caching
# the following facts need to be determined,
# whatever the source. Declare what they are
# here for clarity.
self.colorSpace = 'DeviceRGB'
self.bitsPerComponent = 8
self.filters = []
self.source = None # JPEG or PIL, set later
self.getImageData()
def jpg_imagedata(self):
#directly process JPEG files
#open file, needs some error handling!!
fp = open(self.image, 'rb')
try:
result = self._jpg_imagedata(fp)
finally:
fp.close()
return result
def _jpg_imagedata(self,imageFile):
info = pdfutils.readJPEGInfo(imageFile)
self.source = 'JPEG'
imgwidth, imgheight = info[0], info[1]
if info[2] == 1:
colorSpace = 'DeviceGray'
elif info[2] == 3:
colorSpace = 'DeviceRGB'
else: #maybe should generate an error, is this right for CMYK?
colorSpace = 'DeviceCMYK'
imageFile.seek(0) #reset file pointer
imagedata = []
#imagedata.append('BI /Width %d /Height /BitsPerComponent 8 /ColorSpace /%s /Filter [/Filter [ /ASCII85Decode /DCTDecode] ID' % (info[0], info[1], colorSpace))
imagedata.append('BI /W %d /H %d /BPC 8 /CS /%s /F [%s/DCT] ID' % (imgwidth, imgheight, colorSpace, rl_config.useA85 and '/A85 ' or ''))
#write in blocks of (??) 60 characters per line to a list
data = imageFile.read()
if rl_config.useA85:
data = pdfutils._AsciiBase85Encode(data)
pdfutils._chunker(data,imagedata)
imagedata.append('EI')
return (imagedata, imgwidth, imgheight)
def cache_imagedata(self):
image = self.image
if not pdfutils.cachedImageExists(image):
zlib = import_zlib()
if not zlib: return
if not haveImages: return
pdfutils.cacheImageFile(image)
#now we have one cached, slurp it in
cachedname = os.path.splitext(image)[0] + (rl_config.useA85 and '.a85' or '.bin')
imagedata = open(cachedname,'rb').readlines()
#trim off newlines...
imagedata = map(string.strip, imagedata)
return imagedata
def PIL_imagedata(self):
image = self.image
if image.format=='JPEG':
fp=image.fp
fp.seek(0)
return self._jpg_imagedata(fp)
self.source = 'PIL'
zlib = import_zlib()
if not zlib: return
bpc = 8
# Use the colorSpace in the image
if image.mode == 'CMYK':
myimage = image
colorSpace = 'DeviceCMYK'
bpp = 4
elif image.mode == '1':
myimage = image
colorSpace = 'DeviceGray'
bpp = 1
bpc = 1
elif image.mode == 'L':
myimage = image
colorSpace = 'DeviceGray'
bpp = 1
else:
myimage = image.convert('RGB')
colorSpace = 'RGB'
bpp = 3
imgwidth, imgheight = myimage.size
# this describes what is in the image itself
# *NB* according to the spec you can only use the short form in inline images
imagedata=['BI /W %d /H %d /BPC %d /CS /%s /F [%s/Fl] ID' % (imgwidth, imgheight, bpc, colorSpace, rl_config.useA85 and '/A85 ' or '')]
#use a flate filter and, optionally, Ascii Base 85 to compress
raw = myimage.tostring()
rowstride = (imgwidth*bpc*bpp+7)/8
assert len(raw) == rowstride*imgheight, "Wrong amount of data for image"
data = zlib.compress(raw) #this bit is very fast...
if rl_config.useA85:
data = pdfutils._AsciiBase85Encode(data) #...sadly this may not be
#append in blocks of 60 characters
pdfutils._chunker(data,imagedata)
imagedata.append('EI')
return (imagedata, imgwidth, imgheight)
def non_jpg_imagedata(self,image):
if not self.imageCaching:
imagedata = pdfutils.cacheImageFile(image,returnInMemory=1)
else:
imagedata = self.cache_imagedata()
words = string.split(imagedata[1])
imgwidth = string.atoi(words[1])
imgheight = string.atoi(words[3])
return imagedata, imgwidth, imgheight
def getImageData(self,preserveAspectRatio=False):
"Gets data, height, width - whatever type of image"
image = self.image
if type(image) == StringType:
self.filename = image
if os.path.splitext(image)[1] in ['.jpg', '.JPG', '.jpeg', '.JPEG']:
try:
imagedata, imgwidth, imgheight = self.jpg_imagedata()
except:
imagedata, imgwidth, imgheight = self.non_jpg_imagedata(image) #try for normal kind of image
else:
imagedata, imgwidth, imgheight = self.non_jpg_imagedata(image)
else:
import sys
if sys.platform[0:4] == 'java':
#jython, PIL not available
imagedata, imgwidth, imgheight = self.JAVA_imagedata()
else:
imagedata, imgwidth, imgheight = self.PIL_imagedata()
self.imageData = imagedata
self.imgwidth = imgwidth
self.imgheight = imgheight
self.width = self.width or imgwidth
self.height = self.height or imgheight
def drawInlineImage(self, canvas, preserveAspectRatio=False,anchor='sw'):
"""Draw an Image into the specified rectangle. If width and
height are omitted, they are calculated from the image size.
Also allow file names as well as images. This allows a
caching mechanism"""
width = self.width
height = self.height
if width<1e-6 or height<1e-6: return False
x,y,self.width,self.height, scaled = aspectRatioFix(preserveAspectRatio,anchor,self.x,self.y,width,height,self.imgwidth,self.imgheight)
# this says where and how big to draw it
if not canvas.bottomup: y = y+height
canvas._code.append('q %s 0 0 %s cm' % (fp_str(self.width), fp_str(self.height, x, y)))
# self._code.extend(imagedata) if >=python-1.5.2
for line in self.imageData:
canvas._code.append(line)
canvas._code.append('Q')
return True
def format(self, document):
"""Allow it to be used within pdfdoc framework. This only
defines how it is stored, not how it is drawn later."""
dict = pdfdoc.PDFDictionary()
dict['Type'] = '/XObject'
dict['Subtype'] = '/Image'
dict['Width'] = self.width
dict['Height'] = self.height
dict['BitsPerComponent'] = 8
dict['ColorSpace'] = pdfdoc.PDFName(self.colorSpace)
content = string.join(self.imageData[3:-1], '\n') + '\n'
strm = pdfdoc.PDFStream(dictionary=dict, content=content)
return strm.format(document)
if __name__=='__main__':
srcfile = os.path.join(
os.path.dirname(reportlab.__file__),
'test',
'pythonpowered.gif'
)
assert os.path.isfile(srcfile), 'image not found'
pdfdoc.LongFormat = 1
img = PDFImage(srcfile, 100, 100)
import pprint
doc = pdfdoc.PDFDocument()
print 'source=',img.source
print img.format(doc)
|
TaskEvolution/Task-Coach-Evolution
|
taskcoach/taskcoachlib/thirdparty/src/reportlab/pdfgen/pdfimages.py
|
Python
|
gpl-3.0
| 8,517 | 0.009041 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Handles stopping of an active session."""
from absl import logging
from api import model_selector
from api import unique_id
import common.generate_protos # pylint: disable=unused-import
from data_store import resource_store
import data_store_pb2
import falken_service_pb2
from google.rpc import code_pb2
import session_pb2
def stop_session(request, context, data_store):
"""Stops an active session.
Args:
request: falken_service_pb2.StopSessionRequest containing information
about the session requested to be stopped.
context: grpc.ServicerContext containing context about the RPC.
data_store: data_store.DataStore object to update the session.
Returns:
falken_service_pb2.StopSessionResponse containing the snapshot_id for the
session.
Raises:
Exception: The gRPC context is aborted when the session is not found in
data_store, or other issues occur in the handling.
"""
logging.debug('StopSession called for project_id %s with session %s.',
request.project_id, request.session.name)
session = None
session_resource_id = data_store.resource_id_from_proto_ids(
project_id=request.session.project_id,
brain_id=request.session.brain_id,
session_id=request.session.name)
try:
session = data_store.read(session_resource_id)
except FileNotFoundError as e:
context.abort(
code_pb2.NOT_FOUND,
f'Failed to find session {session_resource_id.session} in data_store. '
f'{e}')
selector = model_selector.ModelSelector(data_store, session_resource_id)
try:
model_resource_id = selector.select_final_model()
except FileNotFoundError as e:
model_resource_id = None
except Exception as e: # pylint: disable=broad-except
context.abort(code_pb2.NOT_FOUND, f'Error while selecting final model. {e}')
snapshot_id = _get_snapshot_id(
session_resource_id, session, model_resource_id, data_store, context)
# Update session to be ended with snapshot ID.
session.snapshot = snapshot_id
data_store.write_stopped_session(session)
return falken_service_pb2.StopSessionResponse(snapshot_id=snapshot_id)
def _get_snapshot_id(
session_resource_id, session, model_resource_id, data_store, context):
"""Get snapshot ID for the session differentiated by session type.
Creates a new snapshot if appropriate.
Args:
session_resource_id: resource_id.FalkenResourceID for the session being
stopped.
session: data_store_pb2.Session instance.
model_resource_id: resource_id.FalkenResourceID for the model that was
selected by model_selector for this session.
data_store: data_store.DataStore instance to read and write the new
snapshot into.
context: grpc.ServicerContext containing context to abort when issues occur.
Returns:
Snapshot ID string.
Raises:
The gRPC context is aborted when issues occur with reading/writing the
snapshot.
"""
# If training happened (training or evaluation), create new snapshot,
# otherwise return initial snapshot.
if session.session_type == session_pb2.INFERENCE:
# End of an inference session, return the initial snapshot.
try:
return _single_starting_snapshot(
session_resource_id, session.starting_snapshots)
except ValueError as e:
context.abort(code_pb2.INVALID_ARGUMENT, e)
elif session.session_type == session_pb2.INTERACTIVE_TRAINING:
try:
return _create_or_use_existing_snapshot(
session_resource_id, session.starting_snapshots, model_resource_id,
data_store, expect_starting_snapshot=False)
except (FileNotFoundError, resource_store.InternalError,
ValueError) as e:
context.abort(
code_pb2.NOT_FOUND, 'Failed to create snapshot for training session '
f'{session_resource_id.session}. {e}')
elif session.session_type == session_pb2.EVALUATION:
try:
return _create_or_use_existing_snapshot(
session_resource_id, session.starting_snapshots, model_resource_id,
data_store, expect_starting_snapshot=True)
except (FileNotFoundError, resource_store.InternalError,
ValueError) as e:
context.abort(
code_pb2.NOT_FOUND, 'Failed to create snapshot for evaluation session'
f' {session_resource_id.session}. {e}')
else:
context.abort(
code_pb2.INVALID_ARGUMENT,
f'Unsupported session_type: {session.session_type} found in '
f'{session.session_id}.')
def _single_starting_snapshot(session_resource_id, starting_snapshots):
"""Returns the single starting snapshot from a list of snapshot IDs.
Args:
session_resource_id: resource_id.FalkenResourceId for the session with the
starting snapshots.
starting_snapshots: List of snapshot IDs specified as starting snapshots
for the session.
Returns:
Starting snapshot ID string.
Raises:
ValueError if the length of starting snapshots is not 1.
"""
if len(starting_snapshots) != 1:
raise ValueError(
'Unexpected number of starting_snapshots, wanted exactly 1, got '
f'{len(starting_snapshots)} for session {session_resource_id.session}.')
return starting_snapshots[0]
def _create_or_use_existing_snapshot(
session_resource_id, starting_snapshots, model_resource_id,
data_store, expect_starting_snapshot):
"""Return snapshot ID for a new snapshot or existing snapshot.
If a final model was selected by model_selector and passed onto the
model_resource_id, creates a snapshot for this model and return the snapshot
ID. Otherwise, returns the starting snapshot ID.
Args:
session_resource_id: resource_id.FalkenResourceID for the session being
stopped.
starting_snapshots: Starting snapshot IDs of the session being stopped.
model_resource_id: resource_id.FalkenResourceID for the model that was
selected by model_selector for this session.
data_store: data_store.DataStore instance to read and write the new
snapshot into.
expect_starting_snapshot: bool, whether we are expecting a starting
snapshot. If False and no model ID is found, we return an empty string.
Returns:
Snapshot ID string for the created snapshot or the starting snapshot.
Raises:
FileNotFoundError, InternalError for issues while writing the snapshot.
ValueError for issues while getting starting snapshot IDs.
"""
if model_resource_id:
# Use model ID for new snapshot.
model = data_store.read(model_resource_id)
return _create_snapshot(session_resource_id, starting_snapshots,
model_resource_id, model.model_path, data_store)
else:
# Return existing snapshot. Callers with expect_starting_snapshot True will
# raise ValueError if the len(starting_shots) != 1.
if len(starting_snapshots) == 1 or expect_starting_snapshot:
return _single_starting_snapshot(
session_resource_id, starting_snapshots)
return ''
def _create_snapshot(session_resource_id, starting_snapshots,
model_resource_id, model_path, data_store):
"""Creates a new snapshot in data_store.
Args:
session_resource_id: resource_id.FalkenResourceID for the session to create
the new snapshot for.
starting_snapshots: Starting snapshot IDs of the session to create the
new snapshot for.
model_resource_id: resource_id.FalkenResourceID for the model that was
selected by model_selector to create the snapshot for.
model_path: Path for the model to write the snapshot for.
data_store: data_store.DataStore instance to read and write the new
snapshot into.
Returns:
Snapshot ID string for the created snapshot.
Raises:
FileNotFoundError if any starting snapshots cannot be found on data_store.
InternalError if something goes wrong with writing the snapshot on
data_store.
"""
snapshot_resource_id = data_store.resource_id_from_proto_ids(
project_id=session_resource_id.project,
brain_id=session_resource_id.brain,
snapshot_id=unique_id.generate_unique_id())
write_snapshot = data_store_pb2.Snapshot(
project_id=snapshot_resource_id.project,
brain_id=snapshot_resource_id.brain,
snapshot_id=snapshot_resource_id.snapshot,
session=session_resource_id.session)
write_snapshot.model = model_resource_id.model
write_snapshot.model_path = model_path
for starting_snapshot_id in starting_snapshots:
starting_snapshot_resource_id = data_store.resource_id_from_proto_ids(
project_id=snapshot_resource_id.project,
brain_id=snapshot_resource_id.brain,
snapshot_id=starting_snapshot_id)
starting_snapshot = data_store.read(starting_snapshot_resource_id)
# Add parent snapshot to the graph and copy the ancestors.
parent = data_store_pb2.SnapshotParents(
snapshot=snapshot_resource_id.snapshot)
parent.parent_snapshots.append(starting_snapshot_id)
write_snapshot.ancestor_snapshots.append(parent)
for ancestor in starting_snapshot.ancestor_snapshots:
write_snapshot.ancestor_snapshots.append(ancestor)
data_store.write(write_snapshot)
return snapshot_resource_id.snapshot
|
google-research/falken
|
service/api/stop_session_handler.py
|
Python
|
apache-2.0
| 9,830 | 0.005086 |
"""Defines ``Group`` and ``UserPartition`` models for partitioning"""
from collections import namedtuple
from stevedore.extension import ExtensionManager
# We use ``id`` in this file as the IDs of our Groups and UserPartitions,
# which Pylint disapproves of.
# pylint: disable=redefined-builtin
# UserPartition IDs must be unique. The Cohort and Random UserPartitions (when they are
# created via Studio) choose an unused ID in the range of 100 (historical) to MAX_INT. Therefore the
# dynamic UserPartitionIDs must be under 100, and they have to be hard-coded to ensure
# they are always the same whenever the dynamic partition is added (since the UserPartition
# ID is stored in the xblock group_access dict).
ENROLLMENT_TRACK_PARTITION_ID = 50
MINIMUM_STATIC_PARTITION_ID = 100
class UserPartitionError(Exception):
"""
Base Exception for when an error was found regarding user partitions.
"""
pass
class NoSuchUserPartitionError(UserPartitionError):
"""
Exception to be raised when looking up a UserPartition by its ID fails.
"""
pass
class NoSuchUserPartitionGroupError(UserPartitionError):
"""
Exception to be raised when looking up a UserPartition Group by its ID fails.
"""
pass
class ReadOnlyUserPartitionError(UserPartitionError):
"""
Exception to be raised when attempting to modify a read only partition.
"""
pass
class Group(namedtuple("Group", "id name")):
"""
An id and name for a group of students. The id should be unique
within the UserPartition this group appears in.
"""
# in case we want to add to this class, a version will be handy
# for deserializing old versions. (This will be serialized in courses)
VERSION = 1
def __new__(cls, id, name):
return super(Group, cls).__new__(cls, int(id), name)
def to_json(self):
"""
'Serialize' to a json-serializable representation.
Returns:
a dictionary with keys for the properties of the group.
"""
return {
"id": self.id,
"name": self.name,
"version": Group.VERSION
}
@staticmethod
def from_json(value):
"""
Deserialize a Group from a json-like representation.
Args:
value: a dictionary with keys for the properties of the group.
Raises TypeError if the value doesn't have the right keys.
"""
if isinstance(value, Group):
return value
for key in ("id", "name", "version"):
if key not in value:
raise TypeError("Group dict {0} missing value key '{1}'".format(
value, key))
if value["version"] != Group.VERSION:
raise TypeError("Group dict {0} has unexpected version".format(
value))
return Group(value["id"], value["name"])
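# A minimal round-trip sketch for the Group class above (illustrative only; the
# dict produced by to_json() carries the id, name and version keys listed there):
#
#   >>> g = Group(10, "beta-testers")
#   >>> Group.from_json(g.to_json()) == g
#   True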
# The Stevedore extension point namespace for user partition scheme plugins.
USER_PARTITION_SCHEME_NAMESPACE = 'openedx.user_partition_scheme'
class UserPartition(namedtuple("UserPartition", "id name description groups scheme parameters active")):
"""A named way to partition users into groups, primarily intended for
running experiments. It is expected that each user will be in at most one
group in a partition.
A Partition has an id, name, scheme, description, parameters, and a list
of groups. The id is intended to be unique within the context where these
are used. (e.g., for partitions of users within a course, the ids should
be unique per-course). The scheme is used to assign users into groups.
The parameters field is used to save extra parameters e.g., location of
the block in case of VerificationPartitionScheme.
Partitions can be marked as inactive by setting the "active" flag to False.
Any group access rule referencing inactive partitions will be ignored
when performing access checks.
"""
VERSION = 3
# The collection of user partition scheme extensions.
scheme_extensions = None
# The default scheme to be used when upgrading version 1 partitions.
VERSION_1_SCHEME = "random"
def __new__(cls, id, name, description, groups, scheme=None, parameters=None, active=True,
scheme_id=VERSION_1_SCHEME):
if not scheme:
scheme = UserPartition.get_scheme(scheme_id)
if parameters is None:
parameters = {}
return super(UserPartition, cls).__new__(cls, int(id), name, description, groups, scheme, parameters, active)
@staticmethod
def get_scheme(name):
"""
Returns the user partition scheme with the given name.
"""
# Note: we're creating the extension manager lazily to ensure that the Python path
# has been correctly set up. Trying to create this statically will fail, unfortunately.
if not UserPartition.scheme_extensions:
UserPartition.scheme_extensions = ExtensionManager(namespace=USER_PARTITION_SCHEME_NAMESPACE)
try:
scheme = UserPartition.scheme_extensions[name].plugin
except KeyError:
raise UserPartitionError("Unrecognized scheme '{0}'".format(name))
scheme.name = name
return scheme
def to_json(self):
"""
'Serialize' to a json-serializable representation.
Returns:
a dictionary with keys for the properties of the partition.
"""
return {
"id": self.id,
"name": self.name,
"scheme": self.scheme.name,
"description": self.description,
"parameters": self.parameters,
"groups": [g.to_json() for g in self.groups],
"active": bool(self.active),
"version": UserPartition.VERSION
}
@staticmethod
def from_json(value):
"""
Deserialize a Group from a json-like representation.
Args:
value: a dictionary with keys for the properties of the group.
Raises TypeError if the value doesn't have the right keys.
"""
if isinstance(value, UserPartition):
return value
for key in ("id", "name", "description", "version", "groups"):
if key not in value:
raise TypeError("UserPartition dict {0} missing value key '{1}'".format(value, key))
if value["version"] == 1:
# If no scheme was provided, set it to the default ('random')
scheme_id = UserPartition.VERSION_1_SCHEME
# Version changes should be backwards compatible in case the code
# gets rolled back. If we see a version number greater than the current
# version, we should try to read it rather than raising an exception.
elif value["version"] >= 2:
if "scheme" not in value:
raise TypeError("UserPartition dict {0} missing value key 'scheme'".format(value))
scheme_id = value["scheme"]
else:
raise TypeError("UserPartition dict {0} has unexpected version".format(value))
parameters = value.get("parameters", {})
active = value.get("active", True)
groups = [Group.from_json(g) for g in value["groups"]]
scheme = UserPartition.get_scheme(scheme_id)
if not scheme:
raise TypeError("UserPartition dict {0} has unrecognized scheme {1}".format(value, scheme_id))
if getattr(scheme, 'read_only', False):
raise ReadOnlyUserPartitionError("UserPartition dict {0} uses scheme {1} which is read only".format(value, scheme_id))
if hasattr(scheme, "create_user_partition"):
return scheme.create_user_partition(
value["id"],
value["name"],
value["description"],
groups,
parameters,
active,
)
else:
return UserPartition(
value["id"],
value["name"],
value["description"],
groups,
scheme,
parameters,
active,
)
def get_group(self, group_id):
"""
Returns the group with the specified id.
Arguments:
group_id (int): ID of the partition group.
Raises:
NoSuchUserPartitionGroupError: The specified group could not be found.
"""
for group in self.groups:
if group.id == group_id:
return group
raise NoSuchUserPartitionGroupError(
"Could not find a Group with ID [{group_id}] in UserPartition [{partition_id}].".format(
group_id=group_id, partition_id=self.id
)
)
def access_denied_message(self, block_key, user, user_group, allowed_groups):
"""
Return a message that should be displayed to the user when they are not allowed to access
content managed by this partition, or None if there is no applicable message.
Arguments:
block_key (:class:`.BlockUsageLocator`): The content being managed
user (:class:`.User`): The user who was denied access
user_group (:class:`.Group`): The current Group the user is in
allowed_groups (list of :class:`.Group`): The groups who are allowed to see the content
Returns: str
"""
return None
def access_denied_fragment(self, block, user, user_group, allowed_groups):
"""
Return an html fragment that should be displayed to the user when they are not allowed to access
content managed by this partition, or None if there is no applicable message.
Arguments:
block (:class:`.XBlock`): The content being managed
user (:class:`.User`): The user who was denied access
user_group (:class:`.Group`): The current Group the user is in
allowed_groups (list of :class:`.Group`): The groups who are allowed to see the content
Returns: :class:`.Fragment`
"""
return None
def get_partition_from_id(partitions, user_partition_id):
"""
Look for a user partition with a matching id in the provided list of partitions.
Returns:
A UserPartition, or None if not found.
"""
for partition in partitions:
if partition.id == user_partition_id:
return partition
return None
|
msegado/edx-platform
|
common/lib/xmodule/xmodule/partitions/partitions.py
|
Python
|
agpl-3.0
| 10,492 | 0.002669 |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 03 09:38:19 2014
"""
from .PulseBase import Pulse
from .beam import OneDBeam
from . import beam
from . import DerivedPulses
from . import PulseBase
from .high_V_waveguide import OneDBeam_highV_WG
|
ycasg/PyNLO
|
src/pynlo/light/__init__.py
|
Python
|
gpl-3.0
| 245 | 0.004082 |
# -*- coding: utf8 -*-
###########################################################################
# This is the package latexparser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# copyright (c) Laurent Claessens, 2010,2012-2016
# email: laurent@claessens-donadello.eu
import codecs
from latexparser.InputPaths import InputPaths
class Occurrence(object):
"""
self.as_written : the code as it appears in the file, including \MyMacro, including the backslash.
self.position : the position at which this occurrence appears.
Example, if we look at the LatexCode
Hello word, \MyMacro{first}
and then \MyMacro{second}
the first occurrence of \MyMacro has position=12
"""
def __init__(self,name,arguments,as_written="",position=0):
self.arguments = arguments
self.number_of_arguments = len(arguments)
self.name = name
self.as_written = as_written
self.arguments_list = arguments
self.position = position
def configuration(self):
r"""
Return the way the arguments are separated in as_written.
Example, if we have
\MyMacro<space>{A}<tab>{B}
{C},
we return the list
["<space>","tab","\n"]
The following has to be true:
self.as_written == self.name+self.configuration()[0]+self.arguments_list[0]+etc.
"""
l=[]
a = self.as_written.split(self.name)[1]
for arg in self.arguments_list:
split = a.split("{"+arg+"}")
separator=split[0]
try:
a=split[1]
except IndexError:
print(self.as_written)
raise
l.append(separator)
return l
def change_argument(self,num,func):
r"""
Apply the function <func> to the <n>th argument of self. Then return a new object.
"""
n=num-1 # Internally, the arguments are numbered from 0.
arguments=self.arguments_list
configuration=self.configuration()
arguments[n]=func(arguments[n])
new_text=self.name
if len(arguments) != len(configuration):
print("Error : length of the configuration list has to be the same as the number of arguments")
raise ValueError
for i in range(len(arguments)):
new_text=new_text+configuration[i]+"{"+arguments[i]+"}"
return Occurrence(self.name,arguments,new_text,self.position)
def analyse(self):
return globals()["Occurrence_"+self.name[1:]](self) # We have to remove the initial "\" in the name of the macro.
def __getitem__(self,a):
return self.arguments[a]
def __str__(self):
return self.as_written
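# A hedged usage sketch (hypothetical values; assumes the occurrence below was
# parsed from the source fragment "\MyMacro {A}{B}"):
#
#   occ = Occurrence(r"\MyMacro", ["A", "B"], as_written=r"\MyMacro {A}{B}", position=12)
#   occ.configuration()                                     # [' ', '']
#   occ.change_argument(1, lambda s: s.lower()).as_written  # '\\MyMacro {a}{B}'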
class Occurrence_newlabel(object):
r"""
takes an occurrence of \newlabel and creates an object which contains the information.
In the self.section_name we remove "\relax" from the string.
"""
def __init__(self,occurrence):
self.occurrence = occurrence
self.arguments = self.occurrence.arguments
if len(self.arguments) == 0 :
self.name = "Non interesting; probably the definition"
self.listoche = [None,None,None,None,None]
self.value,self.page,self.section_name,self.fourth,self.fifth=(None,None,None,None,None)
else :
self.name = self.arguments[0][0]
self.listoche = [a[0] for a in SearchArguments(self.arguments[1][0],5)[0]]
self.value = self.listoche[0]
self.page = self.listoche[1]
self.section_name = self.listoche[2].replace(r"\relax","")
self.fourth = self.listoche[3] # I don't know the role of the fourth argument of \newlabel
self.fifth = self.listoche[4] # I don't know the role of the fifth argument of \newlabel
class Occurrence_addInputPath(object):
def __init__(self,Occurrence):
self.directory=Occurrence[0]
class Occurrence_cite(object):
def __init__(self,occurrence):
self.label = occurrence[0]
def entry(self,codeBibtex):
return codeBibtex[self.label]
class Occurrence_newcommand(object):
def __init__(self,occurrence):
self.occurrence = occurrence
self.number_of_arguments = 0
if self.occurrence[1][1] == "[]":
self.number_of_arguments = self.occurrence[1][0]
self.name = self.occurrence[0][0]#[0]
self.definition = self.occurrence[-1][0]
class Occurrence_label(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_ref(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_eqref(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_input(Occurrence):
def __init__(self,occurrence):
Occurrence.__init__(self,occurrence.name,occurrence.arguments,as_written=occurrence.as_written,position=occurrence.position)
self.occurrence = occurrence
self.filename = self.occurrence[0]
self.input_paths=InputPaths()
self._file_content=None # Make file_content "lazy"
def file_content(self,input_paths=None):
r"""
return the content of the file corresponding to this occurrence of
\input.
This is not recursive.
        - 'input_paths' is the list of paths in which we can search for files.
See the macro `\addInputPath` in the file
https://github.com/LaurentClaessens/mazhe/blob/master/configuration.tex
"""
import os.path
# Memoize
if self._file_content is not None :
return self._file_content
# At least, we are searching in the current directory :
if input_paths is None :
raise # Just to know who should do something like that
# Creating the filename
filename=self.filename
strict_filename = filename
if "." not in filename:
strict_filename=filename+".tex"
# Searching for the correct file in the subdirectories
fn=input_paths.get_file(strict_filename)
try:
# Without [:-1] I got an artificial empty line at the end.
text = "".join( codecs.open(fn,"r",encoding="utf8") )[:-1]
except IOError :
print("Warning : file %s not found."%strict_filename)
raise
self._file_content=text
return self._file_content
|
LaurentClaessens/LaTeXparser
|
Occurrence.py
|
Python
|
gpl-3.0
| 7,331 | 0.015278 |
#!/usr/bin/python
'''
nctu_ec_wired_topo.py
'''
from mininet.cluster.net import MininetCluster
from mininet.cluster.placer import DFSPlacer
from mininet.log import setLogLevel
from mininet.cluster.cli import ClusterCLI as CLI
from mininet.node import Controller, RemoteController
from mininet.topo import Topo
from itertools import combinations
CONTROLLER_IP = "192.168.59.100"
CONTROLLER_PORT = 6633
SERVER_LIST = [ 'mininet1', 'mininet2' ]
class NCTU_EC_Topology( Topo ):
def __init__(self, core=1, agg=6, access=6, host=5, *args, **kwargs):
Topo.__init__(self, *args, **kwargs)
self.core_num = core
self.agg_num = agg
self.access_num = access
self.host_num = host
self.sw_id = 1
self.host_id = 1
# Init switch and host list
self.core_sw_list = []
self.agg_sw_list = []
self.access_sw_list = []
self.host_list = []
self.create_top_switch( "core", self.core_num, self.core_sw_list )
self.handle_top_down( "agg", self.agg_num, self.core_sw_list, self.agg_sw_list )
self.handle_top_down( "access", self.access_num, self.agg_sw_list, self.access_sw_list )
self.handle_host( "h", self.host_num, self.host_list )
self.handle_mesh( self.agg_sw_list )
def create_top_switch( self, prefix_name, sw_num, sw_list):
for i in xrange(1, sw_num+1):
sw_list.append(self.addSwitch("{0}{1}".format(prefix_name, i), dpid='{0:x}'.format(self.sw_id)))
self.sw_id += 1
def handle_top_down( self, prefix_name, num, top_list, down_list):
temp = 0
for i in xrange(0, len(top_list)):
for j in xrange(1, num+1):
switch = self.addSwitch("{0}{1}".format(prefix_name, j + temp), dpid='{0:x}'.format(self.sw_id))
self.addLink(top_list[i], switch)
down_list.append(switch)
self.sw_id += 1
temp = j
def handle_host( self, prefix_name, host_num, host_list ):
for i in xrange(0, len(self.access_sw_list)):
for j in xrange(0, host_num):
host = self.addHost('{0}{1}'.format(prefix_name, self.host_id))
# Link to access sw
self.addLink(self.access_sw_list[i], host)
# Append host to list
host_list.append(host)
self.host_id += 1
def handle_mesh( self, sw_list ):
for link in combinations(sw_list, 2):
self.addLink(link[0], link[1])
def RunTestBed():
# NCTU_EC_Topology( Core Switch, Aggregate Switch, Access Switch, Host)
topo = NCTU_EC_Topology(core=1, agg=6, access=6, host=20)
net = MininetCluster( controller=RemoteController, topo=topo, servers=SERVER_LIST, placement=DFSPlacer, root_node="core1", tunneling="vxlan" )
net.addController( 'controller', controller=RemoteController, ip=CONTROLLER_IP, port=CONTROLLER_PORT )
net.start()
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel('info')
RunTestBed()
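# Quick sanity check of the topology sizes without starting a cluster
# (illustrative; it only reads the lists that __init__ builds above):
#
#   topo = NCTU_EC_Topology(core=1, agg=6, access=6, host=5)
#   print(len(topo.agg_sw_list))     # 6
#   print(len(topo.access_sw_list))  # 36
#   print(len(topo.host_list))       # 180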
|
pichuang/OpenNet
|
mininet-patch/examples/cluster/nctu_ec_wired_topo.py
|
Python
|
gpl-2.0
| 3,067 | 0.011412 |
import io
from setuptools import (
setup,
find_packages,
) # pylint: disable=no-name-in-module,import-error
def dependencies(file):
with open(file) as f:
return f.read().splitlines()
with io.open("README.md", encoding="utf-8") as infile:
long_description = infile.read()
setup(
name="smith_the_crawler",
packages=find_packages(exclude=("tests", "examples")),
version="0.0.12-alpha",
# license="MIT",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
],
python_requires=">=3.1",
description="A webscraper with a sofisticated toolkit to scrap the world",
long_description=long_description,
long_description_content_type="text/markdown",
download_url="https://github.com/VictorAlessander/Smith/archive/refs/tags/v0.0.12-alpha.tar.gz",
author="Victor Alessander",
author_email="victor.alessander.gr@gmail.com",
url="https://github.com/VictorAlessander/Smith",
keywords=[
"crawler",
"webscraping",
"webscraper",
"investments",
"investment",
"invest",
],
install_requires=[
"beautifulsoup4",
"plotly",
"requests",
"pandas",
"fake-useragent",
"openpyxl",
],
# install_requires=dependencies('requirements.txt'),
# tests_require=dependencies("requirements-dev.txt"),
include_package_data=True,
# extras_require={"ipython": ["IPython==5.7.0", "ipywidgets==7.1.0",]},
)
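# Typical local workflow for this setup script (illustrative; these are the
# standard pip/setuptools commands, nothing project-specific):
#
#   python -m pip install .                  # install smith_the_crawler from source
#   python setup.py sdist bdist_wheel        # build distributables into dist/
#   python -m twine upload dist/*            # publish (requires configured credentials)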
|
VictorAlessander/Smith
|
setup.py
|
Python
|
gpl-3.0
| 1,730 | 0.000578 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetEvaluation
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datalabeling
# [START datalabeling_v1beta1_generated_DataLabelingService_GetEvaluation_sync]
from google.cloud import datalabeling_v1beta1
def sample_get_evaluation():
# Create a client
client = datalabeling_v1beta1.DataLabelingServiceClient()
# Initialize request argument(s)
request = datalabeling_v1beta1.GetEvaluationRequest(
name="name_value",
)
# Make the request
response = client.get_evaluation(request=request)
# Handle the response
print(response)
# [END datalabeling_v1beta1_generated_DataLabelingService_GetEvaluation_sync]
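# Illustrative direct invocation of the snippet above. It assumes Application
# Default Credentials are available and that "name_value" has been replaced
# with a real evaluation resource name; otherwise the request will fail.
if __name__ == "__main__":
    sample_get_evaluation()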
|
googleapis/python-datalabeling
|
samples/generated_samples/datalabeling_v1beta1_generated_data_labeling_service_get_evaluation_sync.py
|
Python
|
apache-2.0
| 1,513 | 0.000661 |
import datetime
import decimal
import inspect
import itertools
import re
import socket
import time
import uuid
from io import BytesIO
from operator import itemgetter
import gridfs
import pymongo
from bson import SON, Binary, DBRef, ObjectId
from bson.int64 import Int64
from pymongo import ReturnDocument
try:
import dateutil
except ImportError:
dateutil = None
else:
import dateutil.parser
from mongoengine.base import (
BaseDocument,
BaseField,
ComplexBaseField,
GeoJsonBaseField,
LazyReference,
ObjectIdField,
get_document,
)
from mongoengine.base.utils import LazyRegexCompiler
from mongoengine.common import _import_class
from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.errors import (
DoesNotExist,
InvalidQueryError,
ValidationError,
)
from mongoengine.queryset import DO_NOTHING
from mongoengine.queryset.base import BaseQuerySet
from mongoengine.queryset.transform import STRING_OPERATORS
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
__all__ = (
"StringField",
"URLField",
"EmailField",
"IntField",
"LongField",
"FloatField",
"DecimalField",
"BooleanField",
"DateTimeField",
"DateField",
"ComplexDateTimeField",
"EmbeddedDocumentField",
"ObjectIdField",
"GenericEmbeddedDocumentField",
"DynamicField",
"ListField",
"SortedListField",
"EmbeddedDocumentListField",
"DictField",
"MapField",
"ReferenceField",
"CachedReferenceField",
"LazyReferenceField",
"GenericLazyReferenceField",
"GenericReferenceField",
"BinaryField",
"GridFSError",
"GridFSProxy",
"FileField",
"ImageGridFsProxy",
"ImproperlyConfigured",
"ImageField",
"GeoPointField",
"PointField",
"LineStringField",
"PolygonField",
"SequenceField",
"UUIDField",
"EnumField",
"MultiPointField",
"MultiLineStringField",
"MultiPolygonField",
"GeoJsonBaseField",
)
RECURSIVE_REFERENCE_CONSTANT = "self"
class StringField(BaseField):
"""A unicode string field."""
def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
"""
:param regex: (optional) A string pattern that will be applied during validation
:param max_length: (optional) A max length that will be applied during validation
:param min_length: (optional) A min length that will be applied during validation
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
self.regex = re.compile(regex) if regex else None
self.max_length = max_length
self.min_length = min_length
super().__init__(**kwargs)
def to_python(self, value):
if isinstance(value, str):
return value
try:
value = value.decode("utf-8")
except Exception:
pass
return value
def validate(self, value):
if not isinstance(value, str):
self.error("StringField only accepts string values")
if self.max_length is not None and len(value) > self.max_length:
self.error("String value is too long")
if self.min_length is not None and len(value) < self.min_length:
self.error("String value is too short")
if self.regex is not None and self.regex.match(value) is None:
self.error("String value did not match validation regex")
def lookup_member(self, member_name):
return None
def prepare_query_value(self, op, value):
if not isinstance(op, str):
return value
if op in STRING_OPERATORS:
case_insensitive = op.startswith("i")
op = op.lstrip("i")
flags = re.IGNORECASE if case_insensitive else 0
regex = r"%s"
if op == "startswith":
regex = r"^%s"
elif op == "endswith":
regex = r"%s$"
elif op == "exact":
regex = r"^%s$"
elif op == "wholeword":
regex = r"\b%s\b"
elif op == "regex":
regex = value
if op == "regex":
value = re.compile(regex, flags)
else:
# escape unsafe characters which could lead to a re.error
value = re.escape(value)
value = re.compile(regex % value, flags)
return super().prepare_query_value(op, value)
class URLField(StringField):
"""A field that validates input as an URL."""
_URL_REGEX = LazyRegexCompiler(
r"^(?:[a-z0-9\.\-]*)://" # scheme is validated separately
r"(?:(?:[A-Z0-9](?:[A-Z0-9-_]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|" # domain...
r"localhost|" # localhost...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|" # ...or ipv4
r"\[?[A-F0-9]*:[A-F0-9:]+\]?)" # ...or ipv6
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
_URL_SCHEMES = ["http", "https", "ftp", "ftps"]
def __init__(self, url_regex=None, schemes=None, **kwargs):
"""
:param url_regex: (optional) Overwrite the default regex used for validation
:param schemes: (optional) Overwrite the default URL schemes that are allowed
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.StringField`
"""
self.url_regex = url_regex or self._URL_REGEX
self.schemes = schemes or self._URL_SCHEMES
super().__init__(**kwargs)
def validate(self, value):
# Check first if the scheme is valid
scheme = value.split("://")[0].lower()
if scheme not in self.schemes:
self.error(f"Invalid scheme {scheme} in URL: {value}")
# Then check full URL
if not self.url_regex.match(value):
self.error(f"Invalid URL: {value}")
class EmailField(StringField):
"""A field that validates input as an email address."""
USER_REGEX = LazyRegexCompiler(
# `dot-atom` defined in RFC 5322 Section 3.2.3.
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z"
# `quoted-string` defined in RFC 5322 Section 3.2.4.
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)',
re.IGNORECASE,
)
UTF8_USER_REGEX = LazyRegexCompiler(
(
# RFC 6531 Section 3.3 extends `atext` (used by dot-atom) to
# include `UTF8-non-ascii`.
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z\u0080-\U0010FFFF]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z\u0080-\U0010FFFF]+)*\Z"
# `quoted-string`
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)'
),
re.IGNORECASE | re.UNICODE,
)
DOMAIN_REGEX = LazyRegexCompiler(
r"((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z",
re.IGNORECASE,
)
error_msg = "Invalid email address: %s"
def __init__(
self,
domain_whitelist=None,
allow_utf8_user=False,
allow_ip_domain=False,
*args,
**kwargs,
):
"""
:param domain_whitelist: (optional) list of valid domain names applied during validation
:param allow_utf8_user: Allow user part of the email to contain utf8 char
:param allow_ip_domain: Allow domain part of the email to be an IPv4 or IPv6 address
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.StringField`
"""
self.domain_whitelist = domain_whitelist or []
self.allow_utf8_user = allow_utf8_user
self.allow_ip_domain = allow_ip_domain
super().__init__(*args, **kwargs)
def validate_user_part(self, user_part):
"""Validate the user part of the email address. Return True if
valid and False otherwise.
"""
if self.allow_utf8_user:
return self.UTF8_USER_REGEX.match(user_part)
return self.USER_REGEX.match(user_part)
def validate_domain_part(self, domain_part):
"""Validate the domain part of the email address. Return True if
valid and False otherwise.
"""
# Skip domain validation if it's in the whitelist.
if domain_part in self.domain_whitelist:
return True
if self.DOMAIN_REGEX.match(domain_part):
return True
# Validate IPv4/IPv6, e.g. user@[192.168.0.1]
if self.allow_ip_domain and domain_part[0] == "[" and domain_part[-1] == "]":
for addr_family in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(addr_family, domain_part[1:-1])
return True
except (OSError, UnicodeEncodeError):
pass
return False
def validate(self, value):
super().validate(value)
if "@" not in value:
self.error(self.error_msg % value)
user_part, domain_part = value.rsplit("@", 1)
# Validate the user part.
if not self.validate_user_part(user_part):
self.error(self.error_msg % value)
# Validate the domain and, if invalid, see if it's IDN-encoded.
if not self.validate_domain_part(domain_part):
try:
domain_part = domain_part.encode("idna").decode("ascii")
except UnicodeError:
self.error(
"{} {}".format(
self.error_msg % value, "(domain failed IDN encoding)"
)
)
else:
if not self.validate_domain_part(domain_part):
self.error(
"{} {}".format(
self.error_msg % value, "(domain validation failed)"
)
)
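# Minimal validation sketch for EmailField showing the allow_ip_domain switch;
# the addresses are illustrative.
def _example_emailfield_validation():
    field = EmailField(allow_ip_domain=True)
    field.validate("admin@[192.168.0.1]")  # IPv4 domain accepted thanks to allow_ip_domain
    try:
        field.validate("not-an-email")
    except ValidationError:
        pass  # no "@" present, so validation fails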
class IntField(BaseField):
"""32-bit integer field."""
def __init__(self, min_value=None, max_value=None, **kwargs):
"""
:param min_value: (optional) A min value that will be applied during validation
:param max_value: (optional) A max value that will be applied during validation
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
self.min_value, self.max_value = min_value, max_value
super().__init__(**kwargs)
def to_python(self, value):
try:
value = int(value)
except (TypeError, ValueError):
pass
return value
def validate(self, value):
try:
value = int(value)
except (TypeError, ValueError):
self.error("%s could not be converted to int" % value)
if self.min_value is not None and value < self.min_value:
self.error("Integer value is too small")
if self.max_value is not None and value > self.max_value:
self.error("Integer value is too large")
def prepare_query_value(self, op, value):
if value is None:
return value
return super().prepare_query_value(op, int(value))
class LongField(BaseField):
"""64-bit integer field. (Equivalent to IntField since the support to Python2 was dropped)"""
def __init__(self, min_value=None, max_value=None, **kwargs):
"""
:param min_value: (optional) A min value that will be applied during validation
:param max_value: (optional) A max value that will be applied during validation
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
self.min_value, self.max_value = min_value, max_value
super().__init__(**kwargs)
def to_python(self, value):
try:
value = int(value)
except (TypeError, ValueError):
pass
return value
def to_mongo(self, value):
return Int64(value)
def validate(self, value):
try:
value = int(value)
except (TypeError, ValueError):
self.error("%s could not be converted to long" % value)
if self.min_value is not None and value < self.min_value:
self.error("Long value is too small")
if self.max_value is not None and value > self.max_value:
self.error("Long value is too large")
def prepare_query_value(self, op, value):
if value is None:
return value
return super().prepare_query_value(op, int(value))
class FloatField(BaseField):
"""Floating point number field."""
def __init__(self, min_value=None, max_value=None, **kwargs):
"""
:param min_value: (optional) A min value that will be applied during validation
:param max_value: (optional) A max value that will be applied during validation
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
self.min_value, self.max_value = min_value, max_value
super().__init__(**kwargs)
def to_python(self, value):
try:
value = float(value)
except ValueError:
pass
return value
def validate(self, value):
if isinstance(value, int):
try:
value = float(value)
except OverflowError:
self.error("The value is too large to be converted to float")
if not isinstance(value, float):
self.error("FloatField only accepts float and integer values")
if self.min_value is not None and value < self.min_value:
self.error("Float value is too small")
if self.max_value is not None and value > self.max_value:
self.error("Float value is too large")
def prepare_query_value(self, op, value):
if value is None:
return value
return super().prepare_query_value(op, float(value))
class DecimalField(BaseField):
"""Fixed-point decimal number field. Stores the value as a float by default unless `force_string` is used.
If using floats, beware of Decimal to float conversion (potential precision loss)
"""
def __init__(
self,
min_value=None,
max_value=None,
force_string=False,
precision=2,
rounding=decimal.ROUND_HALF_UP,
**kwargs,
):
"""
:param min_value: (optional) A min value that will be applied during validation
:param max_value: (optional) A max value that will be applied during validation
:param force_string: Store the value as a string (instead of a float).
Be aware that this affects query sorting and operation like lte, gte (as string comparison is applied)
and some query operator won't work (e.g. inc, dec)
:param precision: Number of decimal places to store.
:param rounding: The rounding rule from the python decimal library:
- decimal.ROUND_CEILING (towards Infinity)
- decimal.ROUND_DOWN (towards zero)
- decimal.ROUND_FLOOR (towards -Infinity)
- decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
- decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
- decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
- decimal.ROUND_UP (away from zero)
- decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5; otherwise towards zero)
Defaults to: ``decimal.ROUND_HALF_UP``
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
self.min_value = min_value
self.max_value = max_value
self.force_string = force_string
if precision < 0 or not isinstance(precision, int):
self.error("precision must be a positive integer")
self.precision = precision
self.rounding = rounding
super().__init__(**kwargs)
def to_python(self, value):
if value is None:
return value
# Convert to string for python 2.6 before casting to Decimal
try:
value = decimal.Decimal("%s" % value)
except (TypeError, ValueError, decimal.InvalidOperation):
return value
if self.precision > 0:
return value.quantize(
decimal.Decimal(".%s" % ("0" * self.precision)), rounding=self.rounding
)
else:
return value.quantize(decimal.Decimal(), rounding=self.rounding)
def to_mongo(self, value):
if value is None:
return value
if self.force_string:
return str(self.to_python(value))
return float(self.to_python(value))
def validate(self, value):
if not isinstance(value, decimal.Decimal):
if not isinstance(value, str):
value = str(value)
try:
value = decimal.Decimal(value)
except (TypeError, ValueError, decimal.InvalidOperation) as exc:
self.error("Could not convert value to decimal: %s" % exc)
if self.min_value is not None and value < self.min_value:
self.error("Decimal value is too small")
if self.max_value is not None and value > self.max_value:
self.error("Decimal value is too large")
def prepare_query_value(self, op, value):
return super().prepare_query_value(op, self.to_mongo(value))
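# Storage sketch for DecimalField: floats by default, strings with force_string;
# the numbers are illustrative.
def _example_decimalfield_storage():
    assert DecimalField(precision=2).to_mongo("10.456") == 10.46
    assert DecimalField(precision=2, force_string=True).to_mongo("10.456") == "10.46"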
class BooleanField(BaseField):
"""Boolean field type."""
def to_python(self, value):
try:
value = bool(value)
except (ValueError, TypeError):
pass
return value
def validate(self, value):
if not isinstance(value, bool):
self.error("BooleanField only accepts boolean values")
class DateTimeField(BaseField):
"""Datetime field.
    Uses the python-dateutil library if available, otherwise falls back to
    time.strptime to parse the dates. Note: python-dateutil's parser is fully
    featured and, when installed, can convert varying types of date formats
    into valid python datetime objects.
Note: To default the field to the current datetime, use: DateTimeField(default=datetime.utcnow)
Note: Microseconds are rounded to the nearest millisecond.
Pre UTC microsecond support is effectively broken.
Use :class:`~mongoengine.fields.ComplexDateTimeField` if you
need accurate microsecond support.
"""
def validate(self, value):
new_value = self.to_mongo(value)
if not isinstance(new_value, (datetime.datetime, datetime.date)):
self.error('cannot parse date "%s"' % value)
def to_mongo(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
if callable(value):
return value()
if isinstance(value, str):
return self._parse_datetime(value)
else:
return None
@staticmethod
def _parse_datetime(value):
# Attempt to parse a datetime from a string
value = value.strip()
if not value:
return None
if dateutil:
try:
return dateutil.parser.parse(value)
except (TypeError, ValueError, OverflowError):
return None
# split usecs, because they are not recognized by strptime.
if "." in value:
try:
value, usecs = value.split(".")
usecs = int(usecs)
except ValueError:
return None
else:
usecs = 0
kwargs = {"microsecond": usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.datetime(
*time.strptime(value, "%Y-%m-%d %H:%M:%S")[:6], **kwargs
)
except ValueError:
try: # Try without seconds.
return datetime.datetime(
*time.strptime(value, "%Y-%m-%d %H:%M")[:5], **kwargs
)
except ValueError: # Try without hour/minutes/seconds.
try:
return datetime.datetime(
*time.strptime(value, "%Y-%m-%d")[:3], **kwargs
)
except ValueError:
return None
def prepare_query_value(self, op, value):
return super().prepare_query_value(op, self.to_mongo(value))
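# Parsing sketch for DateTimeField: strings go through python-dateutil when it
# is installed, otherwise through the strptime fallbacks above. The date is
# illustrative.
def _example_datetimefield_parsing():
    parsed = DateTimeField().to_mongo("2021-06-08 20:26:24")
    assert parsed == datetime.datetime(2021, 6, 8, 20, 26, 24)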
class DateField(DateTimeField):
def to_mongo(self, value):
value = super().to_mongo(value)
# drop hours, minutes, seconds
if isinstance(value, datetime.datetime):
value = datetime.datetime(value.year, value.month, value.day)
return value
def to_python(self, value):
value = super().to_python(value)
# convert datetime to date
if isinstance(value, datetime.datetime):
value = datetime.date(value.year, value.month, value.day)
return value
class ComplexDateTimeField(StringField):
"""
ComplexDateTimeField handles microseconds exactly instead of rounding
like DateTimeField does.
Derives from a StringField so you can do `gte` and `lte` filtering by
using lexicographical comparison when filtering / sorting strings.
The stored string has the following format:
YYYY,MM,DD,HH,MM,SS,NNNNNN
Where NNNNNN is the number of microseconds of the represented `datetime`.
The `,` as the separator can be easily modified by passing the `separator`
keyword when initializing the field.
Note: To default the field to the current datetime, use: DateTimeField(default=datetime.utcnow)
"""
def __init__(self, separator=",", **kwargs):
"""
:param separator: Allows to customize the separator used for storage (default ``,``)
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.StringField`
"""
self.separator = separator
self.format = separator.join(["%Y", "%m", "%d", "%H", "%M", "%S", "%f"])
super().__init__(**kwargs)
def _convert_from_datetime(self, val):
"""
Convert a `datetime` object to a string representation (which will be
stored in MongoDB). This is the reverse function of
`_convert_from_string`.
>>> a = datetime(2011, 6, 8, 20, 26, 24, 92284)
>>> ComplexDateTimeField()._convert_from_datetime(a)
'2011,06,08,20,26,24,092284'
"""
return val.strftime(self.format)
def _convert_from_string(self, data):
"""
Convert a string representation to a `datetime` object (the object you
will manipulate). This is the reverse function of
`_convert_from_datetime`.
>>> a = '2011,06,08,20,26,24,092284'
>>> ComplexDateTimeField()._convert_from_string(a)
datetime.datetime(2011, 6, 8, 20, 26, 24, 92284)
"""
values = [int(d) for d in data.split(self.separator)]
return datetime.datetime(*values)
def __get__(self, instance, owner):
if instance is None:
return self
data = super().__get__(instance, owner)
if isinstance(data, datetime.datetime) or data is None:
return data
return self._convert_from_string(data)
def __set__(self, instance, value):
super().__set__(instance, value)
value = instance._data[self.name]
if value is not None:
if isinstance(value, datetime.datetime):
instance._data[self.name] = self._convert_from_datetime(value)
else:
instance._data[self.name] = value
def validate(self, value):
value = self.to_python(value)
if not isinstance(value, datetime.datetime):
self.error("Only datetime objects may used in a ComplexDateTimeField")
def to_python(self, value):
original_value = value
try:
return self._convert_from_string(value)
except Exception:
return original_value
def to_mongo(self, value):
value = self.to_python(value)
return self._convert_from_datetime(value)
def prepare_query_value(self, op, value):
return super().prepare_query_value(op, self._convert_from_datetime(value))
class EmbeddedDocumentField(BaseField):
"""An embedded document field - with a declared document_type.
Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.
"""
def __init__(self, document_type, **kwargs):
# XXX ValidationError raised outside of the "validate" method.
if not (
isinstance(document_type, str)
or issubclass(document_type, EmbeddedDocument)
):
self.error(
"Invalid embedded document class provided to an "
"EmbeddedDocumentField"
)
self.document_type_obj = document_type
super().__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, str):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
resolved_document_type = self.owner_document
else:
resolved_document_type = get_document(self.document_type_obj)
if not issubclass(resolved_document_type, EmbeddedDocument):
                # Due to the late resolution of the document_type,
                # there is a chance that it won't be an EmbeddedDocument (#1661)
self.error(
"Invalid embedded document class provided to an "
"EmbeddedDocumentField"
)
self.document_type_obj = resolved_document_type
return self.document_type_obj
def to_python(self, value):
if not isinstance(value, self.document_type):
return self.document_type._from_son(
value, _auto_dereference=self._auto_dereference
)
return value
def to_mongo(self, value, use_db_field=True, fields=None):
if not isinstance(value, self.document_type):
return value
return self.document_type.to_mongo(value, use_db_field, fields)
def validate(self, value, clean=True):
"""Make sure that the document instance is an instance of the
EmbeddedDocument subclass provided when the document was defined.
"""
# Using isinstance also works for subclasses of self.document
if not isinstance(value, self.document_type):
self.error(
"Invalid embedded document instance provided to an "
"EmbeddedDocumentField"
)
self.document_type.validate(value, clean)
def lookup_member(self, member_name):
doc_and_subclasses = [self.document_type] + self.document_type.__subclasses__()
for doc_type in doc_and_subclasses:
field = doc_type._fields.get(member_name)
if field:
return field
def prepare_query_value(self, op, value):
if value is not None and not isinstance(value, self.document_type):
# Short circuit for special operators, returning them as is
if isinstance(value, dict) and all(k.startswith("$") for k in value.keys()):
return value
try:
value = self.document_type._from_son(value)
except ValueError:
raise InvalidQueryError(
"Querying the embedded document '%s' failed, due to an invalid query value"
% (self.document_type._class_name,)
)
super().prepare_query_value(op, value)
return self.to_mongo(value)
class GenericEmbeddedDocumentField(BaseField):
"""A generic embedded document field - allows any
:class:`~mongoengine.EmbeddedDocument` to be stored.
Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.
.. note ::
You can use the choices param to limit the acceptable
EmbeddedDocument types
"""
def prepare_query_value(self, op, value):
return super().prepare_query_value(op, self.to_mongo(value))
def to_python(self, value):
if isinstance(value, dict):
doc_cls = get_document(value["_cls"])
value = doc_cls._from_son(value)
return value
def validate(self, value, clean=True):
if self.choices and isinstance(value, SON):
for choice in self.choices:
if value["_cls"] == choice._class_name:
return True
if not isinstance(value, EmbeddedDocument):
self.error(
"Invalid embedded document instance provided to an "
"GenericEmbeddedDocumentField"
)
value.validate(clean=clean)
def lookup_member(self, member_name):
document_choices = self.choices or []
for document_choice in document_choices:
doc_and_subclasses = [document_choice] + document_choice.__subclasses__()
for doc_type in doc_and_subclasses:
field = doc_type._fields.get(member_name)
if field:
return field
def to_mongo(self, document, use_db_field=True, fields=None):
if document is None:
return None
data = document.to_mongo(use_db_field, fields)
if "_cls" not in data:
data["_cls"] = document._class_name
return data
class DynamicField(BaseField):
"""A truly dynamic field type capable of handling different and varying
types of data.
Used by :class:`~mongoengine.DynamicDocument` to handle dynamic data"""
def to_mongo(self, value, use_db_field=True, fields=None):
"""Convert a Python type to a MongoDB compatible type."""
if isinstance(value, str):
return value
if hasattr(value, "to_mongo"):
cls = value.__class__
val = value.to_mongo(use_db_field, fields)
            # If it's a document that's not inherited, add _cls
if isinstance(value, Document):
val = {"_ref": value.to_dbref(), "_cls": cls.__name__}
if isinstance(value, EmbeddedDocument):
val["_cls"] = cls.__name__
return val
if not isinstance(value, (dict, list, tuple)):
return value
is_list = False
if not hasattr(value, "items"):
is_list = True
value = {k: v for k, v in enumerate(value)}
data = {}
for k, v in value.items():
data[k] = self.to_mongo(v, use_db_field, fields)
value = data
if is_list: # Convert back to a list
value = [v for k, v in sorted(data.items(), key=itemgetter(0))]
return value
def to_python(self, value):
if isinstance(value, dict) and "_cls" in value:
doc_cls = get_document(value["_cls"])
if "_ref" in value:
value = doc_cls._get_db().dereference(value["_ref"])
return doc_cls._from_son(value)
return super().to_python(value)
def lookup_member(self, member_name):
return member_name
def prepare_query_value(self, op, value):
if isinstance(value, str):
return StringField().prepare_query_value(op, value)
return super().prepare_query_value(op, self.to_mongo(value))
def validate(self, value, clean=True):
if hasattr(value, "validate"):
value.validate(clean=clean)
class ListField(ComplexBaseField):
"""A list field that wraps a standard field, allowing multiple instances
of the field to be used as a list in the database.
If using with ReferenceFields see: :ref:`many-to-many-with-listfields`
.. note::
Required means it cannot be empty - as the default for ListFields is []
"""
def __init__(self, field=None, max_length=None, **kwargs):
self.max_length = max_length
kwargs.setdefault("default", lambda: [])
super().__init__(field=field, **kwargs)
def __get__(self, instance, owner):
if instance is None:
# Document class being used rather than a document object
return self
value = instance._data.get(self.name)
LazyReferenceField = _import_class("LazyReferenceField")
GenericLazyReferenceField = _import_class("GenericLazyReferenceField")
if (
isinstance(self.field, (LazyReferenceField, GenericLazyReferenceField))
and value
):
instance._data[self.name] = [self.field.build_lazyref(x) for x in value]
return super().__get__(instance, owner)
def validate(self, value):
"""Make sure that a list of valid fields is being used."""
if not isinstance(value, (list, tuple, BaseQuerySet)):
self.error("Only lists and tuples may be used in a list field")
# Validate that max_length is not exceeded.
# NOTE It's still possible to bypass this enforcement by using $push.
# However, if the document is reloaded after $push and then re-saved,
# the validation error will be raised.
if self.max_length is not None and len(value) > self.max_length:
self.error("List is too long")
super().validate(value)
def prepare_query_value(self, op, value):
# Validate that the `set` operator doesn't contain more items than `max_length`.
if op == "set" and self.max_length is not None and len(value) > self.max_length:
self.error("List is too long")
if self.field:
# If the value is iterable and it's not a string nor a
# BaseDocument, call prepare_query_value for each of its items.
if (
op in ("set", "unset", None)
and hasattr(value, "__iter__")
and not isinstance(value, str)
and not isinstance(value, BaseDocument)
):
return [self.field.prepare_query_value(op, v) for v in value]
return self.field.prepare_query_value(op, value)
return super().prepare_query_value(op, value)
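# Validation sketch for ListField with a max_length cap; values are illustrative.
def _example_listfield_max_length():
    tags = ListField(StringField(max_length=10), max_length=3)
    tags.validate(["red", "green"])  # two short strings: fine
    try:
        tags.validate(["a", "b", "c", "d"])  # four items exceed max_length=3
    except ValidationError:
        pass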
class EmbeddedDocumentListField(ListField):
"""A :class:`~mongoengine.ListField` designed specially to hold a list of
embedded documents to provide additional query helpers.
.. note::
The only valid list values are subclasses of
:class:`~mongoengine.EmbeddedDocument`.
"""
def __init__(self, document_type, **kwargs):
"""
:param document_type: The type of
:class:`~mongoengine.EmbeddedDocument` the list will hold.
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.ListField`
"""
super().__init__(field=EmbeddedDocumentField(document_type), **kwargs)
class SortedListField(ListField):
"""A ListField that sorts the contents of its list before writing to
the database in order to ensure that a sorted list is always
retrieved.
.. warning::
There is a potential race condition when handling lists. If you set /
save the whole list then other processes trying to save the whole list
as well could overwrite changes. The safest way to append to a list is
to perform a push operation.
"""
def __init__(self, field, **kwargs):
self._ordering = kwargs.pop("ordering", None)
self._order_reverse = kwargs.pop("reverse", False)
super().__init__(field, **kwargs)
def to_mongo(self, value, use_db_field=True, fields=None):
value = super().to_mongo(value, use_db_field, fields)
if self._ordering is not None:
return sorted(
value, key=itemgetter(self._ordering), reverse=self._order_reverse
)
return sorted(value, reverse=self._order_reverse)
def key_not_string(d):
"""Helper function to recursively determine if any key in a
dictionary is not a string.
"""
for k, v in d.items():
if not isinstance(k, str) or (isinstance(v, dict) and key_not_string(v)):
return True
def key_starts_with_dollar(d):
"""Helper function to recursively determine if any key in a
dictionary starts with a dollar
"""
for k, v in d.items():
if (k.startswith("$")) or (isinstance(v, dict) and key_starts_with_dollar(v)):
return True
class DictField(ComplexBaseField):
"""A dictionary field that wraps a standard Python dictionary. This is
similar to an embedded document, but the structure is not defined.
.. note::
Required means it cannot be empty - as the default for DictFields is {}
"""
def __init__(self, field=None, *args, **kwargs):
self._auto_dereference = False
kwargs.setdefault("default", lambda: {})
super().__init__(*args, field=field, **kwargs)
def validate(self, value):
"""Make sure that a list of valid fields is being used."""
if not isinstance(value, dict):
self.error("Only dictionaries may be used in a DictField")
if key_not_string(value):
msg = "Invalid dictionary key - documents must have only string keys"
self.error(msg)
# Following condition applies to MongoDB >= 3.6
# older Mongo has stricter constraints but
# it will be rejected upon insertion anyway
# Having a validation that depends on the MongoDB version
# is not straightforward as the field isn't aware of the connected Mongo
if key_starts_with_dollar(value):
self.error(
'Invalid dictionary key name - keys may not startswith "$" characters'
)
super().validate(value)
def lookup_member(self, member_name):
return DictField(db_field=member_name)
def prepare_query_value(self, op, value):
match_operators = [*STRING_OPERATORS]
if op in match_operators and isinstance(value, str):
return StringField().prepare_query_value(op, value)
if hasattr(
self.field, "field"
): # Used for instance when using DictField(ListField(IntField()))
if op in ("set", "unset") and isinstance(value, dict):
return {
k: self.field.prepare_query_value(op, v) for k, v in value.items()
}
return self.field.prepare_query_value(op, value)
return super().prepare_query_value(op, value)
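# Validation sketch for DictField key rules; the payloads are illustrative.
def _example_dictfield_key_rules():
    info = DictField()
    info.validate({"colour": "blue", "nested": {"size": 3}})  # plain string keys: ok
    try:
        info.validate({"$set": {"colour": "red"}})  # keys may not start with "$"
    except ValidationError:
        pass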
class MapField(DictField):
"""A field that maps a name to a specified field type. Similar to
a DictField, except the 'value' of each item must match the specified
field type.
"""
def __init__(self, field=None, *args, **kwargs):
# XXX ValidationError raised outside of the "validate" method.
if not isinstance(field, BaseField):
self.error("Argument to MapField constructor must be a valid field")
super().__init__(field=field, *args, **kwargs)
class ReferenceField(BaseField):
"""A reference to a document that will be automatically dereferenced on
access (lazily).
    Note this means you will get a database I/O access every time you access
    this field. This is necessary because the field returns a :class:`~mongoengine.Document`
    whose precise type can depend on the value of the `_cls` field present in the
    document in the database.
    In short, using this type of field can lead to poor performance (especially
    if you access this field only to retrieve its `pk` field, which is already
    known before dereferencing). To solve this you should consider using the
:class:`~mongoengine.fields.LazyReferenceField`.
Use the `reverse_delete_rule` to handle what should happen if the document
the field is referencing is deleted. EmbeddedDocuments, DictFields and
MapFields does not support reverse_delete_rule and an `InvalidDocumentError`
will be raised if trying to set on one of these Document / Field types.
The options are:
* DO_NOTHING (0) - don't do anything (default).
* NULLIFY (1) - Updates the reference to null.
* CASCADE (2) - Deletes the documents associated with the reference.
* DENY (3) - Prevent the deletion of the reference object.
* PULL (4) - Pull the reference from a :class:`~mongoengine.fields.ListField` of references
Alternative syntax for registering delete rules (useful when implementing
bi-directional delete rules)
.. code-block:: python
class Org(Document):
owner = ReferenceField('User')
class User(Document):
org = ReferenceField('Org', reverse_delete_rule=CASCADE)
User.register_delete_rule(Org, 'owner', DENY)
"""
def __init__(
self, document_type, dbref=False, reverse_delete_rule=DO_NOTHING, **kwargs
):
"""Initialises the Reference Field.
:param document_type: The type of Document that will be referenced
:param dbref: Store the reference as :class:`~pymongo.dbref.DBRef`
or as the :class:`~pymongo.objectid.ObjectId`.
:param reverse_delete_rule: Determines what to do when the referring
object is deleted
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
.. note ::
A reference to an abstract document type is always stored as a
:class:`~pymongo.dbref.DBRef`, regardless of the value of `dbref`.
"""
# XXX ValidationError raised outside of the "validate" method.
if not isinstance(document_type, str) and not issubclass(
document_type, Document
):
self.error(
"Argument to ReferenceField constructor must be a "
"document class or a string"
)
self.dbref = dbref
self.document_type_obj = document_type
self.reverse_delete_rule = reverse_delete_rule
super().__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, str):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
@staticmethod
def _lazy_load_ref(ref_cls, dbref):
dereferenced_son = ref_cls._get_db().dereference(dbref)
if dereferenced_son is None:
raise DoesNotExist(f"Trying to dereference unknown document {dbref}")
return ref_cls._from_son(dereferenced_son)
def __get__(self, instance, owner):
"""Descriptor to allow lazy dereferencing."""
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
ref_value = instance._data.get(self.name)
auto_dereference = instance._fields[self.name]._auto_dereference
# Dereference DBRefs
if auto_dereference and isinstance(ref_value, DBRef):
if hasattr(ref_value, "cls"):
# Dereference using the class type specified in the reference
cls = get_document(ref_value.cls)
else:
cls = self.document_type
instance._data[self.name] = self._lazy_load_ref(cls, ref_value)
return super().__get__(instance, owner)
def to_mongo(self, document):
if isinstance(document, DBRef):
if not self.dbref:
return document.id
return document
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.pk
# XXX ValidationError raised outside of the "validate" method.
if id_ is None:
self.error(
"You can only reference documents once they have"
" been saved to the database"
)
# Use the attributes from the document instance, so that they
# override the attributes of this field's document type
cls = document
else:
id_ = document
cls = self.document_type
id_field_name = cls._meta["id_field"]
id_field = cls._fields[id_field_name]
id_ = id_field.to_mongo(id_)
if self.document_type._meta.get("abstract"):
collection = cls._get_collection_name()
return DBRef(collection, id_, cls=cls._class_name)
elif self.dbref:
collection = cls._get_collection_name()
return DBRef(collection, id_)
return id_
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
if not self.dbref and not isinstance(
value, (DBRef, Document, EmbeddedDocument)
):
collection = self.document_type._get_collection_name()
value = DBRef(collection, self.document_type.id.to_python(value))
return value
def prepare_query_value(self, op, value):
if value is None:
return None
super().prepare_query_value(op, value)
return self.to_mongo(value)
def validate(self, value):
if not isinstance(value, (self.document_type, LazyReference, DBRef, ObjectId)):
self.error(
"A ReferenceField only accepts DBRef, LazyReference, ObjectId or documents"
)
if isinstance(value, Document) and value.id is None:
self.error(
"You can only reference documents once they have been "
"saved to the database"
)
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
class CachedReferenceField(BaseField):
"""A referencefield with cache fields to purpose pseudo-joins"""
def __init__(self, document_type, fields=None, auto_sync=True, **kwargs):
"""Initialises the Cached Reference Field.
:param document_type: The type of Document that will be referenced
:param fields: A list of fields to be cached in document
:param auto_sync: if True documents are auto updated
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
if fields is None:
fields = []
# XXX ValidationError raised outside of the "validate" method.
if not isinstance(document_type, str) and not (
inspect.isclass(document_type) and issubclass(document_type, Document)
):
self.error(
"Argument to CachedReferenceField constructor must be a"
" document class or a string"
)
self.auto_sync = auto_sync
self.document_type_obj = document_type
self.fields = fields
super().__init__(**kwargs)
def start_listener(self):
from mongoengine import signals
signals.post_save.connect(self.on_document_pre_save, sender=self.document_type)
def on_document_pre_save(self, sender, document, created, **kwargs):
if created:
return None
update_kwargs = {
f"set__{self.name}__{key}": val
for key, val in document._delta()[0].items()
if key in self.fields
}
if update_kwargs:
filter_kwargs = {}
filter_kwargs[self.name] = document
self.owner_document.objects(**filter_kwargs).update(**update_kwargs)
def to_python(self, value):
if isinstance(value, dict):
collection = self.document_type._get_collection_name()
value = DBRef(collection, self.document_type.id.to_python(value["_id"]))
return self.document_type._from_son(
self.document_type._get_db().dereference(value)
)
return value
@property
def document_type(self):
if isinstance(self.document_type_obj, str):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
@staticmethod
def _lazy_load_ref(ref_cls, dbref):
dereferenced_son = ref_cls._get_db().dereference(dbref)
if dereferenced_son is None:
raise DoesNotExist(f"Trying to dereference unknown document {dbref}")
return ref_cls._from_son(dereferenced_son)
def __get__(self, instance, owner):
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
value = instance._data.get(self.name)
auto_dereference = instance._fields[self.name]._auto_dereference
# Dereference DBRefs
if auto_dereference and isinstance(value, DBRef):
instance._data[self.name] = self._lazy_load_ref(self.document_type, value)
return super().__get__(instance, owner)
def to_mongo(self, document, use_db_field=True, fields=None):
id_field_name = self.document_type._meta["id_field"]
id_field = self.document_type._fields[id_field_name]
# XXX ValidationError raised outside of the "validate" method.
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.pk
if id_ is None:
self.error(
"You can only reference documents once they have"
" been saved to the database"
)
else:
self.error("Only accept a document object")
value = SON((("_id", id_field.to_mongo(id_)),))
if fields:
new_fields = [f for f in self.fields if f in fields]
else:
new_fields = self.fields
value.update(dict(document.to_mongo(use_db_field, fields=new_fields)))
return value
def prepare_query_value(self, op, value):
if value is None:
return None
# XXX ValidationError raised outside of the "validate" method.
if isinstance(value, Document):
if value.pk is None:
self.error(
"You can only reference documents once they have"
" been saved to the database"
)
value_dict = {"_id": value.pk}
for field in self.fields:
value_dict.update({field: value[field]})
return value_dict
raise NotImplementedError
def validate(self, value):
if not isinstance(value, self.document_type):
self.error("A CachedReferenceField only accepts documents")
if isinstance(value, Document) and value.id is None:
self.error(
"You can only reference documents once they have been "
"saved to the database"
)
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
def sync_all(self):
"""
Sync all cached fields on demand.
        Caution: this operation may be slow.
"""
update_key = "set__%s" % self.name
for doc in self.document_type.objects:
filter_kwargs = {}
filter_kwargs[self.name] = doc
update_kwargs = {}
update_kwargs[update_key] = doc
self.owner_document.objects(**filter_kwargs).update(**update_kwargs)
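# A sketch of the pseudo-join this field provides (the document classes are
# illustrative): the listed fields of the referenced document are copied into
# the referring document, so they can be queried without a second lookup.
#
#     class Author(Document):
#         name = StringField()
#
#     class Book(Document):
#         author = CachedReferenceField(Author, fields=["name"])
#
#     # Stored on Book as {"_id": <author id>, "name": "..."}, which makes a
#     # query such as Book.objects(author__name="Ada") possible without
#     # dereferencing Author first.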
class GenericReferenceField(BaseField):
"""A reference to *any* :class:`~mongoengine.document.Document` subclass
that will be automatically dereferenced on access (lazily).
Note this field works the same way as :class:`~mongoengine.document.ReferenceField`,
    doing database I/O access the first time it is accessed (even if it's only
    to access its ``pk`` or ``id`` field).
To solve this you should consider using the
:class:`~mongoengine.fields.GenericLazyReferenceField`.
.. note ::
* Any documents used as a generic reference must be registered in the
document registry. Importing the model will automatically register
it.
* You can use the choices param to limit the acceptable Document types
"""
def __init__(self, *args, **kwargs):
choices = kwargs.pop("choices", None)
super().__init__(*args, **kwargs)
self.choices = []
# Keep the choices as a list of allowed Document class names
if choices:
for choice in choices:
if isinstance(choice, str):
self.choices.append(choice)
elif isinstance(choice, type) and issubclass(choice, Document):
self.choices.append(choice._class_name)
else:
# XXX ValidationError raised outside of the "validate"
# method.
self.error(
"Invalid choices provided: must be a list of"
"Document subclasses and/or str"
)
def _validate_choices(self, value):
if isinstance(value, dict):
# If the field has not been dereferenced, it is still a dict
# of class and DBRef
value = value.get("_cls")
elif isinstance(value, Document):
value = value._class_name
super()._validate_choices(value)
@staticmethod
def _lazy_load_ref(ref_cls, dbref):
dereferenced_son = ref_cls._get_db().dereference(dbref)
if dereferenced_son is None:
raise DoesNotExist(f"Trying to dereference unknown document {dbref}")
return ref_cls._from_son(dereferenced_son)
def __get__(self, instance, owner):
if instance is None:
return self
value = instance._data.get(self.name)
auto_dereference = instance._fields[self.name]._auto_dereference
if auto_dereference and isinstance(value, dict):
doc_cls = get_document(value["_cls"])
instance._data[self.name] = self._lazy_load_ref(doc_cls, value["_ref"])
return super().__get__(instance, owner)
def validate(self, value):
if not isinstance(value, (Document, DBRef, dict, SON)):
self.error("GenericReferences can only contain documents")
if isinstance(value, (dict, SON)):
if "_ref" not in value or "_cls" not in value:
self.error("GenericReferences can only contain documents")
# We need the id from the saved object to create the DBRef
elif isinstance(value, Document) and value.id is None:
self.error(
"You can only reference documents once they have been"
" saved to the database"
)
def to_mongo(self, document):
if document is None:
return None
if isinstance(document, (dict, SON, ObjectId, DBRef)):
return document
id_field_name = document.__class__._meta["id_field"]
id_field = document.__class__._fields[id_field_name]
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.id
if id_ is None:
# XXX ValidationError raised outside of the "validate" method.
self.error(
"You can only reference documents once they have"
" been saved to the database"
)
else:
id_ = document
id_ = id_field.to_mongo(id_)
collection = document._get_collection_name()
ref = DBRef(collection, id_)
return SON((("_cls", document._class_name), ("_ref", ref)))
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
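# Usage sketch (document classes are illustrative): the value is stored as a
# small mapping holding both the class name and a DBRef, so any registered
# Document type can be referenced.
#
#     class Bookmark(Document):
#         target = GenericReferenceField(choices=[Article, Video])
#
#     # Stored as SON({"_cls": "Article", "_ref": DBRef(<collection>, <id>)}).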
class BinaryField(BaseField):
"""A binary data field."""
def __init__(self, max_bytes=None, **kwargs):
self.max_bytes = max_bytes
super().__init__(**kwargs)
def __set__(self, instance, value):
"""Handle bytearrays in python 3.1"""
if isinstance(value, bytearray):
value = bytes(value)
return super().__set__(instance, value)
def to_mongo(self, value):
return Binary(value)
def validate(self, value):
if not isinstance(value, (bytes, Binary)):
self.error(
"BinaryField only accepts instances of "
"(%s, %s, Binary)" % (bytes.__name__, Binary.__name__)
)
if self.max_bytes is not None and len(value) > self.max_bytes:
self.error("Binary value is too long")
def prepare_query_value(self, op, value):
if value is None:
return value
return super().prepare_query_value(op, self.to_mongo(value))
class EnumField(BaseField):
"""Enumeration Field. Values are stored underneath as is,
so it will only work with simple types (str, int, etc) that
are bson encodable
Example usage:
.. code-block:: python
class Status(Enum):
NEW = 'new'
ONGOING = 'ongoing'
DONE = 'done'
class ModelWithEnum(Document):
status = EnumField(Status, default=Status.NEW)
ModelWithEnum(status='done')
ModelWithEnum(status=Status.DONE)
Enum fields can be searched using enum or its value:
.. code-block:: python
ModelWithEnum.objects(status='new').count()
ModelWithEnum.objects(status=Status.NEW).count()
The values can be restricted to a subset of the enum by using the ``choices`` parameter:
.. code-block:: python
class ModelWithEnum(Document):
status = EnumField(Status, choices=[Status.NEW, Status.DONE])
"""
def __init__(self, enum, **kwargs):
self._enum_cls = enum
if kwargs.get("choices"):
invalid_choices = []
for choice in kwargs["choices"]:
if not isinstance(choice, enum):
invalid_choices.append(choice)
if invalid_choices:
raise ValueError("Invalid choices: %r" % invalid_choices)
else:
kwargs["choices"] = list(self._enum_cls) # Implicit validator
super().__init__(**kwargs)
def __set__(self, instance, value):
is_legal_value = value is None or isinstance(value, self._enum_cls)
if not is_legal_value:
try:
value = self._enum_cls(value)
except Exception:
pass
return super().__set__(instance, value)
def to_mongo(self, value):
if isinstance(value, self._enum_cls):
return value.value
return value
def prepare_query_value(self, op, value):
if value is None:
return value
return super().prepare_query_value(op, self.to_mongo(value))
class GridFSError(Exception):
pass
class GridFSProxy:
"""Proxy object to handle writing and reading of files to and from GridFS"""
_fs = None
def __init__(
self,
grid_id=None,
key=None,
instance=None,
db_alias=DEFAULT_CONNECTION_NAME,
collection_name="fs",
):
self.grid_id = grid_id # Store GridFS id for file
self.key = key
self.instance = instance
self.db_alias = db_alias
self.collection_name = collection_name
self.newfile = None # Used for partial writes
self.gridout = None
def __getattr__(self, name):
attrs = (
"_fs",
"grid_id",
"key",
"instance",
"db_alias",
"collection_name",
"newfile",
"gridout",
)
if name in attrs:
return self.__getattribute__(name)
obj = self.get()
if hasattr(obj, name):
return getattr(obj, name)
raise AttributeError
def __get__(self, instance, value):
return self
def __bool__(self):
return bool(self.grid_id)
def __getstate__(self):
self_dict = self.__dict__
self_dict["_fs"] = None
return self_dict
def __copy__(self):
copied = GridFSProxy()
copied.__dict__.update(self.__getstate__())
return copied
def __deepcopy__(self, memo):
return self.__copy__()
def __repr__(self):
return f"<{self.__class__.__name__}: {self.grid_id}>"
def __str__(self):
gridout = self.get()
filename = gridout.filename if gridout else "<no file>"
return f"<{self.__class__.__name__}: {filename} ({self.grid_id})>"
def __eq__(self, other):
if isinstance(other, GridFSProxy):
return (
(self.grid_id == other.grid_id)
and (self.collection_name == other.collection_name)
and (self.db_alias == other.db_alias)
)
else:
return False
def __ne__(self, other):
return not self == other
@property
def fs(self):
if not self._fs:
self._fs = gridfs.GridFS(get_db(self.db_alias), self.collection_name)
return self._fs
def get(self, grid_id=None):
if grid_id:
self.grid_id = grid_id
if self.grid_id is None:
return None
try:
if self.gridout is None:
self.gridout = self.fs.get(self.grid_id)
return self.gridout
except Exception:
# File has been deleted
return None
def new_file(self, **kwargs):
self.newfile = self.fs.new_file(**kwargs)
self.grid_id = self.newfile._id
self._mark_as_changed()
def put(self, file_obj, **kwargs):
if self.grid_id:
raise GridFSError(
"This document already has a file. Either delete "
"it or call replace to overwrite it"
)
self.grid_id = self.fs.put(file_obj, **kwargs)
self._mark_as_changed()
def write(self, string):
if self.grid_id:
if not self.newfile:
raise GridFSError(
"This document already has a file. Either "
"delete it or call replace to overwrite it"
)
else:
self.new_file()
self.newfile.write(string)
def writelines(self, lines):
if not self.newfile:
self.new_file()
self.grid_id = self.newfile._id
self.newfile.writelines(lines)
def read(self, size=-1):
gridout = self.get()
if gridout is None:
return None
else:
try:
return gridout.read(size)
except Exception:
return ""
def delete(self):
# Delete file from GridFS, FileField still remains
self.fs.delete(self.grid_id)
self.grid_id = None
self.gridout = None
self._mark_as_changed()
def replace(self, file_obj, **kwargs):
self.delete()
self.put(file_obj, **kwargs)
def close(self):
if self.newfile:
self.newfile.close()
def _mark_as_changed(self):
"""Inform the instance that `self.key` has been changed"""
if self.instance:
self.instance._mark_as_changed(self.key)
class FileField(BaseField):
"""A GridFS storage field."""
proxy_class = GridFSProxy
def __init__(
self, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs
):
super().__init__(**kwargs)
self.collection_name = collection_name
self.db_alias = db_alias
def __get__(self, instance, owner):
if instance is None:
return self
# Check if a file already exists for this model
grid_file = instance._data.get(self.name)
if not isinstance(grid_file, self.proxy_class):
grid_file = self.get_proxy_obj(key=self.name, instance=instance)
instance._data[self.name] = grid_file
if not grid_file.key:
grid_file.key = self.name
grid_file.instance = instance
return grid_file
def __set__(self, instance, value):
key = self.name
if (
hasattr(value, "read") and not isinstance(value, GridFSProxy)
) or isinstance(value, (bytes, str)):
# using "FileField() = file/string" notation
grid_file = instance._data.get(self.name)
# If a file already exists, delete it
if grid_file:
try:
grid_file.delete()
except Exception:
pass
# Create a new proxy object as we don't already have one
instance._data[key] = self.get_proxy_obj(key=key, instance=instance)
instance._data[key].put(value)
else:
instance._data[key] = value
instance._mark_as_changed(key)
def get_proxy_obj(self, key, instance, db_alias=None, collection_name=None):
if db_alias is None:
db_alias = self.db_alias
if collection_name is None:
collection_name = self.collection_name
return self.proxy_class(
key=key,
instance=instance,
db_alias=db_alias,
collection_name=collection_name,
)
def to_mongo(self, value):
# Store the GridFS file id in MongoDB
if isinstance(value, self.proxy_class) and value.grid_id is not None:
return value.grid_id
return None
def to_python(self, value):
if value is not None:
return self.proxy_class(
value, collection_name=self.collection_name, db_alias=self.db_alias
)
def validate(self, value):
if value.grid_id is not None:
if not isinstance(value, self.proxy_class):
self.error("FileField only accepts GridFSProxy values")
if not isinstance(value.grid_id, ObjectId):
self.error("Invalid GridFSProxy value")
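# --- Editor's sketch (not part of the original module): a minimal FileField
# round trip. Assumes a running MongoDB instance reachable via connect(); the
# `Animal` document and the file name are purely illustrative.
def _example_filefield_usage():
    from mongoengine import Document, connect

    connect("filefield_example")  # assumes a local mongod on the default port

    class Animal(Document):
        photo = FileField()

    marmot = Animal()
    with open("marmot.jpg", "rb") as fd:
        marmot.photo.put(fd, content_type="image/jpeg")  # stores the bytes in GridFS
    marmot.save()
    photo_bytes = marmot.photo.read()  # read the stored file back
    marmot.photo.delete()              # removes the GridFS file; the field stays
    marmot.save()
    return photo_bytes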
class ImageGridFsProxy(GridFSProxy):
"""Proxy for ImageField"""
def put(self, file_obj, **kwargs):
"""
        Insert an image into the database,
        applying the field properties (size, thumbnail_size).
"""
field = self.instance._fields[self.key]
# Handle nested fields
if hasattr(field, "field") and isinstance(field.field, FileField):
field = field.field
try:
img = Image.open(file_obj)
img_format = img.format
except Exception as e:
raise ValidationError("Invalid image: %s" % e)
# Progressive JPEG
# TODO: fixme, at least unused, at worst bad implementation
progressive = img.info.get("progressive") or False
if (
kwargs.get("progressive")
and isinstance(kwargs.get("progressive"), bool)
and img_format == "JPEG"
):
progressive = True
else:
progressive = False
if field.size and (
img.size[0] > field.size["width"] or img.size[1] > field.size["height"]
):
size = field.size
if size["force"]:
img = ImageOps.fit(
img, (size["width"], size["height"]), Image.ANTIALIAS
)
else:
img.thumbnail((size["width"], size["height"]), Image.ANTIALIAS)
thumbnail = None
if field.thumbnail_size:
size = field.thumbnail_size
if size["force"]:
thumbnail = ImageOps.fit(
img, (size["width"], size["height"]), Image.ANTIALIAS
)
else:
thumbnail = img.copy()
thumbnail.thumbnail((size["width"], size["height"]), Image.ANTIALIAS)
if thumbnail:
thumb_id = self._put_thumbnail(thumbnail, img_format, progressive)
else:
thumb_id = None
w, h = img.size
io = BytesIO()
img.save(io, img_format, progressive=progressive)
io.seek(0)
return super().put(
io, width=w, height=h, format=img_format, thumbnail_id=thumb_id, **kwargs
)
def delete(self, *args, **kwargs):
# deletes thumbnail
out = self.get()
if out and out.thumbnail_id:
self.fs.delete(out.thumbnail_id)
return super().delete()
def _put_thumbnail(self, thumbnail, format, progressive, **kwargs):
w, h = thumbnail.size
io = BytesIO()
thumbnail.save(io, format, progressive=progressive)
io.seek(0)
return self.fs.put(io, width=w, height=h, format=format, **kwargs)
@property
def size(self):
"""
        Return the (width, height) of the image.
"""
out = self.get()
if out:
return out.width, out.height
@property
def format(self):
"""
        Return the format of the image,
        e.g. PNG, JPEG, GIF, etc.
"""
out = self.get()
if out:
return out.format
@property
def thumbnail(self):
"""
        Return a gridfs.grid_file.GridOut
        representing the thumbnail of the image.
"""
out = self.get()
if out and out.thumbnail_id:
return self.fs.get(out.thumbnail_id)
def write(self, *args, **kwargs):
raise RuntimeError('Please use "put" method instead')
def writelines(self, *args, **kwargs):
raise RuntimeError('Please use "put" method instead')
class ImproperlyConfigured(Exception):
pass
class ImageField(FileField):
"""
A Image File storage field.
    :param size: max size to store images, provided as (width, height, force);
        if larger, the image is automatically resized (e.g. size=(800, 600, True))
:param thumbnail_size: size to generate a thumbnail, provided as (width, height, force)
"""
proxy_class = ImageGridFsProxy
def __init__(
self, size=None, thumbnail_size=None, collection_name="images", **kwargs
):
if not Image:
raise ImproperlyConfigured("PIL library was not found")
params_size = ("width", "height", "force")
extra_args = {"size": size, "thumbnail_size": thumbnail_size}
for att_name, att in extra_args.items():
value = None
if isinstance(att, (tuple, list)):
value = dict(itertools.zip_longest(params_size, att, fillvalue=None))
setattr(self, att_name, value)
super().__init__(collection_name=collection_name, **kwargs)
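# --- Editor's sketch (not part of the original module): ImageField with
# resizing and a thumbnail. Assumes Pillow is installed and a MongoDB
# connection is already open; the `Employee` document and the file name are
# illustrative only.
def _example_imagefield_usage():
    from mongoengine import Document

    class Employee(Document):
        # size / thumbnail_size are (width, height, force) tuples, as documented above
        avatar = ImageField(size=(800, 600, True), thumbnail_size=(150, 150, True))

    person = Employee()
    with open("avatar.png", "rb") as fd:
        person.avatar.put(fd)           # resized to fit 800x600 before storage
    person.save()
    width, height = person.avatar.size  # dimensions of the stored image
    thumb = person.avatar.thumbnail     # gridfs GridOut holding the 150x150 thumbnail
    return width, height, thumb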
class SequenceField(BaseField):
"""Provides a sequential counter see:
https://docs.mongodb.com/manual/reference/method/ObjectId/#ObjectIDs-SequenceNumbers
.. note::
        Although traditional databases often use increasing sequence
        numbers for primary keys, in MongoDB the preferred approach is to
        use ObjectIds instead. The concept is that in a very large
        cluster of machines, it is easier to create an ObjectId than to have
        global, uniformly increasing sequence numbers.
:param collection_name: Name of the counter collection (default 'mongoengine.counters')
:param sequence_name: Name of the sequence in the collection (default 'ClassName.counter')
:param value_decorator: Any callable to use as a counter (default int)
Use any callable as `value_decorator` to transform calculated counter into
any value suitable for your needs, e.g. string or hexadecimal
representation of the default integer counter value.
.. note::
In case the counter is defined in the abstract document, it will be
common to all inherited documents and the default sequence name will
be the class name of the abstract document.
"""
_auto_gen = True
COLLECTION_NAME = "mongoengine.counters"
VALUE_DECORATOR = int
def __init__(
self,
collection_name=None,
db_alias=None,
sequence_name=None,
value_decorator=None,
*args,
**kwargs,
):
self.collection_name = collection_name or self.COLLECTION_NAME
self.db_alias = db_alias or DEFAULT_CONNECTION_NAME
self.sequence_name = sequence_name
self.value_decorator = (
value_decorator if callable(value_decorator) else self.VALUE_DECORATOR
)
super().__init__(*args, **kwargs)
def generate(self):
"""
Generate and Increment the counter
"""
sequence_name = self.get_sequence_name()
sequence_id = f"{sequence_name}.{self.name}"
collection = get_db(alias=self.db_alias)[self.collection_name]
counter = collection.find_one_and_update(
filter={"_id": sequence_id},
update={"$inc": {"next": 1}},
return_document=ReturnDocument.AFTER,
upsert=True,
)
return self.value_decorator(counter["next"])
def set_next_value(self, value):
"""Helper method to set the next sequence value"""
sequence_name = self.get_sequence_name()
sequence_id = f"{sequence_name}.{self.name}"
collection = get_db(alias=self.db_alias)[self.collection_name]
counter = collection.find_one_and_update(
filter={"_id": sequence_id},
update={"$set": {"next": value}},
return_document=ReturnDocument.AFTER,
upsert=True,
)
return self.value_decorator(counter["next"])
def get_next_value(self):
"""Helper method to get the next value for previewing.
.. warning:: There is no guarantee this will be the next value
as it is only fixed on set.
"""
sequence_name = self.get_sequence_name()
sequence_id = f"{sequence_name}.{self.name}"
collection = get_db(alias=self.db_alias)[self.collection_name]
data = collection.find_one({"_id": sequence_id})
if data:
return self.value_decorator(data["next"] + 1)
return self.value_decorator(1)
def get_sequence_name(self):
if self.sequence_name:
return self.sequence_name
owner = self.owner_document
if issubclass(owner, Document) and not owner._meta.get("abstract"):
return owner._get_collection_name()
else:
return (
"".join("_%s" % c if c.isupper() else c for c in owner._class_name)
.strip("_")
.lower()
)
def __get__(self, instance, owner):
value = super().__get__(instance, owner)
if value is None and instance._initialised:
value = self.generate()
instance._data[self.name] = value
instance._mark_as_changed(self.name)
return value
def __set__(self, instance, value):
if value is None and instance._initialised:
value = self.generate()
return super().__set__(instance, value)
def prepare_query_value(self, op, value):
"""
        This method is overridden in order to convert the query value into the required
        type. We need to do this in order to be able to successfully compare query
        values passed as strings; the base implementation returns the value as-is.
"""
return self.value_decorator(value)
def to_python(self, value):
if value is None:
value = self.generate()
return value
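# --- Editor's sketch (not part of the original module): an auto-incrementing
# counter. Assumes an open MongoDB connection; the `Ticket` document is
# hypothetical.
def _example_sequencefield_usage():
    from mongoengine import Document

    class Ticket(Document):
        number = SequenceField()

    first = Ticket().save()
    second = Ticket().save()
    assert second.number == first.number + 1
    # The counter itself is stored in the 'mongoengine.counters' collection,
    # keyed as '<collection name>.<field name>' (here: 'ticket.number').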
class UUIDField(BaseField):
"""A UUID field."""
_binary = None
def __init__(self, binary=True, **kwargs):
"""
Store UUID data in the database
:param binary: if False store as a string.
"""
self._binary = binary
super().__init__(**kwargs)
def to_python(self, value):
if not self._binary:
original_value = value
try:
if not isinstance(value, str):
value = str(value)
return uuid.UUID(value)
except (ValueError, TypeError, AttributeError):
return original_value
return value
def to_mongo(self, value):
if not self._binary:
return str(value)
elif isinstance(value, str):
return uuid.UUID(value)
return value
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
def validate(self, value):
if not isinstance(value, uuid.UUID):
if not isinstance(value, str):
value = str(value)
try:
uuid.UUID(value)
except (ValueError, TypeError, AttributeError) as exc:
self.error("Could not convert to UUID: %s" % exc)
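# --- Editor's sketch (not part of the original module): binary vs. string
# storage with UUIDField. Assumes an open connection; `Asset` is hypothetical.
def _example_uuidfield_usage():
    from mongoengine import Document

    class Asset(Document):
        token = UUIDField()                     # binary=True is the default (BSON binary)
        legacy_token = UUIDField(binary=False)  # stored as a plain string

    asset = Asset(token=uuid.uuid4(), legacy_token=uuid.uuid4())
    asset.save()
    return asset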
class GeoPointField(BaseField):
"""A list storing a longitude and latitude coordinate.
.. note:: this represents a generic point in a 2D plane and a legacy way of
representing a geo point. It admits 2d indexes but not "2dsphere" indexes
        in MongoDB > 2.4, which are more natural for modeling geospatial points.
See :ref:`geospatial-indexes`
"""
_geo_index = pymongo.GEO2D
def validate(self, value):
"""Make sure that a geo-value is of type (x, y)"""
if not isinstance(value, (list, tuple)):
self.error("GeoPointField can only accept tuples or lists of (x, y)")
if not len(value) == 2:
self.error("Value (%s) must be a two-dimensional point" % repr(value))
elif not isinstance(value[0], (float, int)) or not isinstance(
value[1], (float, int)
):
self.error("Both values (%s) in point must be float or int" % repr(value))
class PointField(GeoJsonBaseField):
"""A GeoJSON field storing a longitude and latitude coordinate.
The data is represented as:
.. code-block:: js
{'type' : 'Point' ,
'coordinates' : [x, y]}
You can either pass a dict with the full information or a list
to set the value.
Requires mongodb >= 2.4
"""
_type = "Point"
class LineStringField(GeoJsonBaseField):
"""A GeoJSON field storing a line of longitude and latitude coordinates.
The data is represented as:
.. code-block:: js
{'type' : 'LineString' ,
'coordinates' : [[x1, y1], [x2, y2] ... [xn, yn]]}
You can either pass a dict with the full information or a list of points.
Requires mongodb >= 2.4
"""
_type = "LineString"
class PolygonField(GeoJsonBaseField):
"""A GeoJSON field storing a polygon of longitude and latitude coordinates.
The data is represented as:
.. code-block:: js
{'type' : 'Polygon' ,
'coordinates' : [[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]}
You can either pass a dict with the full information or a list
of LineStrings. The first LineString being the outside and the rest being
holes.
Requires mongodb >= 2.4
"""
_type = "Polygon"
class MultiPointField(GeoJsonBaseField):
"""A GeoJSON field storing a list of Points.
The data is represented as:
.. code-block:: js
{'type' : 'MultiPoint' ,
'coordinates' : [[x1, y1], [x2, y2]]}
You can either pass a dict with the full information or a list
to set the value.
Requires mongodb >= 2.6
"""
_type = "MultiPoint"
class MultiLineStringField(GeoJsonBaseField):
"""A GeoJSON field storing a list of LineStrings.
The data is represented as:
.. code-block:: js
{'type' : 'MultiLineString' ,
'coordinates' : [[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]]}
You can either pass a dict with the full information or a list of points.
Requires mongodb >= 2.6
"""
_type = "MultiLineString"
class MultiPolygonField(GeoJsonBaseField):
"""A GeoJSON field storing list of Polygons.
The data is represented as:
.. code-block:: js
{'type' : 'MultiPolygon' ,
'coordinates' : [[
[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]
], [
[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]
]
}
You can either pass a dict with the full information or a list
of Polygons.
Requires mongodb >= 2.6
"""
_type = "MultiPolygon"
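# --- Editor's sketch (not part of the original module): storing and querying
# GeoJSON points. Assumes an open connection to a MongoDB that supports
# "2dsphere" indexes; the `Place` document and coordinates are illustrative.
def _example_geojson_usage():
    from mongoengine import Document

    class Place(Document):
        location = PointField()

    # Either a bare [longitude, latitude] list or the full GeoJSON dict works.
    Place(location=[2.3522, 48.8566]).save()
    Place(location={"type": "Point", "coordinates": [2.3522, 48.8566]}).save()
    # Geo queries use the queryset operators, e.g. __near:
    return Place.objects(location__near=[2.35, 48.85]).count()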
class LazyReferenceField(BaseField):
"""A really lazy reference to a document.
Unlike the :class:`~mongoengine.fields.ReferenceField` it will
**not** be automatically (lazily) dereferenced on access.
Instead, access will return a :class:`~mongoengine.base.LazyReference` class
instance, allowing access to `pk` or manual dereference by using
``fetch()`` method.
"""
def __init__(
self,
document_type,
passthrough=False,
dbref=False,
reverse_delete_rule=DO_NOTHING,
**kwargs,
):
"""Initialises the Reference Field.
:param dbref: Store the reference as :class:`~pymongo.dbref.DBRef`
or as the :class:`~pymongo.objectid.ObjectId`.id .
:param reverse_delete_rule: Determines what to do when the referring
object is deleted
:param passthrough: When trying to access unknown fields, the
:class:`~mongoengine.base.datastructure.LazyReference` instance will
automatically call `fetch()` and try to retrieve the field on the fetched
            document. Note this only works for getting a field (not for setting or deleting).
"""
# XXX ValidationError raised outside of the "validate" method.
if not isinstance(document_type, str) and not issubclass(
document_type, Document
):
self.error(
"Argument to LazyReferenceField constructor must be a "
"document class or a string"
)
self.dbref = dbref
self.passthrough = passthrough
self.document_type_obj = document_type
self.reverse_delete_rule = reverse_delete_rule
super().__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, str):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
def build_lazyref(self, value):
if isinstance(value, LazyReference):
if value.passthrough != self.passthrough:
value = LazyReference(
value.document_type, value.pk, passthrough=self.passthrough
)
elif value is not None:
if isinstance(value, self.document_type):
value = LazyReference(
self.document_type, value.pk, passthrough=self.passthrough
)
elif isinstance(value, DBRef):
value = LazyReference(
self.document_type, value.id, passthrough=self.passthrough
)
else:
# value is the primary key of the referenced document
value = LazyReference(
self.document_type, value, passthrough=self.passthrough
)
return value
def __get__(self, instance, owner):
"""Descriptor to allow lazy dereferencing."""
if instance is None:
# Document class being used rather than a document object
return self
value = self.build_lazyref(instance._data.get(self.name))
if value:
instance._data[self.name] = value
return super().__get__(instance, owner)
def to_mongo(self, value):
if isinstance(value, LazyReference):
pk = value.pk
elif isinstance(value, self.document_type):
pk = value.pk
elif isinstance(value, DBRef):
pk = value.id
else:
# value is the primary key of the referenced document
pk = value
id_field_name = self.document_type._meta["id_field"]
id_field = self.document_type._fields[id_field_name]
pk = id_field.to_mongo(pk)
if self.dbref:
return DBRef(self.document_type._get_collection_name(), pk)
else:
return pk
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
if not isinstance(value, (DBRef, Document, EmbeddedDocument)):
collection = self.document_type._get_collection_name()
value = DBRef(collection, self.document_type.id.to_python(value))
value = self.build_lazyref(value)
return value
def validate(self, value):
if isinstance(value, LazyReference):
if value.collection != self.document_type._get_collection_name():
self.error("Reference must be on a `%s` document." % self.document_type)
pk = value.pk
elif isinstance(value, self.document_type):
pk = value.pk
elif isinstance(value, DBRef):
# TODO: check collection ?
collection = self.document_type._get_collection_name()
if value.collection != collection:
self.error("DBRef on bad collection (must be on `%s`)" % collection)
pk = value.id
else:
# value is the primary key of the referenced document
id_field_name = self.document_type._meta["id_field"]
id_field = getattr(self.document_type, id_field_name)
pk = value
try:
id_field.validate(pk)
except ValidationError:
self.error(
"value should be `{0}` document, LazyReference or DBRef on `{0}` "
"or `{0}`'s primary key (i.e. `{1}`)".format(
self.document_type.__name__, type(id_field).__name__
)
)
if pk is None:
self.error(
"You can only reference documents once they have been "
"saved to the database"
)
def prepare_query_value(self, op, value):
if value is None:
return None
super().prepare_query_value(op, value)
return self.to_mongo(value)
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
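# --- Editor's sketch (not part of the original module): working with a lazy
# reference. Assumes an open connection; `Author` and `Book` are hypothetical.
def _example_lazyreference_usage():
    from mongoengine import Document, StringField

    class Author(Document):
        name = StringField()

    class Book(Document):
        author = LazyReferenceField(Author)

    hugo = Author(name="Victor Hugo").save()
    Book(author=hugo).save()
    ref = Book.objects.first().author  # a LazyReference, not an Author instance
    pk = ref.pk                        # available without a database round trip
    author = ref.fetch()               # explicit dereference when the data is needed
    return pk, author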
class GenericLazyReferenceField(GenericReferenceField):
"""A reference to *any* :class:`~mongoengine.document.Document` subclass.
Unlike the :class:`~mongoengine.fields.GenericReferenceField` it will
**not** be automatically (lazily) dereferenced on access.
Instead, access will return a :class:`~mongoengine.base.LazyReference` class
instance, allowing access to `pk` or manual dereference by using
``fetch()`` method.
.. note ::
* Any documents used as a generic reference must be registered in the
document registry. Importing the model will automatically register
it.
* You can use the choices param to limit the acceptable Document types
"""
def __init__(self, *args, **kwargs):
self.passthrough = kwargs.pop("passthrough", False)
super().__init__(*args, **kwargs)
def _validate_choices(self, value):
if isinstance(value, LazyReference):
value = value.document_type._class_name
super()._validate_choices(value)
def build_lazyref(self, value):
if isinstance(value, LazyReference):
if value.passthrough != self.passthrough:
value = LazyReference(
value.document_type, value.pk, passthrough=self.passthrough
)
elif value is not None:
if isinstance(value, (dict, SON)):
value = LazyReference(
get_document(value["_cls"]),
value["_ref"].id,
passthrough=self.passthrough,
)
elif isinstance(value, Document):
value = LazyReference(
type(value), value.pk, passthrough=self.passthrough
)
return value
def __get__(self, instance, owner):
if instance is None:
return self
value = self.build_lazyref(instance._data.get(self.name))
if value:
instance._data[self.name] = value
return super().__get__(instance, owner)
def validate(self, value):
if isinstance(value, LazyReference) and value.pk is None:
self.error(
"You can only reference documents once they have been"
" saved to the database"
)
return super().validate(value)
def to_mongo(self, document):
if document is None:
return None
if isinstance(document, LazyReference):
return SON(
(
("_cls", document.document_type._class_name),
(
"_ref",
DBRef(
document.document_type._get_collection_name(), document.pk
),
),
)
)
else:
return super().to_mongo(document)
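# --- Editor's sketch (not part of the original module): a lazy reference that
# may point at more than one document type. Assumes an open connection and that
# the referenced models have been imported (which registers them); all names
# are hypothetical.
def _example_generic_lazyreference_usage():
    from mongoengine import Document, StringField

    class Post(Document):
        title = StringField()

    class Activity(Document):
        # choices=[Post, ...] could be passed to restrict the accepted types
        subject = GenericLazyReferenceField()

    post = Post(title="hello").save()
    Activity(subject=post).save()
    subject = Activity.objects.first().subject.fetch()  # dereferences back to a Post
    return subject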
|
MongoEngine/mongoengine
|
mongoengine/fields.py
|
Python
|
mit
| 90,689 | 0.001301 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-21 14:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carts', '0007_auto_20161221_1337'),
]
operations = [
migrations.AddField(
model_name='cart',
name='tax_percentage',
field=models.DecimalField(decimal_places=3, default=0.085, max_digits=20),
),
]
|
michel-rodrigues/ecommerce2
|
source/carts/migrations/0008_cart_tax_percentage.py
|
Python
|
gpl-3.0
| 489 | 0.002045 |
from .downloader_base import DownloaderBase
from ... import logger
log = logger.get(__name__)
import traceback
import json
from urllib import request, error
try:
import ssl
SSL = True
except ImportError:
SSL = False
def is_available():
return SSL
class UrllibDownloader(DownloaderBase):
"""Downloader that uses the native Python HTTP library.
Does not verify HTTPS certificates... """
def get(self, url):
try:
log.debug('Urllib downloader getting url %s', url)
result = request.urlopen(url)
except error.URLError as e:
log.error('Urllib downloader failed: %s' % e.reason)
traceback.print_exc()
            return b''  # a bytes result has no getcode(); bail out on failure
if result.getcode() >= 400:
return b''
return result.read()
def get_json(self, url):
a = self.get(url)
if a:
try:
a = json.loads(a.decode('utf-8'))
except ValueError:
log.error('URL %s does not contain a JSON file.', url)
return False
return a
def get_file(self, url):
return self.get(url)
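# --- Editor's sketch (not part of the original module): driving the downloader
# directly. Assumes DownloaderBase takes no constructor arguments; the URLs are
# placeholders.
def _example_usage():
    downloader = UrllibDownloader()
    raw = downloader.get('https://example.com/')                  # bytes, or b'' on failure
    data = downloader.get_json('https://example.com/data.json')   # parsed JSON, or a falsy value on failure
    return raw, data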
|
blopker/Color-Switch
|
colorswitch/http/downloaders/urllib.py
|
Python
|
mit
| 1,149 | 0.002611 |
import gpi
class ExternalNode(gpi.NodeAPI):
    """This Node allows the user to make a DelaySequence for Jemris."""
def initUI(self):
# Widgets
self.delay_labels = ['Name', 'Observe', 'ADCs', 'Aux1', 'Aux2', 'Aux3', 'Delay', 'DelayType', 'HardwareMode',
'PhaseLock', 'StartSeq', 'StopSeq', 'Vector']
[self.addWidget('StringBox', label) for label in self.delay_labels]
self.addWidget('PushButton', 'ComputeEvents', button_title="Compute events")
# IO Ports
self.addOutPort('DelaySequence', 'LIST')
return 0
def compute(self):
if 'ComputeEvents' in self.widgetEvents() or '_INIT_EVENT_' in self.getEvents():
delay_seq = {'DelaySequence': True}
for label in self.delay_labels:
if self.getVal(label) != '':
delay_seq[label] = self.getVal(label)
self.setData('DelaySequence', [delay_seq])
return 0
|
sravan953/pulseq-gpi
|
pulseq2jemris/jemris_nodes/JMakeDelaySequence_GPI.py
|
Python
|
gpl-3.0
| 990 | 0.00404 |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import csv
from dateutil.parser import parse
def write_problem(steps, problem_views, kc_ops, row_count, kc_model_names,
out):
# variable to store rolled up steps
rollup = []
for s in steps:
# sort transactions within a step by time (should be sorted already,
# but just in case)
steps[s].sort(key=lambda x: x['time'])
# update variables for first attempt
student = steps[s][0]['anon student id']
problem_name = steps[s][0]['problem name']
step_name = s
step_start_time = steps[s][0]['time']
first_transaction_time = steps[s][0]['time']
correct_transaction_time = ""
step_end_time = steps[s][0]['time']
first_attempt = steps[s][0]['outcome'].lower()
incorrects = 0
corrects = 0
hints = 0
kc_sets = {kc_mod: set() for kc_mod in kc_model_names}
# update variables for non-first attempt transactions
for t in steps[s]:
step_end_time = t['time']
if t['outcome'].lower() == 'correct':
correct_transaction_time = t['time']
corrects += 1
elif t['outcome'].lower() == 'incorrect':
incorrects += 1
elif t['outcome'].lower() == 'hint':
hints += 1
for kc_mod in kc_model_names:
for kc in t[kc_mod].split("~~"):
kc_sets[kc_mod].add(kc)
# for each rolled up step, we need to increment the KC counts.
kc_to_write = []
for kc_mod in kc_model_names:
model_name = kc_mod[4:-1]
kcs = list(kc_sets[kc_mod])
kc_to_write.append("~~".join(kcs))
if model_name not in kc_ops:
kc_ops[model_name] = {}
ops = []
for kc in kcs:
if kc not in kc_ops[model_name]:
kc_ops[model_name][kc] = 0
kc_ops[model_name][kc] += 1
ops.append(str(kc_ops[model_name][kc]))
kc_to_write.append("~~".join(ops))
# add rolled up step to rollup
rolled_up_step = [str(row_count),
student,
problem_name,
str(problem_views),
step_name,
step_start_time,
first_transaction_time,
correct_transaction_time,
step_end_time,
first_attempt,
str(incorrects),
str(corrects),
str(hints)]
rolled_up_step.extend(kc_to_write)
row_count += 1
rollup.append(rolled_up_step)
# sort the rolled up steps by step start time
rollup.sort(key=lambda x: x[5])
for line_to_write in rollup:
out.write('\t'.join(line_to_write)+'\n')
return row_count
def transaction_to_student_step(datashop_file):
out_file = datashop_file.name[:-4]+'-rollup.txt'
students = {}
header = None
for row in csv.reader(datashop_file, delimiter='\t'):
if header is None:
header = row
continue
line = {}
kc_mods = {}
for i, h in enumerate(header):
if h[:4] == 'KC (':
line[h] = row[i]
if h not in kc_mods:
kc_mods[h] = []
if line[h] != "":
kc_mods[h].append(line[h])
continue
else:
h = h.lower()
line[h] = row[i]
if 'step name' in line:
pass
elif 'selection' in line and 'action' in line:
line['step name'] = line['selection'] + ' ' + line['action']
else:
raise Exception(
'No fields present to make step names, either add a "Step'
' Name" column or "Selection" and "Action" columns.')
if 'step name' in line and 'problem name' in line:
line['prob step'] = line['problem name'] + ' ' + line['step name']
for km in kc_mods:
line[km] = '~~'.join(kc_mods[km])
if line['anon student id'] not in students:
students[line['anon student id']] = []
students[line['anon student id']].append(line)
kc_model_names = list(set(kc_mods))
row_count = 0
with open(out_file, 'w') as out:
new_head = ['Row',
'Anon Student Id',
'Problem Name',
'Problem View',
'Step Name',
'Step Start Time',
'First Transaction Time',
'Correct Transaction Time',
'Step End Time',
'First Attempt',
'Incorrects',
'Corrects',
'Hints', ]
out.write('\t'.join(new_head))
for km in kc_model_names:
out.write('\t'+km+'\tOpportunity ('+km[4:])
out.write('\n')
stu_list = list(students.keys())
        stu_list.sort()
for stu in stu_list:
transactions = students[stu]
transactions = sorted(transactions, key=lambda k: parse(k['time']))
problem_views = {}
kc_ops = {}
row_count = 0
steps = {}
problem_name = ""
# Start iterating through the stuff.
for i, t in enumerate(transactions):
if problem_name != t['problem name']:
# we don't need to write the first row, because we don't
# have anything yet.
if i != 0:
if problem_name not in problem_views:
problem_views[problem_name] = 0
problem_views[problem_name] += 1
row_count = write_problem(steps,
problem_views[problem_name],
kc_ops, row_count,
kc_model_names, out)
steps = {}
if t['step name'] not in steps:
steps[t['step name']] = []
steps[t['step name']].append(t)
problem_name = t['problem name']
# need to write the last problem
if problem_name not in problem_views:
problem_views[problem_name] = 0
problem_views[problem_name] += 1
row_count = write_problem(steps, problem_views[problem_name],
kc_ops, row_count, kc_model_names, out)
steps = {}
print('transaction file rolled up into:', out_file)
return out_file
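# --- Editor's sketch (not part of the original module): rolling up a DataShop
# transaction export. The file name is a placeholder; the input must be a
# tab-delimited transaction file containing the columns referenced above.
if __name__ == '__main__':
    with open('transactions.txt') as datashop_file:
        rollup_path = transaction_to_student_step(datashop_file)
        print('student-step rollup written to:', rollup_path)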
|
cmaclell/pyAFM
|
pyafm/roll_up.py
|
Python
|
mit
| 7,059 | 0 |
#!/usr/bin/env python
from util import nodeenv_delegate
from setup import setup
if __name__ == "__main__":
setup(skip_dependencies=True)
nodeenv_delegate("npx")
|
outoftime/learnpad
|
tools/npx.py
|
Python
|
mit
| 171 | 0 |
class Solution(object):
def generateMatrix(self, n):
"""
:type n: int
:rtype: List[List[int]]
"""
if n <= 0:
return []
if n == 1:
return [[1]]
matrix = [[None] * n for _ in range(n)]
x = y = 0
direction = [(0,1),(1,0),(0,-1),(-1,0)]
count = 1
l = 0
r = n-1
u = 0
d = n-1
dc = 0
while l <= r or u <= d:
if l <= x <= r and u <= y <= d:
matrix[y][x] = count
count += 1
y += direction[dc&3][0]
x += direction[dc&3][1]
elif x > r:
u += 1
x -= 1
y += 1
dc += 1
elif y > d:
r -= 1
y -= 1
x -= 1
dc +=1
elif x < l:
d -= 1
x += 1
y -= 1
dc += 1
elif y < u:
l += 1
y += 1
x += 1
dc += 1
return matrix
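# --- Editor's note (not part of the original solution): a quick sanity check
# of the spiral fill for n = 3.
if __name__ == '__main__':
    assert Solution().generateMatrix(3) == [[1, 2, 3],
                                            [8, 9, 4],
                                            [7, 6, 5]]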
|
tedye/leetcode
|
Python/leetcode.059.spiral-matrix-ii.py
|
Python
|
mit
| 1,134 | 0.013228 |
# -*- coding: utf-8 -*-
#
# Yade documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 21:49:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# relevant posts to sphinx ML
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/b4fbc8d31d230fc4
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/118598245d5f479b
#####################
## custom yade roles
#####################
##
## http://docutils.sourceforge.net/docs/howto/rst-roles.html
import sys, os, re
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
import docutils
#
# needed for creating hyperlink targets.
# it should be cleand up and unified for both LaTeX and HTML via
# the pending_xref node which gets resolved to real link target
# by sphinx automatically once all docs have been processed.
#
# xrefs: http://groups.google.com/group/sphinx-dev/browse_thread/thread/d719d19307654548
#
#
import __builtin__
if 'latex' in sys.argv: __builtin__.writer='latex'
elif 'html' in sys.argv: __builtin__.writer='html'
elif 'epub' in sys.argv: __builtin__.writer='epub'
else: raise RuntimeError("Must have either 'latex' or 'html' on the command line (hack for reference styles)")
sys.path.append(os.path.abspath('./..'))
def yaderef_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yref:`` role, by making hyperlink to yade.wrapper.*. It supports :yref:`Link text<link target>` syntax, like usual hyperlinking roles."
id=rawtext.split(':',2)[2][1:-1]
txt=id; explicitText=False
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
explicitText=True
txt,id=m.group(1),m.group(2)
id=id.replace('::','.')
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='http://beta.arcig.cz/~eudoxos/yade/doxygen/?search=%s'%id,**options)
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='yade.wrapper.html#yade.wrapper.%s'%id,**options)
return [mkYrefNode(id,txt,rawtext,role,explicitText,lineno,options)],[]
def yadesrc_role(role,rawtext,lineno,inliner,options={},content=[]):
"Handle the :ysrc:`` role, making hyperlink to git repository webpage with that path. Supports :ysrc:`Link text<file/name>` syntax, like usual hyperlinking roles. If target ends with ``/``, it is assumed to be a directory."
id=rawtext.split(':',2)[2][1:-1]
txt=id
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
txt,id=m.group(1),m.group(2)
return [nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='https://github.com/yade/trunk/blob/master/%s'%id)],[] ### **options should be passed to nodes.reference as well
# map modules to their html (rst) filenames. Used for sub-modules, where e.g. SpherePack is yade._packSphere.SpherePack, but is documented from yade.pack.rst
moduleMap={
'yade._packPredicates':'yade.pack',
'yade._packSpheres':'yade.pack',
'yade._packObb':'yade.pack'
}
class YadeXRefRole(XRefRole):
#def process_link
def process_link(self, env, refnode, has_explicit_title, title, target):
print 'TARGET:','yade.wrapper.'+target
return '[['+title+']]','yade.wrapper.'+target
def mkYrefNode(target,text,rawtext,role,explicitText,lineno,options={}):
	"""Create hyperlink to yade target. Targets starting with literal 'yade.' are absolute, but the leading 'yade.' will be stripped from the link text. Absolute targets are supposed to live in page named yade.[module].html, anchored at #yade.[module2].[rest of target], where [module2] is identical to [module], unless mapped over by moduleMap.
Other targets are supposed to live in yade.wrapper (such as c++ classes)."""
writer=__builtin__.writer # to make sure not shadowed by a local var
import string
if target.startswith('yade.'):
module='.'.join(target.split('.')[0:2])
module2=(module if module not in moduleMap.keys() else moduleMap[module])
if target==module: target='' # to reference the module itself
uri=('%%%s#%s'%(module2,target) if writer=='latex' else '%s.html#%s'%(module2,target))
if not explicitText and module!=module2:
text=module2+'.'+'.'.join(target.split('.')[2:])
text=string.replace(text,'yade.','',1)
elif target.startswith('external:'):
exttarget=target.split(':',1)[1]
if not explicitText: text=exttarget
target=exttarget if '.' in exttarget else 'module-'+exttarget
uri=(('%%external#%s'%target) if writer=='latex' else 'external.html#%s'%target)
else:
uri=(('%%yade.wrapper#yade.wrapper.%s'%target) if writer=='latex' else 'yade.wrapper.html#yade.wrapper.%s'%target)
#print writer,uri
return nodes.reference(rawtext,docutils.utils.unescape(text),refuri=uri,**options)
#return [refnode],[]
def ydefault_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :ydefault:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
def yattrtype_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrtype:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
# FIXME: should return readable representation of bits of the number (yade.wrapper.AttrFlags enum)
def yattrflags_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrflags:`something` role. fixSignature handles it now in the member signature itself."
return [],[]
from docutils.parsers.rst import roles
def yaderef_role_2(type,rawtext,text,lineno,inliner,options={},content=[]): return YadeXRefRole()('yref',rawtext,text,lineno,inliner,options,content)
roles.register_canonical_role('yref', yaderef_role)
roles.register_canonical_role('ysrc', yadesrc_role)
roles.register_canonical_role('ydefault', ydefault_role)
roles.register_canonical_role('yattrtype', yattrtype_role)
roles.register_canonical_role('yattrflags', yattrflags_role)
## http://sphinx.pocoo.org/config.html#confval-rst_epilog
rst_epilog = """
.. |yupdate| replace:: *(auto-updated)*
.. |ycomp| replace:: *(auto-computed)*
.. |ystatic| replace:: *(static)*
"""
import collections
def customExclude(app, what, name, obj, skip, options):
if name=='clone':
if 'Serializable.clone' in str(obj): return False
return True
#escape crash on non iterable __doc__ in some qt object
if hasattr(obj,'__doc__') and obj.__doc__ and not isinstance(obj.__doc__, collections.Iterable): return True
if hasattr(obj,'__doc__') and obj.__doc__ and ('|ydeprecated|' in obj.__doc__ or '|yhidden|' in obj.__doc__): return True
#if re.match(r'\b(__init__|__reduce__|__repr__|__str__)\b',name): return True
if name.startswith('_'):
if name=='__init__':
# skip boost classes with parameterless ctor (arg1=implicit self)
if obj.__doc__=="\n__init__( (object)arg1) -> None": return True
# skip undocumented ctors
if not obj.__doc__: return True
# skip default ctor for serializable, taking dict of attrs
if obj.__doc__=='\n__init__( (object)arg1) -> None\n\nobject __init__(tuple args, dict kwds)': return True
#for i,l in enumerate(obj.__doc__.split('\n')): print name,i,l,'##'
return False
return True
return False
def isBoostFunc(what,obj):
return what=='function' and obj.__repr__().startswith('<Boost.Python.function object at 0x')
def isBoostMethod(what,obj):
"I don't know how to distinguish boost and non-boost methods..."
return what=='method' and obj.__repr__().startswith('<unbound method ');
def replaceLaTeX(s):
# replace single non-escaped dollars $...$ by :math:`...`
# then \$ by single $
s=re.sub(r'(?<!\\)\$([^\$]+)(?<!\\)\$',r'\ :math:`\1`\ ',s)
return re.sub(r'\\\$',r'$',s)
def fixSrc(app,docname,source):
source[0]=replaceLaTeX(source[0])
def fixDocstring(app,what,name,obj,options,lines):
# remove empty default roles, which is not properly interpreted by docutils parser
for i in range(0,len(lines)):
lines[i]=lines[i].replace(':ydefault:``','')
lines[i]=lines[i].replace(':yattrtype:``','')
lines[i]=lines[i].replace(':yattrflags:``','')
#lines[i]=re.sub(':``',':` `',lines[i])
# remove signature of boost::python function docstring, which is the first line of the docstring
if isBoostFunc(what,obj):
l2=boostFuncSignature(name,obj)[1]
# we must replace lines one by one (in-place) :-|
# knowing that l2 is always shorter than lines (l2 is docstring with the signature stripped off)
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
elif isBoostMethod(what,obj):
l2=boostFuncSignature(name,obj)[1]
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
# LaTeX: replace $...$ by :math:`...`
# must be done after calling boostFuncSignature which uses original docstring
for i in range(0,len(lines)): lines[i]=replaceLaTeX(lines[i])
def boostFuncSignature(name,obj,removeSelf=False):
"""Scan docstring of obj, returning tuple of properly formatted boost python signature
(first line of the docstring) and the rest of docstring (as list of lines).
The rest of docstring is stripped of 4 leading spaces which are automatically
added by boost.
removeSelf will attempt to remove the first argument from the signature.
"""
doc=obj.__doc__
if doc==None: # not a boost method
return None,None
nname=name.split('.')[-1]
docc=doc.split('\n')
if len(docc)<2: return None,docc
doc1=docc[1]
# functions with weird docstring, likely not documented by boost
if not re.match('^'+nname+r'(.*)->.*$',doc1):
return None,docc
if doc1.endswith(':'): doc1=doc1[:-1]
strippedDoc=doc.split('\n')[2:]
# check if all lines are padded
allLinesHave4LeadingSpaces=True
for l in strippedDoc:
if l.startswith(' '): continue
allLinesHave4LeadingSpaces=False; break
# remove the padding if so
if allLinesHave4LeadingSpaces: strippedDoc=[l[4:] for l in strippedDoc]
for i in range(len(strippedDoc)):
# fix signatures inside docstring (one function with multiple signatures)
strippedDoc[i],n=re.subn(r'([a-zA-Z_][a-zA-Z0-9_]*\() \(object\)arg1(, |)',r'\1',strippedDoc[i].replace('->','→'))
	# inspect docstring after mangling
if 'getViscoelasticFromSpheresInteraction' in name and False:
print name
print strippedDoc
print '======================'
for l in strippedDoc: print l
print '======================'
sig=doc1.split('(',1)[1]
if removeSelf:
# remove up to the first comma; if no comma present, then the method takes no arguments
# if [ precedes the comma, add it to the result (ugly!)
try:
ss=sig.split(',',1)
if ss[0].endswith('['): sig='['+ss[1]
else: sig=ss[1]
except IndexError:
# grab the return value
try:
sig=') -> '+sig.split('->')[-1]
#if 'Serializable' in name: print 1000*'#',name
except IndexError:
sig=')'
return '('+sig,strippedDoc
def fixSignature(app, what, name, obj, options, signature, return_annotation):
#print what,name,obj,signature#,dir(obj)
if what=='attribute':
doc=unicode(obj.__doc__)
ret=''
m=re.match('.*:ydefault:`(.*?)`.*',doc)
if m:
typ=''
#try:
# clss='.'.join(name.split('.')[:-1])
# instance=eval(clss+'()')
# typ='; '+getattr(instance,name.split('.')[-1]).__class__.__name__
# if typ=='; NoneType': typ=''
#except TypeError: ##no registered converted
# typ=''
dfl=m.group(1)
m2=re.match(r'\s*\(\s*\(\s*void\s*\)\s*\"(.*)\"\s*,\s*(.*)\s*\)\s*',dfl)
if m2: dfl="%s, %s"%(m2.group(2),m2.group(1))
if dfl!='': ret+=' (='+dfl+'%s)'%typ
			else: ret+=' (=uninitialized%s)'%typ
#m=re.match('.*\[(.{,8})\].*',doc)
#m=re.match('.*:yunit:`(.?*)`.*',doc)
#if m:
# units=m.group(1)
# print '@@@@@@@@@@@@@@@@@@@@@',name,units
# ret+=' ['+units+']'
return ret,None
elif what=='class':
ret=[]
if len(obj.__bases__)>0:
base=obj.__bases__[0]
while base.__module__!='Boost.Python':
ret+=[base.__name__]
if len(base.__bases__)>0: base=base.__bases__[0]
else: break
if len(ret):
return ' (inherits '+u' → '.join(ret)+')',None
else: return None,None
elif isBoostFunc(what,obj):
sig=boostFuncSignature(name,obj)[0] or ' (wrapped c++ function)'
return sig,None
elif isBoostMethod(what,obj):
sig=boostFuncSignature(name,obj,removeSelf=True)[0]
return sig,None
#else: print what,name,obj.__repr__()
#return None,None
from sphinx import addnodes
def parse_ystaticattr(env,attr,attrnode):
m=re.match(r'([a-zA-Z0-9_]+)\.(.*)\(=(.*)\)',attr)
if not m:
print 100*'@'+' Static attribute %s not matched'%attr
attrnode+=addnodes.desc_name(attr,attr)
klass,name,default=m.groups()
#attrnode+=addnodes.desc_type('static','static')
attrnode+=addnodes.desc_name(name,name)
plist=addnodes.desc_parameterlist()
if default=='': default='unspecified'
plist+=addnodes.desc_parameter('='+default,'='+default)
attrnode+=plist
attrnode+=addnodes.desc_annotation(' [static]',' [static]')
return klass+'.'+name
#############################
## set tab size
###################
## http://groups.google.com/group/sphinx-dev/browse_thread/thread/35b8071ffe9a8feb
def setup(app):
from sphinx.highlighting import lexers
from pygments.lexers.compiled import CppLexer
lexers['cpp'] = CppLexer(tabsize=3)
lexers['c++'] = CppLexer(tabsize=3)
from pygments.lexers.agile import PythonLexer
lexers['python'] = PythonLexer(tabsize=3)
app.connect('source-read',fixSrc)
app.connect('autodoc-skip-member',customExclude)
app.connect('autodoc-process-signature',fixSignature)
app.connect('autodoc-process-docstring',fixDocstring)
app.add_description_unit('ystaticattr',None,objname='static attribute',indextemplate='pair: %s; static method',parse_node=parse_ystaticattr)
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#
# HACK: change ipython console regexp from ipython_console_highlighting.py
import re
sys.path.append(os.path.abspath('.'))
import yade.config
if 1:
if yade.runtime.ipython_version<12:
import ipython_directive as id
else:
if 12<=yade.runtime.ipython_version<13:
import ipython_directive012 as id
elif 13<=yade.runtime.ipython_version<200:
import ipython_directive013 as id
else:
import ipython_directive200 as id
#The next four lines are for compatibility with IPython 0.13.1
ipython_rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
ipython_rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
ipython_promptin ='Yade [%d]:'
ipython_promptout=' -> [%d]: '
ipython_cont_spaces=' '
#For IPython <=0.12, the following lines are used
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.rgxcont=re.compile(r'(?: +)\.\.+:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout =' -> [%d]: ' # for some reason, out and cont must have the trailing space
id.fmtcont=' .\D.: '
id.rc_override=dict(prompt_in1="Yade [\#]:",prompt_in2=" .\D.:",prompt_out=r" -> [\#]: ")
if yade.runtime.ipython_version<12:
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt = re.compile("(Yade \[[0-9]+\]: )")
ich.IPythonConsoleLexer.output_prompt = re.compile("(( -> |Out)|\[[0-9]+\]: )")
ich.IPythonConsoleLexer.continue_prompt = re.compile("\s+\.\.\.+:")
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
#'matplotlib.sphinxext.mathmpl',
'ipython_console_highlighting',
'youtube',
'sphinx.ext.todo',
]
if yade.runtime.ipython_version<12:
extensions.append('ipython_directive')
else:
if 12<=yade.runtime.ipython_version<13:
extensions.append('ipython_directive012')
elif 13<=yade.runtime.ipython_version<200:
extensions.append('ipython_directive013')
else:
extensions.append('ipython_directive200')
# the sidebar extension
if False:
if writer=='html':
extensions+=['sphinx.ext.sidebar']
sidebar_all=True
sidebar_relling=True
#sidebar_abbrev=True
sidebar_tocdepth=3
## http://trac.sagemath.org/sage_trac/attachment/ticket/7549/trac_7549-doc_inheritance_underscore.patch
# GraphViz includes dot, neato, twopi, circo, fdp.
graphviz_dot = 'dot'
inheritance_graph_attrs = { 'rankdir' : 'BT' }
inheritance_node_attrs = { 'height' : 0.5, 'fontsize' : 12, 'shape' : 'oval' }
inheritance_edge_attrs = {}
my_latex_preamble=r'''
\usepackage{euler} % must be loaded before fontspec for the whole doc (below); this must be kept for pngmath, however
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{amsbsy}
%\usepackage{mathabx}
\usepackage{underscore}
\usepackage[all]{xy}
% Metadata of the pdf output
\hypersetup{pdftitle={Reference Manual}}
\hypersetup{pdfauthor={Vaclav Smilauer; Emanuele Catalano; Bruno Chareyre; Sergei Dorofeenko; Jerome Duriez; Nolan Dyck; Burak Er; Jan Elias; Alexander Eulitz; Anton Gladky; Christian Jakob; Francois Kneib; Janek Kozicki; Donia Marzougui; Raphael Maurin; Chiara Modenese; Luc Scholtes; Luc Sibille; Jan Stransky; Thomas Sweijen; Klaus Thoeni; Chao Yuan}}
\hypersetup{pdfkeywords={Discrete element method; dem; yade; documentation; manual; python; c++; git}}
% symbols
\let\mat\boldsymbol % matrix
\let\vec\boldsymbol % vector
\let\tens\boldsymbol % tensor
\def\normalized#1{\widehat{#1}}
\def\locframe#1{\widetilde{#1}}
% timestep
\def\Dt{\Delta t}
\def\Dtcr{\Dt_{\rm cr}}
% algorithm complexity
\def\bigO#1{\ensuremath{\mathcal{O}(#1)}}
% variants for greek symbols
\let\epsilon\varepsilon
\let\theta\vartheta
\let\phi\varphi
% shorthands
\let\sig\sigma
\let\eps\epsilon
% variables at different points of time
\def\prev#1{#1^-}
\def\pprev#1{#1^\ominus}
\def\curr#1{#1^{\circ}}
\def\nnext#1{#1^\oplus}
\def\next#1{#1^+}
% shorthands for geometry
\def\currn{\curr{\vec{n}}}
\def\currC{\curr{\vec{C}}}
\def\uT{\vec{u}_T}
\def\curruT{\curr{\vec{u}}_T}
\def\prevuT{\prev{\vec{u}}_T}
\def\currn{\curr{\vec{n}}}
\def\prevn{\prev{\vec{n}}}
% motion
\def\pprevvel{\pprev{\dot{\vec{u}}}}
\def\nnextvel{\nnext{\dot{\vec{u}}}}
\def\curraccel{\curr{\ddot{\vec{u}}}}
\def\prevpos{\prev{\vec{u}}}
\def\currpos{\curr{\vec{u}}}
\def\nextpos{\next{\vec{u}}}
\def\curraaccel{\curr{\dot{\vec{\omega}}}}
\def\pprevangvel{\pprev{\vec{\omega}}}
\def\nnextangvel{\nnext{\vec{\omega}}}
\def\loccurr#1{\curr{\locframe{#1}}}
\def\numCPU{n_{\rm cpu}}
\DeclareMathOperator{\Align}{Align}
\DeclareMathOperator{\sign}{sgn}
% sorting algorithms
\def\isleq#1{\currelem{#1}\ar@/^/[ll]^{\leq}}
\def\isnleq#1{\currelem{#1}\ar@/^/[ll]^{\not\leq}}
\def\currelem#1{\fbox{$#1$}}
\def\sortSep{||}
\def\sortInv{\hbox{\phantom{||}}}
\def\sortlines#1{\xymatrix@=3pt{#1}}
\def\crossBound{||\mkern-18mu<}
'''
pngmath_latex_preamble=r'\usepackage[active]{preview}'+my_latex_preamble
pngmath_use_preview=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index-toctree-reference'
# General information about the project.
project = u'Yade'
copyright = u'2009, Václav Šmilauer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'Yade documentation 2nd ed.'
# The full version, including alpha/beta/rc tags.
release = 'Yade documentation 2nd ed.'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build','../_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['yade.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar':'true','collapsiblesidebar':'true','rightsidebar':'false'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../fig/yade-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../fig/yade-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static-html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_index='index.html'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'index':'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yadedoc'
# -- Options for LaTeX output --------------------------------------------------
my_maketitle=r'''
\begin{titlepage}
\begin{flushright}
\hrule{}
% Upper part of the page
\begin{flushright}
\includegraphics[width=0.15\textwidth]{yade-logo.png}\par
\end{flushright}
\vspace{20 mm}
\text{\sffamily\bfseries\Huge Reference Manual}\\
\vspace{20 mm}
%\vspace{70 mm}
\begin{sffamily}\bfseries\Large
V\'{a}clav \v{S}milauer, Emanuele Catalano, Bruno Chareyre, Sergei Dorofeenko, J\'er\^ome Duriez, Nolan Dyck, Jan Eliáš, Burak Er, Alexander Eulitz, Anton Gladky, Christian Jakob, Fran\c{c}ois Kneib, Janek Kozicki, Donia Marzougui, Rapha\"el Maurin, Chiara Modenese, Luc Scholt\`{e}s, Luc Sibille, Jan Str\'{a}nsk\'{y}, Thomas Sweijen, Klaus Thoeni, Chao Yuan
\end{sffamily}
\vspace{20 mm}
\hrule{}
\vfill
\textit{\large Yade Documentation 2nd edition, 2015}\\
\textit{based on Yade 1.14.0}\\
\end{flushright}
\end{titlepage}
\text{\sffamily\bfseries\large Citing this document:}\\
\v{S}milauer V. et al. (2015). Reference Manual. In:\textit{Yade Documentation 2nd ed.} doi:10.5281/zenodo.34045. http://yade-dem.org\\
See also http://yade-dem/doc/citing.html.
'''
latex_elements=dict(
papersize='a4paper',
fontpkg=r'''
\usepackage{euler}
\usepackage{fontspec,xunicode,xltxtra}
%\setmainfont[BoldFont={LMRoman10 Bold}]{CMU Concrete} %% CMU Concrete must be installed by hand as otf
''',
utf8extra='',
fncychap='',
preamble=my_latex_preamble,
footer='',
inputenc='',
fontenc='',
maketitle=my_maketitle,
)
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index-toctree-reference', 'YadeReference.tex', u'Reference Manual',
u'Václav Šmilauer et al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../fig/yade-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
hbenniou/trunk
|
doc/sphinx/book/confReference.py
|
Python
|
gpl-2.0
| 26,536 | 0.030762 |
#!/usr/bin/python2.5
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bcp47languageparser import *
|
zarnold/transitfeed
|
extensions/googletransit/pybcp47/__init__.py
|
Python
|
apache-2.0
| 637 | 0 |
""" You've recently read "The Gold-Bug" by Edgar Allan Poe, and were so impressed by the cryptogram in it that
decided to try and decipher an encrypted text yourself. You asked your friend to encode a piece of text using
a substitution cipher, and now have an encryptedText that you'd like to decipher.
The encryption process in the story you read involves frequency analysis: it is known that letter 'e' is the
most frequent one in the English language, so it's pretty safe to assume that the most common character in the
encryptedText stands for 'e'. To begin with, implement a function that will find the most frequent character
in the given encryptedText.
Example
For encryptedText = "$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ", the output should be
frequencyAnalysis(encryptedText) = 'C'.
Letter 'C' appears in the text more than any other character (4 times), which is why it is the answer.
""
from collections import Counter # "Counter" is what CodeFights asks for
def frequencyAnalysis(encryptedText):
return max(Counter(encryptedText), key=Counter(encryptedText).get) # CodeFights asks to change this line only
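# Illustrative usage sketch (not part of the original submission): the sample
# string below is the one from the problem statement above, so the most
# frequent character should be 'C'.
if __name__ == '__main__':
    assert frequencyAnalysis("$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ") == 'C'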
|
ntthuy11/CodeFights
|
Arcade/04_Python/07_CaravanOfCollections/frequencyAnalysis.py
|
Python
|
mit
| 1,131 | 0.000884 |
import json
with open('medalofhonor-old.json') as input:
data = json.load(input)
months = {
'January': 1,
'February': 2,
'March': 3,
'April': 4,
'May': 5,
'June': 6,
'July': 7,
'August': 8,
'September': 9,
'October': 10,
'November': 11,
'December': 12,
}
def force_int(num):
try:
return int(num)
except TypeError:
return -1
except ValueError:
return -1
def parse_locdate(record):
if ',' in record['Place / Date']:
try:
year, month, day = record['Place / Date'].rsplit(",", 1)[1].strip().split(" ")
year, day = int(year), int(day)
month = months[month]
return [year, month, day]
except KeyError:
return [int(record['Place / Date'][-4:]), -1, -1]
except (IndexError, ValueError) as e:
return [-1, -1, -1]
else:
return [-1, -1, -1]
def parse_birth(record):
if ',' in record['Born']:
date, location = record['Born'].split(",", 1)
try:
year, month, day = date.split(" ")
year, day = int(year), int(day)
month = months[month]
return [year, month, day, location]
except:
return [-1, -1, -1, record['Born']]
else:
try:
return [-1, -1, int(record['Born']), ""]
except ValueError:
return [-1, -1, -1, record['Born']]
from pprint import pprint
[parse_locdate(record) for record in data]
new_data = [
{
'name': record['name'].title(),
'death': record['Departed'] == 'Yes',
'awarded': {
'date': {
'year': parse_locdate(record)[2],
'month': parse_locdate(record)[1],
'day': parse_locdate(record)[0],
'full': '{}-{}-{}'.format(parse_locdate(record)[2], parse_locdate(record)[1], parse_locdate(record)[0])
},
'location': {
'latitude': force_int(record['location'].get('latitude', 0)),
'longitude': force_int(record['location'].get('longitude', 0)),
'name': record['location'].get('name', 'Unknown'),
},
'General Order number': force_int(record['G.O. Number']),
'citation': record['citation'],
'issued': record['Date of Issue'],
'accredited to': record['Accredited To']
},
'birth': {
'date': {
'year': parse_birth(record)[2],
'month': parse_birth(record)[1],
'day': parse_birth(record)[0],
},
'location name': parse_birth(record)[3]
},
'military record': {
'rank': record['Rank'],
'division': record['Division'],
'organization': record['Organization'],
'company': record['Company'],
'entered service at': record['Entered Service At']
},
'metadata': {
'link': record['link']
}
} for record in data]
new_data = list(sorted(new_data, key=lambda r: r['awarded']['issued']))
with open('medal_of_honor.json', 'w') as output:
json.dump(new_data, output)
|
RealTimeWeb/datasets
|
preprocess/medal_of_honor/fix.py
|
Python
|
gpl-2.0
| 3,268 | 0.004896 |
#!/usr/bin/env python
import sys
import re
import subprocess
import os
import optparse
import datetime
import inspect
import threading
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
VERSION = '1.0.0'
ALL_TESTS = []
LOGFILE = None
CURRENT_SUITE = 'default'
DEFAULT_TEST_TIMEOUT = 30
#See README for detailed info
#Start a new test suite
def DefSuite(suite):
global CURRENT_SUITE
CURRENT_SUITE = suite
#Define a test - to be called in the testsuite files
def DefTest(cmd, name, success_codes=None, timeout=None):
if success_codes is None:
success_codes = [0]
if name in set(t.name for t in ALL_TESTS):
        raise NameError("The test name '%s' is already defined" % name)
if not timeout:
timeout = int(DEFAULT_TEST_TIMEOUT)
#Figure out the file and line where the test is defined
frame = inspect.stack()[1]
cwd = os.path.dirname(inspect.getfile(frame[0])) or './'
test_location = {'cwd': cwd,
'filename': inspect.getfile(frame[0]),
'lineno': frame[0].f_lineno
}
t = TestCase(test_location, cmd, name, CURRENT_SUITE, success_codes, timeout)
ALL_TESTS.append(t)
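#Illustrative sketch (names and commands below are hypothetical, not from the
#original project): a test definition file loaded via execpyfile() might contain
#    DefSuite('examples')
#    DefTest('echo hello', 'echo_hello')
#    DefTest('sleep 5', 'slow_example', timeout=2)
#Expected output is compared against <name>.stdout / <name>.stderr files placed
#next to the definition file, when those files exist.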
class SimpleEnum(object):
def __str__(self):
return self.__class__.__name__
def __eq__(self, other):
return self.__class__ == other.__class__
def __repr__(self):
return str(self)
class TestResult(object):
class PASS(SimpleEnum):
pass
class FAIL(SimpleEnum):
pass
class TIMEDOUT(SimpleEnum):
pass
class NOTRUN(SimpleEnum):
pass
class TestFailure(object):
def __init__(self, test, msg):
self.test_name = test.name
self.result = test.result
self.msg = msg
def __str__(self):
return '%s %s:\n%s' % (self.result, self.test_name, self.msg)
def __repr__(self):
return str(self)
class MultiDelegate(object):
def __init__(self):
self.delegates = []
def __getattr__(self, name):
def handler(*args, **kwargs):
for d in self.delegates:
method = getattr(d, name)
method(*args, **kwargs)
return handler
class TerminalLog(object):
GREEN = '\033[92m'
RED = '\033[91m'
ENDC = '\033[0m'
def __init__(self, out = sys.stdout, verbose=False, show_command=False):
self.out = out
self.verbose = verbose
self.show_command = show_command
self.colorize = out.isatty()
def maybe_color(self, s, color):
if self.colorize:
return color + s + self.ENDC
else:
return s
def begin(self):
self.out.write(
'''## Testsuite started
## Invocation: %s
## Time: %s
''' % (' '.join(sys.argv), str(datetime.datetime.now())))
def start_suite(self, suite):
self.out.write('\n## Running testsuite: %s\n' % suite)
self.out.flush()
def start_test(self, test):
if self.show_command:
self.out.write(' Command: %s\n' % test.cmd)
self.out.write(' %-70s' % test.name)
self.out.flush()
def end_test(self, test):
if test.result == TestResult.PASS():
msg = self.maybe_color(str(test.result), self.GREEN)
elif test.result == TestResult.NOTRUN():
msg = str(test.result)
else:
msg = self.maybe_color(str(test.result), self.RED)
self.out.write('%s\n' % msg)
if self.verbose:
# might already shown the command
if test.errors:
self.out.write('Failed command: %s\n' % test.cmd)
for err in test.errors:
self.out.write('\n%s\n\n' % err)
self.out.flush()
def end(self, num_tests, num_failures):
self.out.write('\n')
if num_failures:
self.out.write(self.maybe_color('%d of %d tests failed\n' %
(num_failures, num_tests), self.RED))
else:
self.out.write(self.maybe_color('All %d tests passed\n' %
num_tests, self.GREEN))
if LOGFILE:
self.out.write('View complete log in the %s file.\n' % (LOGFILE))
self.out.flush()
class TextLog(object):
def __init__(self, logfile_name, verbose = False):
self.out = open(logfile_name, 'w')
self.logfile_name = logfile_name
self.verbose = verbose
def begin(self):
self.out.write(
'''## Testsuite started
## Time: %s
## Invocation: %s
''' % (str(datetime.datetime.now()), ' '.join(sys.argv)))
def start_suite(self, suite):
self.out.write('\n## Running testsuite: %s\n' % suite)
self.out.flush()
def start_test(self, test):
self.out.write('\n## Test: %s\n' % test.name)
self.out.write('## Command: %s\n' % test.cmd)
def end_test(self, test):
duration = timedelta_total_seconds(test.end_time - test.start_time)
self.out.write('## Duration: %f sec.\n' % duration)
self.out.write('## Result: %s\n' % test.result)
if test.errors:
self.out.write('## %s failures:\n' % str(test))
for err in test.errors:
self.out.write('\n%s\n' % err)
self.out.flush()
def end(self, num_tests, num_failures):
self.out.write('\n')
if num_failures:
self.out.write('%d of %d tests failed\n' % (num_failures, num_tests))
else:
self.out.write('All %d tests passed\n' % num_tests)
self.out.close()
class XMLLog(object):
def __init__(self, logfile_name):
self.out = open(logfile_name, 'w')
self.logfile_name = logfile_name
self.xml_doc = XMLGenerator(self.out, 'utf-8')
self.suite_started = False
def begin(self):
self.xml_doc.startDocument()
self.xml_doc.startElement('testsuites',AttributesImpl({}))
self.xml_doc.characters('\n')
self.xml_doc.startElement('invocation',AttributesImpl({}))
self.xml_doc.characters(' '.join(sys.argv))
self.xml_doc.endElement('invocation')
self.xml_doc.characters('\n')
def start_suite(self, suite):
if self.suite_started:
self.xml_doc.endElement('testsuite')
self.xml_doc.characters('\n')
self.suite_started = True
attrs = AttributesImpl({'name': suite})
self.xml_doc.startElement('testsuite', attrs)
self.xml_doc.characters('\n')
def start_test(self, test):
attrs = AttributesImpl({'name': test.name})
self.xml_doc.startElement('testcase', attrs)
self.xml_doc.characters('\n')
def end_test(self, test):
duration = timedelta_total_seconds(test.end_time - test.start_time)
self.xml_doc.startElement('duration',AttributesImpl({}))
self.xml_doc.characters(str(duration))
self.xml_doc.endElement('duration')
self.xml_doc.characters('\n')
attrs = AttributesImpl({})
self.xml_doc.startElement('result', attrs)
self.xml_doc.characters(str(test.result))
self.xml_doc.endElement('result')
self.xml_doc.characters('\n')
if test.errors:
self.xml_doc.startElement('errors', attrs)
self.xml_doc.characters('\n')
for err in test.errors:
self.xml_doc.startElement('error', attrs)
self.xml_doc.characters(str(err))
self.xml_doc.endElement('error')
self.xml_doc.characters('\n')
self.xml_doc.endElement('errors')
self.xml_doc.endElement('testcase')
self.xml_doc.characters('\n')
def end(self, num_tests, num_failures):
if self.suite_started:
self.xml_doc.endElement('testsuite')
self.xml_doc.characters('\n')
attrs = AttributesImpl({'tests': str(num_tests),
'failures': str(num_failures)})
self.xml_doc.startElement('result', attrs)
if num_failures:
self.xml_doc.characters(str(TestResult.FAIL()))
else:
self.xml_doc.characters(str(TestResult.PASS()))
self.xml_doc.endElement('result')
self.xml_doc.endElement('testsuites')
self.xml_doc.characters('\n')
self.xml_doc.endDocument()
self.out.close()
class TestCase(object):
def __init__(self, location, cmd, name, suite, success_codes, timeout):
self.location = location
self.cmd = cmd
self.name = name
self.suite = suite
self.success_codes = success_codes
self.timeout = timeout
self.result = TestResult.NOTRUN()
self.errors = []
self.start_time = 0
self.end_time = 0
self.stdout_run_name = os.path.join(self.cwd,name + '.stdout-actual')
self.stderr_run_name = os.path.join(self.cwd,name + '.stderr-actual')
self.stdout_name = os.path.join(self.cwd,name + '.stdout')
self.stderr_name = os.path.join(self.cwd,name + '.stderr')
self.stdout_diff_name = os.path.join(self.cwd,name + '.stdout-diff')
self.stderr_diff_name = os.path.join(self.cwd,name + '.stderr-diff')
@property
def cwd(self):
return self.location['cwd']
@property
def filename(self):
return self.location['filename']
@property
def lineno(self):
return self.location['lineno']
def __str__(self):
return '%s at %s:%d' %(self.name,
self.filename,
self.lineno)
def run_test(self):
timedout = False
self.start_time = datetime.datetime.now()
(timedout, exitcode) = execute_program(self.stdout_run_name,
self.stderr_run_name,
self.cwd,
self.cmd,
self.timeout)
self.end_time = datetime.datetime.now()
self.result = TestResult.PASS()
if timedout:
self.result = TestResult.TIMEDOUT()
self.errors.append(TestFailure(self,'Timed out after %d seconds' % self.timeout))
self.cleanup()
return
if self.success_codes and exitcode not in self.success_codes:
self.result = TestResult.FAIL()
self.errors.append(TestFailure(self,
'Terminated with unexpected exit code %d' % exitcode))
#Now diff the stdout and stderr output
stdout_name = self.stdout_name
if not os.path.exists(stdout_name):
stdout_name = '/dev/null'
stderr_name = self.stderr_name
if not os.path.exists(stderr_name):
stderr_name = '/dev/null'
stdout_diff = diff(stdout_name, self.stdout_run_name, self.stdout_diff_name)
stderr_diff = diff(stderr_name, self.stderr_run_name, self.stderr_diff_name)
if stdout_diff:
self.result = TestResult.FAIL()
self.errors.append(TestFailure(self, stdout_diff))
if stderr_diff:
self.result = TestResult.FAIL()
self.errors.append(TestFailure(self, stderr_diff))
self.cleanup()
def generate(self):
execute_program(self.stdout_name,
self.stderr_name,
self.cwd,
self.cmd,
self.timeout)
if os.path.getsize(self.stdout_name) == 0:
silentremove(self.stdout_name)
if os.path.getsize(self.stderr_name) == 0:
silentremove(self.stderr_name)
def cleanup(self):
silentremove(self.stdout_run_name)
silentremove(self.stderr_run_name)
silentremove(self.stdout_diff_name)
silentremove(self.stderr_diff_name)
def execute_program(stdout_name, stderr_name, cwd, cmd, timeout):
with open(stdout_name, 'wb') as stdout:
with open(stderr_name, 'wb') as stderr:
process= subprocess.Popen(cmd,
shell=True,
stdout=stdout,
stderr=stderr,
cwd=cwd
)
return wait_process(process, timeout)
def wait_process(proc, timeout):
proc_thread = threading.Thread(target=proc.communicate)
proc_thread.daemon = True
proc_thread.start()
proc_thread.join(timeout=timeout)
if proc_thread.is_alive():
try:
proc.kill()
return (True, -1)
except OSError:
#This takes care of most of the races between
#is_alive and kill. Though they don't matter much
#for our cases where the timeout should just be a guard
pass
return (False, proc.returncode)
def diff(orig, new, out):
cmd = ['diff', '-u', orig, new]
with open(out, 'w') as stdout:
exitcode = subprocess.call(cmd, stdout=stdout, stderr=stdout)
if exitcode not in [0, 1]: #diff itself failed
raise RuntimeError('Failed(exitcode=%d: %s ' %
(exitcode, str(cmd)))
with open(out, 'r') as diff_result:
return diff_result.read()
def run_tests(log, verbose=False, errexit=False):
num_tests = 0
num_failures = 0
current_suite = None
log.begin()
for test in ALL_TESTS:
num_tests += 1
if test.suite != current_suite:
log.start_suite(test.suite)
current_suite = test.suite
log.start_test(test)
test.run_test()
if test.errors:
num_failures += 1
log.end_test(test)
if test.errors and errexit:
break
log.end(num_tests, num_failures)
return num_tests, num_failures
def generate_test_files(errexit=False):
num_failures = 0
for test in ALL_TESTS:
sys.stdout.write('Regenerating %s with command: %s\n' % (test.name,
test.cmd))
try:
test.generate()
except Exception as ex:
sys.stdout.write('%s failed: %s\n' % (test.name, str(ex)))
num_failures += 1
if num_failures > 0 and errexit:
break
return num_failures == 0
def timedelta_total_seconds(t):
"""Total seconds in the duration.
Needed since timedelta.total_seconds() doesn't exist in Python 2.6"""
return ((t.days * 86400.0 + t.seconds)*10.0**6 + t.microseconds) / 10.0**6
def execpyfile(filename, defines):
with open(filename) as f:
exec_globals = defines.copy()
exec_globals.update({'DefTest': DefTest, 'DefSuite': DefSuite})
exec_globals.update(defines)
code = compile(f.read(), filename, 'exec')
exec(code, exec_globals, None)
def silentremove(filename):
try:
os.remove(filename)
except OSError:
pass
def clean():
silentremove(LOGFILE)
for test in ALL_TESTS:
test.cleanup()
def list_tests():
if ALL_TESTS:
sys.stdout.write('Available tests:\n')
l = max([len(t.suite) for t in ALL_TESTS]) + 1
sys.stdout.write('%-*s Name\n' % (l, 'Suite'))
sys.stdout.write('%s\n' % ('-' * (l + 5)))
for test in ALL_TESTS:
sys.stdout.write('%-*s %s\n' % (l, test.suite, test.name))
sys.stdout.flush()
else:
sys.stdout.write('No tests found\n')
def filter_tests(keywords, getter):
global ALL_TESTS
filtered_tests = []
for include in keywords:
r = re.compile(include)
filtered_tests += [t for t in ALL_TESTS if r.search(getter(t)) and t not in filtered_tests]
ALL_TESTS = filtered_tests
def parse_defines(defines):
defines_dict = {}
for d in defines:
(var, sep, value) = d.partition('=')
if not sep:
sys.stdout.write("Error define '%s' is not on the form name=value\n" % d)
sys.exit(1)
defines_dict[var] = value
return defines_dict
def main():
global DEFAULT_TEST_TIMEOUT
parser = optparse.OptionParser(usage='usage: %prog [options] test1 ...',
version=VERSION)
parser.add_option('-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Verbose output, shows diffs on failed testcases')
parser.add_option('-C', '--show-command',
action='store_true', dest='show_command', default=False,
help='Show the command being run for each test.')
parser.add_option('-x', '--exit_success',
action='store_true', dest='exit_success', default=False,
help='Always exit with exit code 0. '+
'Default exit with 0 if all test succeeds or 1 if '
'any test fails')
parser.add_option('-c', '--clean',
action='store_true', dest='clean', default=False,
help='Clean any output artifacts left by the tests')
parser.add_option('-l', '--list',
action='store_true', dest='list_tests', default=False,
help='List all the test cases')
parser.add_option('-e', '--errexit',
action='store_true', dest='errexit', default=False,
help='Stop on the first test that fails')
parser.add_option('-f', '--logfile',
action='store', dest='logfile',
help='Name of the output log file')
parser.add_option('-k', '--keyword',
action='append', dest='keyword', default=[],
help='Run only tests matching the given keyword. ' +
'KEYWORD is a regexp. Can be given multiple times.')
parser.add_option('-s', '--suite',
action='append', dest='suite', default=[],
help='Run only test suites matching the given suite. ' +
'SUITE is a regexp. Can be given multiple times.')
parser.add_option('--xml',
action='store_true', dest='xml', default=False,
help='Write the logfile in XML format')
parser.add_option('-D', '--define',
action='append', dest='define', default=[],
help='Define a variable available to the test files.' +
'The variable is defined with the form name=value.' +
'Can be given multiple times')
parser.add_option('-g', '--generate',
action='store_true', dest='generate', default=False,
help='Run the defined test case commands and generate ' +
'the output files. Use with care, this overwrites ' +
'existing output files')
parser.add_option('-t', '--timeout',
action='store', dest='timeout', default=None,
help='Override default individual test timeout in seconds. ' +
'Does not affect tests that explicitly defined a timeout')
(options, args) = parser.parse_args()
if not args:
sys.stdout.write('Error: No test files given\n')
parser.print_help()
return 1
global LOGFILE
if options.logfile:
LOGFILE = options.logfile
elif options.xml:
LOGFILE='testsuite.xml'
else:
LOGFILE='testsuite.log'
if options.timeout:
DEFAULT_TEST_TIMEOUT = options.timeout
defines = parse_defines(options.define)
for testfile in args:
execpyfile(testfile, defines)
if options.keyword:
filter_tests(options.keyword, lambda t: t.name)
if options.suite:
filter_tests(options.suite, lambda t: t.suite)
if options.clean:
clean()
return 0
if options.list_tests:
list_tests()
return 0
if not ALL_TESTS:
sys.stdout.write('No test cases defined in any of the definition files:\n' +
'\n'.join(args) + '\n')
return 1
ok = False
if options.generate:
ok = generate_test_files(errexit=options.errexit)
else:
log = MultiDelegate()
if options.xml:
log.delegates.append(XMLLog(LOGFILE))
else:
log.delegates.append(TextLog(LOGFILE, options.verbose))
log.delegates.append(TerminalLog(verbose=options.verbose,
show_command=options.show_command))
(total, failed) = run_tests(log, errexit=options.errexit)
ok = failed == 0
if options.exit_success or ok:
return 0
return 1
if __name__== '__main__':
sys.exit(main())
|
noselasd/testrunner
|
testrunner.py
|
Python
|
mit
| 21,002 | 0.005142 |
from ipctest import IpcTest
from gi.repository import i3ipc
import pytest
@pytest.mark.skip(reason='TODO')
class TestGetConfig(IpcTest):
def test_get_config(self, i3):
config = i3.get_config()
assert isinstance(config, i3ipc.ConfigReply)
with open('test/i3.config') as f:
assert config.config == f.read()
|
acrisci/i3ipc-glib
|
test/test_get_config.py
|
Python
|
gpl-3.0
| 347 | 0 |
from twisted.trial import unittest
class TestTwisted(unittest.TestCase):
def test(self):
pass
def test_fail(self):
self.fail("I failed")
def test_error(self):
raise TypeError("oops, wrong type")
def test_skip(self):
raise unittest.SkipTest('skip me')
|
DESHRAJ/fjord
|
vendor/packages/nose/functional_tests/support/twist/test_twisted.py
|
Python
|
bsd-3-clause
| 304 | 0.003289 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 12:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notification',
name='queued',
field=models.BooleanField(db_index=True, default=False),
),
]
|
CSchool/SchoolSite
|
CSchoolSite/main/migrations/0002_notification_queued.py
|
Python
|
apache-2.0
| 457 | 0 |
"""
Renderers are used to serialize a response into specific media types.
They give us a generic way of being able to handle various media types
on the response, such as JSON encoded data or HTML output.
REST framework also provides an HTML renderer that renders the browsable API.
"""
from __future__ import unicode_literals
import json
import django
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.core.paginator import Page
from django.http.multipartparser import parse_header
from django.template import Context, RequestContext, loader, Template
from django.test.client import encode_multipart
from django.utils import six
from rest_framework import exceptions, serializers, status, VERSION
from rest_framework.compat import SHORT_SEPARATORS, LONG_SEPARATORS, INDENT_SEPARATORS
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.request import is_form_media_type, override_method
from rest_framework.utils import encoders
from rest_framework.utils.breadcrumbs import get_breadcrumbs
from rest_framework.utils.field_mapping import ClassLookupDict
def zero_as_none(value):
return None if value == 0 else value
class BaseRenderer(object):
"""
All renderers should extend this class, setting the `media_type`
and `format` attributes, and override the `.render()` method.
"""
media_type = None
format = None
charset = 'utf-8'
render_style = 'text'
def render(self, data, accepted_media_type=None, renderer_context=None):
raise NotImplementedError('Renderer class requires .render() to be implemented')
class JSONRenderer(BaseRenderer):
"""
Renderer which serializes to JSON.
"""
media_type = 'application/json'
format = 'json'
encoder_class = encoders.JSONEncoder
ensure_ascii = not api_settings.UNICODE_JSON
compact = api_settings.COMPACT_JSON
# We don't set a charset because JSON is a binary encoding,
# that can be encoded as utf-8, utf-16 or utf-32.
# See: http://www.ietf.org/rfc/rfc4627.txt
# Also: http://lucumr.pocoo.org/2013/7/19/application-mimetypes-and-encodings/
charset = None
def get_indent(self, accepted_media_type, renderer_context):
if accepted_media_type:
# If the media type looks like 'application/json; indent=4',
# then pretty print the result.
# Note that we coerce `indent=0` into `indent=None`.
base_media_type, params = parse_header(accepted_media_type.encode('ascii'))
try:
return zero_as_none(max(min(int(params['indent']), 8), 0))
except (KeyError, ValueError, TypeError):
pass
# If 'indent' is provided in the context, then pretty print the result.
# E.g. If we're being called by the BrowsableAPIRenderer.
return renderer_context.get('indent', None)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `data` into JSON, returning a bytestring.
"""
if data is None:
return bytes()
renderer_context = renderer_context or {}
indent = self.get_indent(accepted_media_type, renderer_context)
if indent is None:
separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS
else:
separators = INDENT_SEPARATORS
ret = json.dumps(
data, cls=self.encoder_class,
indent=indent, ensure_ascii=self.ensure_ascii,
separators=separators
)
# On python 2.x json.dumps() returns bytestrings if ensure_ascii=True,
# but if ensure_ascii=False, the return type is underspecified,
# and may (or may not) be unicode.
# On python 3.x json.dumps() returns unicode strings.
if isinstance(ret, six.text_type):
# We always fully escape \u2028 and \u2029 to ensure we output JSON
# that is a strict javascript subset. If bytes were returned
# by json.dumps() then we don't have these characters in any case.
# See: http://timelessrepo.com/json-isnt-a-javascript-subset
ret = ret.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
return bytes(ret.encode('utf-8'))
return ret
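# Illustrative usage sketch (values are made up, not from the original module):
# the indent is negotiated from the accepted media type, so
#     JSONRenderer().render({'a': 1}, 'application/json; indent=4', {})
# pretty-prints the output, while
#     JSONRenderer().render({'a': 1})
# falls back to the compact/long separators selected by COMPACT_JSON.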
class TemplateHTMLRenderer(BaseRenderer):
"""
An HTML renderer for use with templates.
The data supplied to the Response object should be a dictionary that will
be used as context for the template.
The template name is determined by (in order of preference):
1. An explicit `.template_name` attribute set on the response.
2. An explicit `.template_name` attribute set on this class.
3. The return result of calling `view.get_template_names()`.
For example:
data = {'users': User.objects.all()}
return Response(data, template_name='users.html')
For pre-rendered HTML, see StaticHTMLRenderer.
"""
media_type = 'text/html'
format = 'html'
template_name = None
exception_template_names = [
'%(status_code)s.html',
'api_exception.html'
]
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders data to HTML, using Django's standard template rendering.
The template name is determined by (in order of preference):
1. An explicit .template_name set on the response.
2. An explicit .template_name set on this class.
3. The return result of calling view.get_template_names().
"""
renderer_context = renderer_context or {}
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
if response.exception:
template = self.get_exception_template(response)
else:
template_names = self.get_template_names(response, view)
template = self.resolve_template(template_names)
context = self.resolve_context(data, request, response)
return template.render(context)
def resolve_template(self, template_names):
return loader.select_template(template_names)
def resolve_context(self, data, request, response):
if response.exception:
data['status_code'] = response.status_code
return RequestContext(request, data)
def get_template_names(self, response, view):
if response.template_name:
return [response.template_name]
elif self.template_name:
return [self.template_name]
elif hasattr(view, 'get_template_names'):
return view.get_template_names()
elif hasattr(view, 'template_name'):
return [view.template_name]
raise ImproperlyConfigured(
'Returned a template response with no `template_name` attribute set on either the view or response'
)
def get_exception_template(self, response):
template_names = [name % {'status_code': response.status_code}
for name in self.exception_template_names]
try:
# Try to find an appropriate error template
return self.resolve_template(template_names)
except Exception:
# Fall back to using eg '404 Not Found'
return Template('%d %s' % (response.status_code,
response.status_text.title()))
# Note, subclass TemplateHTMLRenderer simply for the exception behavior
class StaticHTMLRenderer(TemplateHTMLRenderer):
"""
An HTML renderer class that simply returns pre-rendered HTML.
The data supplied to the Response object should be a string representing
the pre-rendered HTML content.
For example:
data = '<html><body>example</body></html>'
return Response(data)
For template rendered HTML, see TemplateHTMLRenderer.
"""
media_type = 'text/html'
format = 'html'
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
response = renderer_context['response']
if response and response.exception:
request = renderer_context['request']
template = self.get_exception_template(response)
context = self.resolve_context(data, request, response)
return template.render(context)
return data
class HTMLFormRenderer(BaseRenderer):
"""
    Renders serializer data into an HTML form.
If the serializer was instantiated without an object then this will
return an HTML form not bound to any object,
otherwise it will return an HTML form with the appropriate initial data
populated from the object.
Note that rendering of field and form errors is not currently supported.
"""
media_type = 'text/html'
format = 'form'
charset = 'utf-8'
template_pack = 'rest_framework/horizontal/'
base_template = 'form.html'
default_style = ClassLookupDict({
serializers.Field: {
'base_template': 'input.html',
'input_type': 'text'
},
serializers.EmailField: {
'base_template': 'input.html',
'input_type': 'email'
},
serializers.URLField: {
'base_template': 'input.html',
'input_type': 'url'
},
serializers.IntegerField: {
'base_template': 'input.html',
'input_type': 'number'
},
serializers.DateTimeField: {
'base_template': 'input.html',
'input_type': 'datetime-local'
},
serializers.DateField: {
'base_template': 'input.html',
'input_type': 'date'
},
serializers.TimeField: {
'base_template': 'input.html',
'input_type': 'time'
},
serializers.FileField: {
'base_template': 'input.html',
'input_type': 'file'
},
serializers.BooleanField: {
'base_template': 'checkbox.html'
},
serializers.ChoiceField: {
'base_template': 'select.html', # Also valid: 'radio.html'
},
serializers.MultipleChoiceField: {
'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
},
serializers.RelatedField: {
'base_template': 'select.html', # Also valid: 'radio.html'
},
serializers.ManyRelatedField: {
'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
},
serializers.Serializer: {
'base_template': 'fieldset.html'
},
serializers.ListSerializer: {
'base_template': 'list_fieldset.html'
}
})
def render_field(self, field, parent_style):
if isinstance(field._field, serializers.HiddenField):
return ''
style = dict(self.default_style[field])
style.update(field.style)
if 'template_pack' not in style:
style['template_pack'] = parent_style.get('template_pack', self.template_pack)
style['renderer'] = self
if style.get('input_type') == 'datetime-local' and isinstance(field.value, six.text_type):
field.value = field.value.rstrip('Z')
if 'template' in style:
template_name = style['template']
else:
template_name = style['template_pack'].strip('/') + '/' + style['base_template']
template = loader.get_template(template_name)
context = Context({'field': field, 'style': style})
return template.render(context)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render serializer data and return an HTML form, as a string.
"""
form = data.serializer
meta = getattr(form, 'Meta', None)
style = getattr(meta, 'style', {})
if 'template_pack' not in style:
style['template_pack'] = self.template_pack
if 'base_template' not in style:
style['base_template'] = self.base_template
style['renderer'] = self
# This API needs to be finessed and finalized for 3.1
if 'template' in renderer_context:
template_name = renderer_context['template']
elif 'template' in style:
template_name = style['template']
else:
template_name = style['template_pack'].strip('/') + '/' + style['base_template']
renderer_context = renderer_context or {}
request = renderer_context['request']
template = loader.get_template(template_name)
context = RequestContext(request, {
'form': form,
'style': style
})
return template.render(context)
class BrowsableAPIRenderer(BaseRenderer):
"""
HTML renderer used to self-document the API.
"""
media_type = 'text/html'
format = 'api'
template = 'rest_framework/api.html'
charset = 'utf-8'
form_renderer_class = HTMLFormRenderer
def get_default_renderer(self, view):
"""
Return an instance of the first valid renderer.
(Don't use another documenting renderer.)
"""
renderers = [renderer for renderer in view.renderer_classes
if not issubclass(renderer, BrowsableAPIRenderer)]
non_template_renderers = [renderer for renderer in renderers
if not hasattr(renderer, 'get_template_names')]
if not renderers:
return None
elif non_template_renderers:
return non_template_renderers[0]()
return renderers[0]()
def get_content(self, renderer, data,
accepted_media_type, renderer_context):
"""
Get the content as if it had been rendered by the default
non-documenting renderer.
"""
if not renderer:
return '[No renderers were found]'
renderer_context['indent'] = 4
content = renderer.render(data, accepted_media_type, renderer_context)
render_style = getattr(renderer, 'render_style', 'text')
assert render_style in ['text', 'binary'], 'Expected .render_style ' \
'"text" or "binary", but got "%s"' % render_style
if render_style == 'binary':
return '[%d bytes of binary content]' % len(content)
return content
def show_form_for_method(self, view, method, request, obj):
"""
Returns True if a form should be shown for this method.
"""
if method not in view.allowed_methods:
return # Not a valid method
if not api_settings.FORM_METHOD_OVERRIDE:
return # Cannot use form overloading
try:
view.check_permissions(request)
if obj is not None:
view.check_object_permissions(request, obj)
except exceptions.APIException:
return False # Doesn't have permissions
return True
def get_rendered_html_form(self, data, view, method, request):
"""
Return a string representing a rendered HTML form, possibly bound to
either the input or output data.
In the absence of the View having an associated form then return None.
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
# If this is valid serializer data, and the form is for the same
# HTTP method as was used in the request then use the existing
# serializer instance, rather than dynamically creating a new one.
if request.method == method and serializer is not None:
try:
kwargs = {'data': request.data}
except ParseError:
kwargs = {}
existing_serializer = serializer
else:
kwargs = {}
existing_serializer = None
with override_method(view, request, method) as request:
if not self.show_form_for_method(view, method, request, instance):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
if (
not getattr(view, 'get_serializer', None) or
not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
):
return
if existing_serializer is not None:
serializer = existing_serializer
else:
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance, **kwargs)
else:
serializer = view.get_serializer(**kwargs)
if hasattr(serializer, 'initial_data'):
serializer.is_valid()
form_renderer = self.form_renderer_class()
return form_renderer.render(
serializer.data,
self.accepted_media_type,
dict(
list(self.renderer_context.items()) +
[('template', 'rest_framework/api_form.html')]
)
)
def get_raw_data_form(self, data, view, method, request):
"""
Returns a form that allows for arbitrary content types to be tunneled
via standard HTML forms.
(Which are typically application/x-www-form-urlencoded)
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
with override_method(view, request, method) as request:
# If we're not using content overloading there's no point in
# supplying a generic form, as the view won't treat the form's
# value as the content of the request.
if not (api_settings.FORM_CONTENT_OVERRIDE and
api_settings.FORM_CONTENTTYPE_OVERRIDE):
return None
# Check permissions
if not self.show_form_for_method(view, method, request, instance):
return
# If possible, serialize the initial content for the generic form
default_parser = view.parser_classes[0]
renderer_class = getattr(default_parser, 'renderer_class', None)
if (hasattr(view, 'get_serializer') and renderer_class):
# View has a serializer defined and parser class has a
# corresponding renderer that can be used to render the data.
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance)
else:
serializer = view.get_serializer()
# Render the raw data content
renderer = renderer_class()
accepted = self.accepted_media_type
context = self.renderer_context.copy()
context['indent'] = 4
content = renderer.render(serializer.data, accepted, context)
else:
content = None
# Generate a generic form that includes a content type field,
# and a content field.
content_type_field = api_settings.FORM_CONTENTTYPE_OVERRIDE
content_field = api_settings.FORM_CONTENT_OVERRIDE
media_types = [parser.media_type for parser in view.parser_classes]
choices = [(media_type, media_type) for media_type in media_types]
initial = media_types[0]
# NB. http://jacobian.org/writing/dynamic-form-generation/
class GenericContentForm(forms.Form):
def __init__(self):
super(GenericContentForm, self).__init__()
self.fields[content_type_field] = forms.ChoiceField(
label='Media type',
choices=choices,
initial=initial
)
self.fields[content_field] = forms.CharField(
label='Content',
widget=forms.Textarea,
initial=content
)
return GenericContentForm()
def get_name(self, view):
return view.get_view_name()
def get_description(self, view):
return view.get_view_description(html=True)
def get_breadcrumbs(self, request):
return get_breadcrumbs(request.path)
def get_context(self, data, accepted_media_type, renderer_context):
"""
Returns the context used to render.
"""
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = dict(response.items())
renderer_content_type = ''
if renderer:
renderer_content_type = '%s' % renderer.media_type
if renderer.charset:
renderer_content_type += ' ;%s' % renderer.charset
response_headers['Content-Type'] = renderer_content_type
if hasattr(view, 'paginator') and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
context = {
'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'view': view,
'request': request,
'response': response,
'description': self.get_description(view),
'name': self.get_name(view),
'version': VERSION,
'paginator': paginator,
'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
'response_headers': response_headers,
'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
'raw_data_put_form': raw_data_put_form,
'raw_data_post_form': raw_data_post_form,
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
'display_edit_forms': bool(response.status_code != 403),
'api_settings': api_settings
}
return context
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render the HTML for the browsable API representation.
"""
self.accepted_media_type = accepted_media_type or ''
self.renderer_context = renderer_context or {}
template = loader.get_template(self.template)
context = self.get_context(data, accepted_media_type, renderer_context)
context = RequestContext(renderer_context['request'], context)
ret = template.render(context)
# Munge DELETE Response code to allow us to return content
# (Do this *after* we've rendered the template so that we include
# the normal deletion response code in the output)
response = renderer_context['response']
if response.status_code == status.HTTP_204_NO_CONTENT:
response.status_code = status.HTTP_200_OK
return ret
class MultiPartRenderer(BaseRenderer):
media_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
format = 'multipart'
charset = 'utf-8'
BOUNDARY = 'BoUnDaRyStRiNg' if django.VERSION >= (1, 5) else b'BoUnDaRyStRiNg'
def render(self, data, accepted_media_type=None, renderer_context=None):
return encode_multipart(self.BOUNDARY, data)
|
chirilo/remo
|
vendor-local/lib/python/rest_framework/renderers.py
|
Python
|
bsd-3-clause
| 24,971 | 0.000921 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# import json
# replace with simplejson
import simplejson as json
import os
import time
import logging
import traceback
import sys
from oslo_config import cfg
from yabgp.common import constants as bgp_cons
from yabgp.handler import BaseHandler
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
MSG_PROCESS_OPTS = [
cfg.BoolOpt('write_disk',
default=True,
help='Whether the BGP message is written to disk'),
cfg.StrOpt('write_dir',
default=os.path.join(os.environ.get('HOME') or '.', 'data/bgp/'),
help='The BGP messages storage path'),
cfg.IntOpt('write_msg_max_size',
default=500,
help='The Max size of one BGP message file, the unit is MB'),
cfg.BoolOpt('write_keepalive',
default=False,
help='Whether write keepalive message to disk')
]
CONF.register_opts(MSG_PROCESS_OPTS, group='message')
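# Illustrative sketch (paths and values below are invented): with oslo.config,
# the options registered above correspond to a [message] section of the yabgp
# configuration file, e.g.
#     [message]
#     write_disk = True
#     write_dir = /home/bgp/data/bgp/
#     write_msg_max_size = 500
#     write_keepalive = False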
class DefaultHandler(BaseHandler):
def __init__(self):
super(DefaultHandler, self).__init__()
'''
{<peer>: (<path>, <current file>)}
'''
self.peer_files = {}
'''
{<peer>: <seq number>}
'''
self.msg_sequence = {}
def init(self):
if CONF.message.write_disk:
self.init_msg_file(CONF.bgp.running_config['remote_addr'].lower())
def init_msg_file(self, peer_addr):
msg_file_path_for_peer = os.path.join(
CONF.message.write_dir,
peer_addr
)
if not os.path.exists(msg_file_path_for_peer):
os.makedirs(msg_file_path_for_peer)
LOG.info('Create dir %s for peer %s', msg_file_path_for_peer, peer_addr)
LOG.info('BGP message file path is %s', msg_file_path_for_peer)
if msg_file_path_for_peer and peer_addr not in self.peer_files:
msg_path = msg_file_path_for_peer + '/msg/'
if not os.path.exists(msg_path):
os.makedirs(msg_path)
# try get latest file and msg sequence if any
last_msg_seq, msg_file_name = DefaultHandler.get_last_seq_and_file(msg_path)
if not msg_file_name:
msg_file_name = "%s.msg" % time.time()
# store the message sequence
self.msg_sequence[peer_addr] = last_msg_seq + 1
msg_file = open(os.path.join(msg_path, msg_file_name), 'a')
msg_file.flush()
self.peer_files[peer_addr] = (msg_path, msg_file)
LOG.info('BGP message file %s', msg_file_name)
LOG.info('The last bgp message seq number is %s', last_msg_seq)
@staticmethod
def get_last_seq_and_file(msg_path):
"""
Get the last sequence number in the latest log file.
"""
LOG.info('get the last bgp message seq for this peer')
last_seq = 0
# first get the last file
file_list = os.listdir(msg_path)
if not file_list:
return last_seq, None
file_list.sort()
msg_file_name = file_list[-1]
try:
with open(msg_path + msg_file_name, 'r') as fh:
line = None
for line in fh:
pass
last = line
if line:
if last.startswith('['):
last_seq = eval(last)[1]
elif last.startswith('{'):
last_seq = json.loads(last)['seq']
except OSError:
LOG.error('Error when reading bgp message files')
except Exception as e:
LOG.debug(traceback.format_exc())
LOG.error(e)
sys.exit()
return last_seq, msg_file_name
def write_msg(self, peer, timestamp, msg_type, msg):
"""
write bgp message into local disk file
:param peer: peer address
:param timestamp: timestamp
:param msg_type: message type (0,1,2,3,4,5,6)
:param msg: message dict
:param msg_path: path to store messages on disk
:return:
"""
msg_path, msg_file = self.peer_files.get(peer.lower(), (None, None))
if msg_path:
msg_seq = self.msg_sequence[peer.lower()]
msg_record = {
't': timestamp,
'seq': msg_seq,
'type': msg_type
}
msg_record.update(msg)
try:
json.dump(msg_record, msg_file)
except Exception as e:
LOG.error(e)
LOG.info('raw message %s', msg)
msg_file.write('\n')
self.msg_sequence[peer.lower()] += 1
msg_file.flush()
os.fsync(msg_file.fileno())
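    # Illustrative sketch (field values invented): each call above appends one
    # JSON object per line to the peer's message file, roughly of the form
    #     {"t": 1500000000.0, "seq": 12, "type": 2, "msg": {...}}
    # where "type" follows the message constants in yabgp.common.constants.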
def check_file_size(self, peer):
"""if the size of the msg file is bigger than 'max_msg_file_size',
then save as and re-open a new file.
"""
msg_path, cur_file = self.peer_files.get(peer.lower(), (None, None))
if msg_path:
if os.path.getsize(cur_file.name) >= CONF.message.write_msg_max_size:
cur_file.close()
msg_file_name = "%s.msg" % time.time()
LOG.info('Open a new message file %s', msg_file_name)
msg_file = open(os.path.join(msg_path + msg_file_name), 'a')
self.peer_files[peer.lower()] = (msg_path, msg_file)
return True
return False
def on_update_error(self, peer, timestamp, msg):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=6,
msg={'msg': msg}
)
def update_received(self, peer, timestamp, msg):
# write message to disk
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=bgp_cons.MSG_UPDATE,
msg={"msg": msg}
)
self.check_file_size(peer.factory.peer_addr)
def keepalive_received(self, peer, timestamp):
"""
keepalive message default handler
:param peer:
:param timestamp:
:return:
"""
if peer.msg_recv_stat['Keepalives'] == 1:
# do something with the connection establish event
pass
if CONF.message.write_keepalive:
# write bgp message
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=4,
msg={"msg": None}
)
def open_received(self, peer, timestamp, result):
# write bgp message
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=1,
msg={"msg": result}
)
def route_refresh_received(self, peer, msg, msg_type):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=msg_type,
msg={"msg": msg}
)
def notification_received(self, peer, msg):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=3,
msg={"msg": msg}
)
def on_connection_lost(self, peer):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=bgp_cons.MSG_BGP_CLOSED,
msg={"msg": None}
)
def on_connection_failed(self, peer, msg):
self.write_msg(
peer=peer,
timestamp=time.time(),
msg_type=0,
msg={"msg": msg}
)
def on_established(self, peer, msg):
pass
|
meidli/yabgp
|
yabgp/handler/default_handler.py
|
Python
|
apache-2.0
| 7,759 | 0.000516 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division
import os, os.path
import re
import sys
from pprint import pprint, pformat
|
msztolcman/ff
|
test/mocks/__init__.py
|
Python
|
mit
| 190 | 0.005263 |
# Natural Language Toolkit: Parsers
#
# Copyright (C) 2001 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@ldc.upenn.edu>
# Scott Currie <sccurrie@seas.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: __init__.py,v 1.1.1.2 2004/09/29 21:58:23 adastra Exp $
"""
Classes and interfaces for producing tree structures that represent
the internal organization of a text. This task is known as X{parsing}
the text, and the resulting tree structures are called the text's
X{parses}. Typically, the text is a single sentence, and the tree
structure represents the syntactic structure of the sentence.
However, parsers can also be used in other domains. For example,
parsers can be used to derive the morphological structure of the
morphemes that make up a word, or to derive the discourse structure
for a set of utterances.
Sometimes, a single piece of text can be represented by more than one
tree structure. Texts represented by more than one tree structure are
called X{ambiguous} texts. Note that there are actually two ways in
which a text can be ambiguous:
- The text has multiple correct parses.
- There is not enough information to decide which of several
candidate parses is correct.
However, the parser module does I{not} distinguish these two types of
ambiguity.
The parser module defines C{ParserI}, a standard interface for parsing
texts; and two simple implementations of that interface,
C{ShiftReduceParser} and C{RecursiveDescentParser}. It also contains
three sub-modules for specialized kinds of parsing:
- C{nltk.parser.chart} defines chart parsing, which uses dynamic
programming to efficiently parse texts.
- C{nltk.parser.chunk} defines chunk parsing, which identifies
non-overlapping linguistic groups in a text.
- C{nltk.parser.probabilistic} defines probabilistic parsing, which
associates a probability with each parse.
@group Interfaces: ParserI
@group Parsers: ShiftReduceParser, SteppingShiftReduceParser,
RecursiveDescentParser, SteppingRecursiveDescentParser
@sort: ParserI, ShiftReduceParser, SteppingShiftReduceParser,
RecursiveDescentParser, SteppingRecursiveDescentParser,
demo, chart, chunk, probabilistic
@see: C{nltk.cfg}
"""
from nltk import TaskI, PropertyIndirectionMixIn
from nltk.tree import Tree, ImmutableTree
from nltk.token import Token
from nltk.cfg import Nonterminal, CFG, CFGProduction, nonterminals
from nltk.chktype import chktype
import types
##//////////////////////////////////////////////////////
## Parser Interface
##//////////////////////////////////////////////////////
class ParserI(TaskI):
"""
A processing class for deriving trees that represent possible
structures for a sequence of tokens. These tree structures are
known as X{parses}. Typically, parsers are used to derive syntax
trees for sentences. But parsers can also be used to derive other
kinds of tree structure, such as morphological trees and discourse
structures.
@inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
@outprop: C{TREE}: The parse tree. I{(generated by L{parse})}
@outprop: C{TREES}: A list of possible parse trees.
I{(generated by L{parse_n})}
"""
def parse(self, token):
"""
Derive a parse tree that represents the structure of the given
token's C{SUBTOKENS}, and output it to the token's C{TREE}
property. If no parse are found, then output C{None}. If
multiple parses are found, then output the best parse.
The parsed trees derive a structure for the subtokens, but do
not modify them. In particular, the leaves of the subtree
should be equal to the list of subtokens.
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
raise NotImplementedError()
def get_parse(self, token):
"""
@return: A parse tree that represents the structure of the
given token's C{SUBTOKENS}. If no parse is found, then return
C{None}.
@rtype: L{Tree}
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
def get_parse_list(self, token):
"""
@return: A list of the parse trees that could represent the
structure of the given token's C{SUBTOKENS}. When possible,
this list should be sorted from most likely to least likely.
@rtype: C{list} of L{Tree}
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
def get_parse_probs(self, token):
"""
@return: A probability distribution over the parse trees that
could represent the structure of the given token's
C{SUBTOKENS}.
@rtype: L{ProbDistI}
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
    def get_parse_dict(self, token):
        """
        @return: A dictionary mapping from parse trees that could
represent the structure of the given token's C{SUBTOKENS} to
numeric scores.
@rtype: C{dict}
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
##//////////////////////////////////////////////////////
## Abstract Base Class for Parsers
##//////////////////////////////////////////////////////
class AbstractParser(ParserI, PropertyIndirectionMixIn):
"""
An abstract base class for parsers. C{AbstractParser} provides
a default implementation for:
- L{parse} (based on C{get_parse})
- L{get_parse_list} (based on C{get_parse})
- L{get_parse} (based on C{get_parse_list})
Note that subclasses must override either C{get_parse} or
C{get_parse_list} (or both), to avoid infinite recursion.
"""
def __init__(self, **property_names):
"""
Construct a new parser.
@type property_names: C{dict}
@param property_names: A dictionary that can be used to override
the default property names. Each entry maps from a
default property name to a new property name.
"""
# Make sure we're not directly instantiated:
if self.__class__ == AbstractParser:
raise AssertionError, "Abstract classes can't be instantiated"
PropertyIndirectionMixIn.__init__(self, **property_names)
def parse(self, token):
TREE = self.property('TREE')
token[TREE] = self.get_parse(token)
def get_parse(self, token):
trees = self.get_parse_list(token)
if len(trees) == 0: return None
else: return trees[0]
def get_parse_list(self, token):
tree = self.get_parse(token)
if tree is None: return []
else: return [tree]
##//////////////////////////////////////////////////////
## Shift/Reduce Parser
##//////////////////////////////////////////////////////
class ShiftReduceParser(AbstractParser):
"""
A simple bottom-up CFG parser that uses two operations, "shift"
and "reduce", to find a single parse for a text.
C{ShiftReduceParser} maintains a stack, which records the
structure of a portion of the text. This stack is a list of
C{Token}s and C{Tree}s that collectively cover a portion of
the text. For example, while parsing the sentence "the dog saw
the man" with a typical grammar, C{ShiftReduceParser} will produce
the following stack, which covers "the dog saw"::
[(NP: (Det: <'the'>) (N: <'dog'>)), (V: <'saw'>)]
C{ShiftReduceParser} attempts to extend the stack to cover the
entire text, and to combine the stack elements into a single tree,
producing a complete parse for the sentence.
Initially, the stack is empty. It is extended to cover the text,
from left to right, by repeatedly applying two operations:
- X{shift} moves a token from the beginning of the text to the
end of the stack.
- X{reduce} uses a CFG production to combine the rightmost stack
elements into a single C{Tree}.
Often, more than one operation can be performed on a given stack.
In this case, C{ShiftReduceParser} uses the following heuristics
to decide which operation to perform:
- Only shift if no reductions are available.
- If multiple reductions are available, then apply the reduction
whose CFG production is listed earliest in the grammar.
Note that these heuristics are not guaranteed to choose an
operation that leads to a parse of the text. Also, if multiple
parses exists, C{ShiftReduceParser} will return at most one of
them.
@see: C{nltk.cfg}
@inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
@inprop: C{LEAF}: The string content of the subtokens.
@outprop: C{TREE}: The parse tree. I{(generated by L{parse})}
@outprop: C{TREES}: A list of possible parse trees.
I{(generated by L{parse_n})}
"""
def __init__(self, grammar, trace=0, **property_names):
"""
Create a new C{ShiftReduceParser}, that uses C{grammar} to
parse texts.
@type grammar: C{CFG}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
@type property_names: C{dict}
@param property_names: A dictionary that can be used to override
the default property names. Each entry maps from a
default property name to a new property name.
"""
assert chktype(1, grammar, CFG)
assert chktype(2, trace, types.IntType)
self._grammar = grammar
self._trace = trace
AbstractParser.__init__(self, **property_names)
self._check_grammar()
def grammar(self):
"""
@return: The grammar used to parse texts.
@rtype: C{CFG}
"""
return self._grammar
def set_grammar(self, grammar):
"""
Change the grammar used to parse texts.
@param grammar: The new grammar.
@type grammar: C{CFG}
"""
assert chktype(1, grammar, CFG)
self._grammar = grammar
def get_parse(self, token):
assert chktype(1, token, [Token], (Token))
SUBTOKENS = self.property('SUBTOKENS')
LEAF = self.property('LEAF')
# initialize the stack.
stack = []
remaining_text = token[SUBTOKENS][:]
# Trace output.
if self._trace:
leaves = [tok[LEAF] for tok in token[SUBTOKENS]]
print 'Parsing %r' % ' '.join(leaves)
self._trace_stack(stack, remaining_text)
# iterate through the text, pushing the token's type onto
# the stack, then reducing the stack.
while len(remaining_text) > 0:
self._shift(stack, remaining_text)
while self._reduce(stack, remaining_text): pass
# Did we reduce everything?
if len(stack) != 1: return None
# Did we end up with the right category?
if stack[0].node != self._grammar.start().symbol():
return None
# We parsed successfully!
return stack[0]
def _shift(self, stack, remaining_text):
"""
Move a token from the beginning of C{remaining_text} to the
end of C{stack}.
@type stack: C{list} of C{Token} and C{Tree}
@param stack: A list of C{Token}s and C{Tree}s, encoding
the structure of the text that has been parsed so far.
@type remaining_text: C{list} of C{Token}
@param remaining_text: The portion of the text that is not yet
covered by C{stack}.
@rtype: C{None}
"""
stack.append(remaining_text[0])
remaining_text.remove(remaining_text[0])
if self._trace: self._trace_shift(stack, remaining_text)
def _match_rhs(self, rhs, rightmost_stack):
"""
@rtype: C{boolean}
@return: true if the right hand side of a CFG production
matches the rightmost elements of the stack. C{rhs}
matches C{rightmost_stack} if they are the same length,
and each element of C{rhs} matches the corresponding
element of C{rightmost_stack}. A nonterminal element of
C{rhs} matches any C{Tree} whose node value is equal
to the nonterminal's symbol. A terminal element of C{rhs}
matches any C{Token} whose type is equal to the terminal.
@type rhs: C{list} of (terminal and C{Nonterminal})
@param rhs: The right hand side of a CFG production.
@type rightmost_stack: C{list} of (C{Token} and C{Tree})
@param rightmost_stack: The rightmost elements of the parser's
stack.
"""
if len(rightmost_stack) != len(rhs): return 0
for i in range(len(rightmost_stack)):
if isinstance(rightmost_stack[i], Tree):
if not isinstance(rhs[i], Nonterminal): return 0
if rightmost_stack[i].node != rhs[i].symbol(): return 0
else:
if isinstance(rhs[i], Nonterminal): return 0
if rightmost_stack[i]['TEXT'] != rhs[i]: return 0
return 1
def _reduce(self, stack, remaining_text, production=None):
"""
Find a CFG production whose right hand side matches the
rightmost stack elements; and combine those stack elements
into a single C{Tree}, with the node specified by the
production's left-hand side. If more than one CFG production
matches the stack, then use the production that is listed
earliest in the grammar. The new C{Tree} replaces the
elements in the stack.
@rtype: C{CFGProduction} or C{None}
@return: If a reduction is performed, then return the CFG
production that the reduction is based on; otherwise,
          return C{None}.
@type stack: C{list} of C{Token} and C{Tree}
@param stack: A list of C{Token}s and C{Tree}s, encoding
the structure of the text that has been parsed so far.
@type remaining_text: C{list} of C{Token}
@param remaining_text: The portion of the text that is not yet
covered by C{stack}.
"""
if production is None: productions = self._grammar.productions()
else: productions = [production]
# Try each production, in order.
for production in productions:
rhslen = len(production.rhs())
# check if the RHS of a production matches the top of the stack
if self._match_rhs(production.rhs(), stack[-rhslen:]):
# combine the tree to reflect the reduction
tree = Tree(production.lhs().symbol(), stack[-rhslen:])
stack[-rhslen:] = [tree]
# We reduced something
if self._trace:
self._trace_reduce(stack, production, remaining_text)
return production
# We didn't reduce anything
return None
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
@type trace: C{int}
@param trace: The trace level. A trace level of C{0} will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
@rtype: C{None}
"""
assert chktype(1, trace, types.IntType)
# 1: just show shifts.
# 2: show shifts & reduces
        # 3: display which tokens & productions are shifted/reduced
self._trace = trace
def _trace_stack(self, stack, remaining_text, marker=' '):
"""
Print trace output displaying the given stack and text.
@rtype: C{None}
@param marker: A character that is printed to the left of the
stack. This is used with trace level 2 to print 'S'
before shifted stacks and 'R' before reduced stacks.
"""
LEAF = self.property('LEAF')
str = ' '+marker+' [ '
for elt in stack:
if isinstance(elt, Tree):
str += `Nonterminal(elt.node)` + ' '
else:
str += `elt[LEAF]` + ' '
str += '* ' + ' '.join([`s[LEAF]` for s in remaining_text]) + ']'
print str
def _trace_shift(self, stack, remaining_text):
"""
Print trace output displaying that a token has been shifted.
@rtype: C{None}
"""
if self._trace > 2: print 'Shift %r:' % stack[-1]
if self._trace == 2: self._trace_stack(stack, remaining_text, 'S')
elif self._trace > 0: self._trace_stack(stack, remaining_text)
def _trace_reduce(self, stack, production, remaining_text):
"""
Print trace output displaying that C{production} was used to
reduce C{stack}.
@rtype: C{None}
"""
if self._trace > 2:
rhs = ' '.join([`s` for s in production.rhs()])
print 'Reduce %r <- %s' % (production.lhs(), rhs)
if self._trace == 2: self._trace_stack(stack, remaining_text, 'R')
elif self._trace > 1: self._trace_stack(stack, remaining_text)
def _check_grammar(self):
"""
Check to make sure that all of the CFG productions are
potentially useful. If any productions can never be used,
then print a warning.
@rtype: C{None}
"""
productions = self._grammar.productions()
# Any production whose RHS is an extension of another production's RHS
# will never be used.
for i in range(len(productions)):
for j in range(i+1, len(productions)):
rhs1 = productions[i].rhs()
rhs2 = productions[j].rhs()
if rhs1[:len(rhs2)] == rhs2:
print 'Warning: %r will never be used' % productions[i]
##//////////////////////////////////////////////////////
## Recursive Descent Parser
##//////////////////////////////////////////////////////
class RecursiveDescentParser(AbstractParser):
"""
A simple top-down CFG parser that parses texts by recursively
expanding the fringe of a C{Tree}, and matching it against a
text.
C{RecursiveDescentParser} uses a list of tree locations called a
X{frontier} to remember which subtrees have not yet been expanded
and which leaves have not yet been matched against the text. Each
tree location consists of a list of child indices specifying the
path from the root of the tree to a subtree or a leaf; see the
reference documentation for C{Tree} for more information
about tree locations.
When the parser begins parsing a text, it constructs a tree
containing only the start symbol, and a frontier containing the
location of the tree's root node. It then extends the tree to
cover the text, using the following recursive procedure:
- If the frontier is empty, and the text is covered by the tree,
then return the tree as a possible parse.
- If the frontier is empty, and the text is not covered by the
tree, then return no parses.
- If the first element of the frontier is a subtree, then
use CFG productions to X{expand} it. For each applicable
production, add the expanded subtree's children to the
frontier, and recursively find all parses that can be
generated by the new tree and frontier.
- If the first element of the frontier is a token, then X{match}
it against the next token from the text. Remove the token
from the frontier, and recursively find all parses that can be
generated by the new tree and frontier.
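    A minimal usage sketch (the grammar and property names are
    illustrative only, following the conventions of L{demo} below)::
        parser = RecursiveDescentParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS')
        parser.parse_n(sent)      # adds the TREES property to sent
        for tree in sent['TREES']: print tree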
@see: C{nltk.cfg}
@inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
@inprop: C{LEAF}: The string content of the subtokens.
@outprop: C{TREE}: The parse tree. I{(generated by L{parse})}
@outprop: C{TREES}: A list of possible parse trees.
I{(generated by L{parse_n})}
"""
def __init__(self, grammar, trace=0, **property_names):
"""
Create a new C{RecursiveDescentParser}, that uses C{grammar}
to parse texts.
@type grammar: C{CFG}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
@type property_names: C{dict}
@param property_names: A dictionary that can be used to override
the default property names. Each entry maps from a
default property name to a new property name.
"""
assert chktype(1, grammar, CFG)
assert chktype(2, trace, types.IntType)
self._grammar = grammar
self._trace = trace
AbstractParser.__init__(self, **property_names)
def grammar(self):
"""
@return: The grammar used to parse texts.
@rtype: C{CFG}
"""
return self._grammar
def set_grammar(self, grammar):
"""
Change the grammar used to parse texts.
@param grammar: The new grammar.
@type grammar: C{CFG}
"""
assert chktype(1, grammar, CFG)
self._grammar = grammar
def get_parse_list(self, token):
# Inherit docs from ParserI
assert chktype(1, token, Token)
SUBTOKENS = self.property('SUBTOKENS')
# Start a recursive descent parse, with an initial tree
# containing just the start symbol.
start = self._grammar.start().symbol()
initial_tree = Tree(start, [])
frontier = [()]
text = token[SUBTOKENS]
if self._trace:
self._trace_start(initial_tree, frontier, text)
parses = self._parse(text, initial_tree, frontier)
# Return the parses.
return parses
def _parse(self, remaining_text, tree, frontier):
"""
        Recursively expand and match each element of C{tree}
specified by C{frontier}, to cover C{remaining_text}. Return
a list of all parses found.
@return: A list of all parses that can be generated by
matching and expanding the elements of C{tree}
specified by C{frontier}.
@rtype: C{list} of C{Tree}
@type tree: C{Tree}
@param tree: A partial structure for the text that is
currently being parsed. The elements of C{tree}
that are specified by C{frontier} have not yet been
expanded or matched.
@type remaining_text: C{list} of C{Token}s
@param remaining_text: The portion of the text that is not yet
covered by C{tree}.
@type frontier: C{list} of C{tuple} of C{int}
@param frontier: A list of the locations within C{tree} of
all subtrees that have not yet been expanded, and all
          leaves that have not yet been matched. This list is sorted
in left-to-right order of location within the tree.
"""
# If the tree covers the text, and there's nothing left to
# expand, then we've found a complete parse; return it.
if len(remaining_text) == 0 and len(frontier) == 0:
if self._trace:
self._trace_succeed(tree, frontier)
return [tree]
# If there's still text, but nothing left to expand, we failed.
elif len(frontier) == 0:
if self._trace:
self._trace_backtrack(tree, frontier)
return []
# If the next element on the frontier is a tree, expand it.
elif isinstance(tree[frontier[0]], Tree):
return self._expand(remaining_text, tree, frontier)
# If the next element on the frontier is a token, match it.
else:
return self._match(remaining_text, tree, frontier)
def _match(self, rtext, tree, frontier):
"""
@rtype: C{list} of C{Tree}
@return: a list of all parses that can be generated by
matching the first element of C{frontier} against the
first token in C{rtext}. In particular, if the first
element of C{frontier} has the same type as the first
token in C{rtext}, then substitute the token into
C{tree}; and return all parses that can be generated by
matching and expanding the remaining elements of
C{frontier}. If the first element of C{frontier} does not
have the same type as the first token in C{rtext}, then
          return an empty list.
@type tree: C{Tree}
@param tree: A partial structure for the text that is
currently being parsed. The elements of C{tree}
that are specified by C{frontier} have not yet been
expanded or matched.
@type rtext: C{list} of C{Token}s
@param rtext: The portion of the text that is not yet
covered by C{tree}.
@type frontier: C{list} of C{tuple} of C{int}
@param frontier: A list of the locations within C{tree} of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched.
"""
LEAF = self.property('LEAF')
tree_leaf = tree[frontier[0]][LEAF]
if (len(rtext) > 0 and tree_leaf == rtext[0]['TEXT']):
# If it's a terminal that matches text[0], then substitute
# in the token, and continue parsing.
newtree = tree.copy(deep=True)
newtree[frontier[0]] = rtext[0]
if self._trace:
self._trace_match(newtree, frontier[1:], rtext[0])
return self._parse(rtext[1:], newtree, frontier[1:])
else:
# If it's a non-matching terminal, fail.
if self._trace:
self._trace_backtrack(tree, frontier, rtext[:1])
return []
def _expand(self, remaining_text, tree, frontier, production=None):
"""
@rtype: C{list} of C{Tree}
@return: A list of all parses that can be generated by
expanding the first element of C{frontier} with
C{production}. In particular, if the first element of
C{frontier} is a subtree whose node type is equal to
C{production}'s left hand side, then add a child to that
subtree for each element of C{production}'s right hand
side; and return all parses that can be generated by
matching and expanding the remaining elements of
C{frontier}. If the first element of C{frontier} is not a
subtree whose node type is equal to C{production}'s left
hand side, then return an empty list. If C{production} is
not specified, then return a list of all parses that can
be generated by expanding the first element of C{frontier}
with I{any} CFG production.
@type tree: C{Tree}
@param tree: A partial structure for the text that is
currently being parsed. The elements of C{tree}
that are specified by C{frontier} have not yet been
expanded or matched.
@type remaining_text: C{list} of C{Token}s
@param remaining_text: The portion of the text that is not yet
covered by C{tree}.
@type frontier: C{list} of C{tuple} of C{int}
@param frontier: A list of the locations within C{tree} of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched.
"""
LEAF = self.property('LEAF')
if production is None: productions = self._grammar.productions()
else: productions = [production]
parses = []
for production in productions:
lhs = production.lhs().symbol()
if lhs == tree[frontier[0]].node:
subtree = self._production_to_tree(production)
if frontier[0] == ():
newtree = subtree
else:
newtree = tree.copy(deep=True)
newtree[frontier[0]] = subtree
new_frontier = [frontier[0]+(i,) for i in
range(len(production.rhs()))]
if self._trace:
self._trace_expand(newtree, new_frontier, production)
parses += self._parse(remaining_text, newtree,
new_frontier + frontier[1:])
return parses
def _production_to_tree(self, production):
"""
@rtype: C{Tree}
@return: The C{Tree} that is licensed by C{production}.
In particular, given the production::
C{[M{lhs} -> M{elt[1]} ... M{elt[n]}]}
Return a tree token that has a node C{M{lhs}.symbol}, and
C{M{n}} children. For each nonterminal element
C{M{elt[i]}} in the production, the tree token has a
childless subtree with node value C{M{elt[i]}.symbol}; and
for each terminal element C{M{elt[j]}}, the tree token has
a leaf token with type C{M{elt[j]}}.
@param production: The CFG production that licenses the tree
token that should be returned.
@type production: C{CFGProduction}
"""
LEAF = self.property('LEAF')
children = []
for elt in production.rhs():
if isinstance(elt, Nonterminal):
children.append(Tree(elt.symbol(), []))
else:
# This will be matched.
children.append(Token({LEAF: elt}))
return Tree(production.lhs().symbol(), children)
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
@type trace: C{int}
@param trace: The trace level. A trace level of C{0} will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
@rtype: C{None}
"""
assert chktype(1, trace, types.IntType)
self._trace = trace
def _trace_fringe(self, tree, treeloc=None):
"""
Print trace output displaying the fringe of C{tree}. The
fringe of C{tree} consists of all of its leaves and all of
its childless subtrees.
@rtype: C{None}
"""
LEAF = self.property('LEAF')
if treeloc == (): print "*",
if isinstance(tree, Tree):
if len(tree) == 0: print `Nonterminal(tree.node)`,
for i in range(len(tree)):
if treeloc is not None and i == treeloc[0]:
self._trace_fringe(tree[i], treeloc[1:])
else:
self._trace_fringe(tree[i])
else:
print `tree`,
def _trace_tree(self, tree, frontier, operation):
"""
Print trace output displaying the parser's current state.
@param operation: A character identifying the operation that
generated the current state.
@rtype: C{None}
"""
if self._trace == 2: print ' %c [' % operation,
else: print ' [',
if len(frontier) > 0: self._trace_fringe(tree, frontier[0])
else: self._trace_fringe(tree)
print ']'
def _trace_start(self, tree, frontier, text):
print 'Parsing %r' % ' '.join([tok['TEXT'] for tok in text])
if self._trace > 2: print 'Start:'
if self._trace > 1: self._trace_tree(tree, frontier, ' ')
def _trace_expand(self, tree, frontier, production):
if self._trace > 2: print 'Expand: %s' % production
if self._trace > 1: self._trace_tree(tree, frontier, 'E')
def _trace_match(self, tree, frontier, tok):
if self._trace > 2: print 'Match: %r' % tok
if self._trace > 1: self._trace_tree(tree, frontier, 'M')
def _trace_succeed(self, tree, frontier):
if self._trace > 2: print 'GOOD PARSE:'
if self._trace == 1: print 'Found a parse:\n%s' % tree
if self._trace > 1: self._trace_tree(tree, frontier, '+')
def _trace_backtrack(self, tree, frontier, toks=None):
if self._trace > 2:
if toks: print 'Backtrack: %r match failed' % toks[0]
else: print 'Backtrack'
##//////////////////////////////////////////////////////
## Stepping Shift/Reduce Parser
##//////////////////////////////////////////////////////
class SteppingShiftReduceParser(ShiftReduceParser):
"""
    A C{ShiftReduceParser} that allows you to step through the parsing
process, performing a single operation at a time. It also allows
you to change the parser's grammar midway through parsing a text.
The C{initialize} method is used to start parsing a text.
C{shift} performs a single shift operation, and C{reduce} performs
a single reduce operation. C{step} will perform a single reduce
operation if possible; otherwise, it will perform a single shift
operation. C{parses} returns the set of parses that have been
found by the parser.
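    For example (a sketch only; the token conventions follow L{demo})::
        parser = SteppingShiftReduceParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS')
        parser.initialize(sent)
        while parser.step(): pass
        trees = parser.parses()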
@ivar _history: A list of C{(stack, remaining_text)} pairs,
containing all of the previous states of the parser. This
history is used to implement the C{undo} operation.
@see: C{nltk.cfg}
@inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
@inprop: C{LEAF}: The string content of the subtokens.
@outprop: C{TREE}: The parse tree. I{(generated by L{parse})}
@outprop: C{TREES}: A list of possible parse trees.
I{(generated by L{parse_n})}
"""
def __init__(self, grammar, trace=0, **property_names):
assert chktype(1, grammar, CFG)
assert chktype(2, trace, types.IntType)
self._grammar = grammar
self._trace = trace
self._stack = None
self._remaining_text = None
self._history = []
AbstractParser.__init__(self, **property_names)
def get_parse_list(self, token):
assert chktype(1, token, Token)
self.initialize(token)
while self.step(): pass
return self.parses()
def stack(self):
"""
@return: The parser's stack.
@rtype: C{list} of C{Token} and C{Tree}
"""
return self._stack
def remaining_text(self):
"""
@return: The portion of the text that is not yet covered by the
stack.
@rtype: C{list} of C{Token}
"""
return self._remaining_text
def initialize(self, token):
"""
Start parsing a given text. This sets the parser's stack to
C{[]} and sets its remaining text to C{token['SUBTOKENS']}.
"""
assert chktype(1, token, Token)
SUBTOKENS = self.property('SUBTOKENS')
self._stack = []
self._remaining_text = token[SUBTOKENS][:]
self._history = []
def step(self):
"""
Perform a single parsing operation. If a reduction is
possible, then perform that reduction, and return the
production that it is based on. Otherwise, if a shift is
possible, then perform it, and return 1. Otherwise,
return 0.
@return: 0 if no operation was performed; 1 if a shift was
performed; and the CFG production used to reduce if a
reduction was performed.
@rtype: C{CFGProduction} or C{boolean}
"""
return self.reduce() or self.shift()
def shift(self):
"""
Move a token from the beginning of the remaining text to the
end of the stack. If there are no more tokens in the
remaining text, then do nothing.
@return: True if the shift operation was successful.
@rtype: C{boolean}
"""
if len(self._remaining_text) == 0: return 0
self._history.append( (self._stack[:], self._remaining_text[:]) )
self._shift(self._stack, self._remaining_text)
return 1
def reduce(self, production=None):
"""
Use C{production} to combine the rightmost stack elements into
a single C{Tree}. If C{production} does not match the
rightmost stack elements, then do nothing.
@return: The production used to reduce the stack, if a
reduction was performed. If no reduction was performed,
return C{None}.
@rtype: C{CFGProduction} or C{None}
"""
assert chktype(1, production, CFGProduction, types.NoneType)
self._history.append( (self._stack[:], self._remaining_text[:]) )
return_val = self._reduce(self._stack, self._remaining_text,
production)
if not return_val: self._history.pop()
return return_val
def undo(self):
"""
Return the parser to its state before the most recent
        shift or reduce operation. Calling C{undo} repeatedly returns
the parser to successively earlier states. If no shift or
reduce operations have been performed, C{undo} will make no
changes.
@return: true if an operation was successfully undone.
@rtype: C{boolean}
"""
if len(self._history) == 0: return 0
(self._stack, self._remaining_text) = self._history.pop()
return 1
def reducible_productions(self):
"""
@return: A list of the productions for which reductions are
available for the current parser state.
@rtype: C{list} of C{CFGProduction}
"""
productions = []
for production in self._grammar.productions():
rhslen = len(production.rhs())
if self._match_rhs(production.rhs(), self._stack[-rhslen:]):
productions.append(production)
return productions
def parses(self):
"""
@return: A list of the parses that have been found by this
parser so far.
@rtype: C{list} of C{Tree}
"""
if len(self._remaining_text) != 0: return []
if len(self._stack) != 1: return []
if self._stack[0].node != self._grammar.start().symbol():
return []
return self._stack
##//////////////////////////////////////////////////////
## Stepping Recursive Descent Parser
##//////////////////////////////////////////////////////
class SteppingRecursiveDescentParser(RecursiveDescentParser):
"""
A C{RecursiveDescentParser} that allows you to step through the
parsing process, performing a single operation at a time.
The C{initialize} method is used to start parsing a text.
C{expand} expands the first element on the frontier using a single
CFG production, and C{match} matches the first element on the
frontier against the next text token. C{backtrack} undoes the most
recent expand or match operation. C{step} performs a single
expand, match, or backtrack operation. C{parses} returns the set
of parses that have been found by the parser.
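    For example (a sketch only; the token conventions follow L{demo})::
        parser = SteppingRecursiveDescentParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS')
        parser.initialize(sent)
        while parser.step() is not None: pass
        trees = parser.parses()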
    @ivar _history: A list of C{(rtext, tree, frontier)} triples,
containing the previous states of the parser. This history is
used to implement the C{backtrack} operation.
@ivar _tried_e: A record of all productions that have been tried
for a given tree. This record is used by C{expand} to perform
the next untried production.
@ivar _tried_m: A record of what tokens have been matched for a
given tree. This record is used by C{step} to decide whether
or not to match a token.
@see: C{nltk.cfg}
@inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
@inprop: C{LEAF}: The string content of the subtokens.
@outprop: C{TREE}: The parse tree. I{(generated by L{parse})}
@outprop: C{TREES}: A list of possible parse trees.
I{(generated by L{parse_n})}
"""
def __init__(self, grammar, trace=0, **property_names):
assert chktype(1, grammar, CFG)
assert chktype(2, trace, types.IntType)
self._grammar = grammar
self._trace = trace
self._rtext = None
self._tree = None
self._frontier = [()]
self._tried_e = {}
self._tried_m = {}
self._history = []
self._parses = []
AbstractParser.__init__(self, **property_names)
# [XX] TEMPORARY HACK WARNING! This should be replaced with
# something nicer when we get the chance.
def _freeze(self, tree):
c = tree.copy()
for pos in c.treepositions('leaves'):
c[pos] = c[pos].freeze()
return ImmutableTree.convert(c)
def get_parse_list(self, token):
assert chktype(1, token, Token)
TREES = self.property('TREES')
self.initialize(token)
while self.step() is not None: pass
return self.parses()
def initialize(self, token):
"""
Start parsing a given text. This sets the parser's tree to
the start symbol, its frontier to the root node, and its
remaining text to C{token['SUBTOKENS']}.
"""
assert chktype(1, token, Token)
SUBTOKENS = self.property('SUBTOKENS')
self._rtext = token[SUBTOKENS]
start = self._grammar.start().symbol()
self._tree = Tree(start, [])
self._frontier = [()]
self._tried_e = {}
self._tried_m = {}
self._history = []
self._parses = []
if self._trace:
self._trace_start(self._tree, self._frontier, self._rtext)
def remaining_text(self):
"""
@return: The portion of the text that is not yet covered by the
tree.
@rtype: C{list} of C{Token}
"""
return self._rtext
def frontier(self):
"""
@return: A list of the tree locations of all subtrees that
have not yet been expanded, and all leaves that have not
yet been matched.
@rtype: C{list} of C{tuple} of C{int}
"""
return self._frontier
def tree(self):
"""
@return: A partial structure for the text that is
currently being parsed. The elements specified by the
frontier have not yet been expanded or matched.
@rtype: C{Tree}
"""
return self._tree
def step(self):
"""
Perform a single parsing operation. If an untried match is
possible, then perform the match, and return the matched
token. If an untried expansion is possible, then perform the
expansion, and return the production that it is based on. If
backtracking is possible, then backtrack, and return 1.
        Otherwise, return C{None}.
        @return: C{None} if no operation was performed; a token if a match
was performed; a production if an expansion was performed;
and 1 if a backtrack operation was performed.
@rtype: C{CFGProduction} or C{Token} or C{boolean}
"""
# Try matching (if we haven't already)
if self.untried_match():
token = self.match()
if token is not None: return token
# Try expanding.
production = self.expand()
if production is not None: return production
# Try backtracking
if self.backtrack():
self._trace_backtrack(self._tree, self._frontier)
return 1
# Nothing left to do.
return None
def expand(self, production=None):
"""
Expand the first element of the frontier. In particular, if
the first element of the frontier is a subtree whose node type
is equal to C{production}'s left hand side, then add a child
to that subtree for each element of C{production}'s right hand
side. If C{production} is not specified, then use the first
untried expandable production. If all expandable productions
have been tried, do nothing.
@return: The production used to expand the frontier, if an
expansion was performed. If no expansion was performed,
return C{None}.
@rtype: C{CFGProduction} or C{None}
"""
LEAF = self.property('LEAF')
assert chktype(1, production, CFGProduction, types.NoneType)
# Make sure we *can* expand.
if len(self._frontier) == 0:
return None
if not isinstance(self._tree[self._frontier[0]],
Tree):
return None
# If they didn't specify a production, check all untried ones.
if production is None:
productions = self.untried_expandable_productions()
else: productions = [production]
parses = []
for prod in productions:
# Record that we've tried this production now.
self._tried_e.setdefault(self._freeze(self._tree), []).append(prod)
# Try expanding.
if self._expand(self._rtext, self._tree, self._frontier, prod):
return prod
# We didn't expand anything.
return None
def match(self):
"""
Match the first element of the frontier. In particular, if
the first element of the frontier has the same type as the
next text token, then substitute the text token into the tree.
@return: The token matched, if a match operation was
performed. If no match was performed, return C{None}
@rtype: C{Token} or C{None}
"""
LEAF = self.property('LEAF')
# Record that we've tried matching this token.
tok = self._rtext[0]
self._tried_m.setdefault(self._freeze(self._tree), []).append(tok)
# Make sure we *can* match.
if len(self._frontier) == 0:
return None
if isinstance(self._tree[self._frontier[0]], Tree):
return None
if self._match(self._rtext, self._tree, self._frontier):
# Return the token we just matched.
return self._history[-1][0][0]
else:
return None
def backtrack(self):
"""
Return the parser to its state before the most recent
        match or expand operation. Calling C{backtrack} repeatedly returns
        the parser to successively earlier states. If no match or
        expand operations have been performed, C{backtrack} will make no
changes.
@return: true if an operation was successfully undone.
@rtype: C{boolean}
"""
if len(self._history) == 0: return 0
(self._rtext, self._tree, self._frontier) = self._history.pop()
return 1
def expandable_productions(self):
"""
@return: A list of all the productions for which expansions
are available for the current parser state.
@rtype: C{list} of C{CFGProduction}
"""
# Make sure we *can* expand.
if len(self._frontier) == 0: return []
frontier_child = self._tree[self._frontier[0]]
if (len(self._frontier) == 0 or
not isinstance(frontier_child, Tree)):
return []
return [p for p in self._grammar.productions()
if p.lhs().symbol() == frontier_child.node]
def untried_expandable_productions(self):
"""
@return: A list of all the untried productions for which
expansions are available for the current parser state.
@rtype: C{list} of C{CFGProduction}
"""
LEAF = self.property('LEAF')
tried_expansions = self._tried_e.get(self._freeze(self._tree), [])
return [p for p in self.expandable_productions()
if p not in tried_expansions]
def untried_match(self):
"""
@return: Whether the first element of the frontier is a token
that has not yet been matched.
@rtype: C{boolean}
"""
LEAF = self.property('LEAF')
if len(self._rtext) == 0: return 0
tried_matches = self._tried_m.get(self._freeze(self._tree), [])
return (self._rtext[0] not in tried_matches)
def currently_complete(self):
"""
@return: Whether the parser's current state represents a
complete parse.
@rtype: C{boolean}
"""
return (len(self._frontier) == 0 and len(self._rtext) == 0)
def _parse(self, remaining_text, tree, frontier):
"""
        A stub version of C{_parse} that sets the parser's current
state to the given arguments. In C{RecursiveDescentParser},
the C{_parse} method is used to recursively continue parsing a
text. C{SteppingRecursiveDescentParser} overrides it to
capture these recursive calls. It records the parser's old
state in the history (to allow for backtracking), and updates
the parser's new state using the given arguments. Finally, it
returns C{[1]}, which is used by C{match} and C{expand} to
detect whether their operations were successful.
@return: C{[1]}
@rtype: C{list} of C{int}
"""
self._history.append( (self._rtext, self._tree, self._frontier) )
self._rtext = remaining_text
self._tree = tree
self._frontier = frontier
# Is it a good parse? If so, record it.
if (len(frontier) == 0 and len(remaining_text) == 0):
self._parses.append(tree)
self._trace_succeed(self._tree, self._frontier)
return [1]
def parses(self):
"""
@return: A list of the parses that have been found by this
parser so far.
@rtype: C{list} of C{Tree}
"""
return self._parses
##//////////////////////////////////////////////////////
## Demonstration Code
##//////////////////////////////////////////////////////
def demo():
"""
A demonstration of the parsers defined by nltk.parser. The user
is prompted to select which parser to run, and that parser is run
on an example sentence with a simple grammar.
"""
# Define some nonterminals
S, VP, NP, PP = nonterminals('S, VP, NP, PP')
V, N, P, Name, Det = nonterminals('V, N, P, Name, Det')
# Define a grammar.
productions = (
# Syntactic Productions
CFGProduction(S, [NP, 'saw', NP]),
CFGProduction(S, [NP, VP]),
CFGProduction(NP, [Det, N]),
CFGProduction(VP, [V, NP, PP]),
CFGProduction(NP, [Det, N, PP]),
CFGProduction(PP, [P, NP]),
# Lexical Productions
CFGProduction(NP, ['I']), CFGProduction(Det, ['the']),
CFGProduction(Det, ['a']), CFGProduction(N, ['man']),
CFGProduction(V, ['saw']), CFGProduction(P, ['in']),
CFGProduction(P, ['with']), CFGProduction(N, ['park']),
CFGProduction(N, ['dog']), CFGProduction(N, ['telescope'])
)
grammar = CFG(S, productions)
# Tokenize a sample sentence.
sent = Token(TEXT='I saw a man in the park')
from nltk.tokenizer import WhitespaceTokenizer
WhitespaceTokenizer(SUBTOKENS='WORDS').tokenize(sent)
# Define a list of parsers.
parsers = [ShiftReduceParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS'),
RecursiveDescentParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS'),
SteppingShiftReduceParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS'),
SteppingRecursiveDescentParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS')]
# Ask the user to choose a parser.
import sys
print 'Choose a parser:'
for i in range(len(parsers)):
print ' %d. %s' % (i+1, parsers[i].__class__.__name__)
print '=> ',
try: parser = parsers[int(sys.stdin.readline())-1]
except: print 'Bad input'; return
# Run the parser.
parser.trace()
for p in parser.get_parse_list(sent):
print p
if __name__ == '__main__': demo()
|
ronaldahmed/robot-navigation
|
neural-navigation-with-lstm/MARCO/nltk/parser/__init__.py
|
Python
|
mit
| 52,728 | 0.002731 |
from .gaussian_process import RandomFeatureGaussianProcess, mean_field_logits
from .spectral_normalization import SpectralNormalization
|
gagnonlg/explore-ml
|
sngp/tf_import/__init__.py
|
Python
|
gpl-3.0
| 136 | 0 |
__author__ = 'alexander'
def fibonacci(n):
voorlaatste_cijfer = 1
laatste_cijfer = 1
print(voorlaatste_cijfer)
print(laatste_cijfer)
    for i in range(n-2):  # n - 2, because we already know the first two numbers
nieuw_cijfer = laatste_cijfer + voorlaatste_cijfer
print(nieuw_cijfer)
voorlaatste_cijfer = laatste_cijfer
laatste_cijfer = nieuw_cijfer
fibonacci(6)
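# The call above prints the first six Fibonacci numbers: 1 1 2 3 5 8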
|
Alexanderkorn/Automatisation
|
oude scripts/les 5/fibonacci-reeks.py
|
Python
|
gpl-3.0
| 415 | 0.007264 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 7, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0);
|
antoinecarme/pyaf
|
tests/artificial/transf_RelativeDifference/trend_MovingAverage/cycle_7/ar_/test_artificial_32_RelativeDifference_MovingAverage_7__20.py
|
Python
|
bsd-3-clause
| 276 | 0.083333 |
from __future__ import print_function
import configparser as cp
import fileOps as f
import err
import logging
#EXAMPLE_LIST_FN = 'example_list'
example_list_str = \
'''0_vanDerPol = ./examples/vanderpol_python/
1_vanDerPol = ./examples/vanderpol_m_file/
2_dc = ./examples/dc_controller_hand_coded/
3_dci = ./examples/dc_controller_hand_coded_input/
4_ex1a = ./examples/ex1a/
5_ex1b = ./examples/ex1b/
6_AbstractFuelControl = ./examples/abstractFuelControl/
7_AbstractFuelControl = ./examples/abstractFuelControlCombined/
8_fuzzy_invp = ./examples/fuzzy_invp/
9_heater = ./examples/heater/
10_GIF = ./examples/GI_fisher/'''
logger = logging.getLogger(__name__)
def get_example_list_old():
## ##!!##logger.debug('reading example listing: {}'.format(EXAMPLE_LIST_FN))
example_dict = cp.parse_config(example_list_str)
# TODO: Remedy below hack
# Ugly hack: parse_config() adds type = string
# To fix it, we just delete it.
del example_dict['type']
example_list = [0] * len(example_dict)
for (k, v) in example_dict.iteritems():
d = {}
(n, k) = k.split('_', 1) # split only on the first '_'
d['filename'] = k + '.tst'
d['path'] = v
d['description'] = v + k
example_list[int(n)] = d
return example_list
def get_example_list():
return crawl_examples()
def crawl_examples():
EXAMPLE_DICT = './examples/'
TST_FILE_GLOB_PATTERN = '*.tst'
example_list = []
sub_dir_list = f.get_sub_dir_listing(EXAMPLE_DICT)
for sub_dir in sub_dir_list:
file_name_list = f.get_file_list_matching(TST_FILE_GLOB_PATTERN, sub_dir)
if len(file_name_list) > 1:
raise err.Fatal('More than one .tst file found!! {}'.format(file_name_list))
if len(file_name_list) != 0:
file_path = f.get_abs_base_path(file_name_list[0])
system_name = f.get_file_name_from_path(file_name_list[0])
d = {}
d['filename'] = system_name
d['path'] = file_path
d['description'] = '{:-<50} {}'.format(system_name, file_path)
example_list.append(d)
return example_list
if __name__ == '__main__':
for i in crawl_examples():
print(i['description'])
|
zutshi/S3CAMX
|
src/example_list.py
|
Python
|
bsd-2-clause
| 2,248 | 0.003114 |
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.kickstart
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides some simple shortcuts to make using Werkzeug simpler
for small scripts.
These improvements include predefined `Request` and `Response` objects as
well as a predefined `Application` object which can be customized in child
    classes, of course. The `Request` and `Response` objects handle URL
generation as well as sessions via `werkzeug.contrib.sessions` and are
purely optional.
There is also some integration of template engines. The template loaders
    are, of course, not necessary to use the template engines in Werkzeug,
but they provide a common interface. Currently supported template engines
include Werkzeug's minitmpl and Genshi_. Support for other engines can be
added in a trivial way. These loaders provide a template interface
similar to the one used by Django_.
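    A minimal usage sketch (the URL map and view function below are
    illustrative only, not part of this module)::
        from werkzeug.routing import Map, Rule
        def index(request):
            return Response('Hello World!')
        url_map = Map([Rule('/', endpoint=index)])
        application = Application('example', url_map)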
.. _Genshi: http://genshi.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from os import path
from ..wrappers import Request as RequestBase, Response as ResponseBase
from ..templates import Template
from ..exceptions import HTTPException
from ..routing import RequestRedirect
__all__ = ['Request', 'Response', 'TemplateNotFound', 'TemplateLoader',
'GenshiTemplateLoader', 'Application']
from warnings import warn
warn(DeprecationWarning('werkzeug.contrib.kickstart is deprecated and '
'will be removed in Werkzeug 1.0'))
class Request(RequestBase):
"""A handy subclass of the base request that adds a URL builder.
    When supplied with a session store, it is also able to handle sessions.
"""
def __init__(self, environ, url_map,
session_store=None, cookie_name=None):
# call the parent for initialization
RequestBase.__init__(self, environ)
# create an adapter
self.url_adapter = url_map.bind_to_environ(environ)
# create all stuff for sessions
self.session_store = session_store
self.cookie_name = cookie_name
if session_store is not None and cookie_name is not None:
if cookie_name in self.cookies:
# get the session out of the storage
self.session = session_store.get(self.cookies[cookie_name])
else:
# create a new session
self.session = session_store.new()
def url_for(self, callback, **values):
return self.url_adapter.build(callback, values)
class Response(ResponseBase):
"""
A subclass of base response which sets the default mimetype to text/html.
    If the `Request` that came in is using Werkzeug sessions, this class
takes care of saving that session.
"""
default_mimetype = 'text/html'
def __call__(self, environ, start_response):
# get the request object
request = environ['werkzeug.request']
if request.session_store is not None:
            # save the session if necessary
request.session_store.save_if_modified(request.session)
# set the cookie for the browser if it is not there:
if request.cookie_name not in request.cookies:
self.set_cookie(request.cookie_name, request.session.sid)
# go on with normal response business
return ResponseBase.__call__(self, environ, start_response)
class Processor(object):
"""A request and response processor - it is what Django calls a
    middleware, but Werkzeug also includes straightforward support for real
WSGI middlewares, so another name was chosen.
The code of this processor is derived from the example in the Werkzeug
trac, called `Request and Response Processor
<http://dev.pocoo.org/projects/werkzeug/wiki/RequestResponseProcessor>`_
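    A subclass only needs to override the hooks it is interested in, for
    example (illustrative)::
        class LoggingProcessor(Processor):
            def process_request(self, request):
                print 'handling', request.path
                return request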
"""
def process_request(self, request):
return request
def process_response(self, request, response):
return response
def process_view(self, request, view_func, view_args, view_kwargs):
"""process_view() is called just before the Application calls the
function specified by view_func.
If this returns None, the Application processes the next Processor,
and if it returns something else (like a Response instance), that
will be returned without any further processing.
"""
return None
def process_exception(self, request, exception):
return None
class Application(object):
"""A generic WSGI application which can be used to start with Werkzeug in
an easy, straightforward way.
"""
def __init__(self, name, url_map, session=False, processors=None):
# save the name and the URL-map, as it'll be needed later on
self.name = name
self.url_map = url_map
# save the list of processors if supplied
self.processors = processors or []
# create an instance of the storage
if session:
self.store = session
else:
self.store = None
def __call__(self, environ, start_response):
# create a request - with or without session support
if self.store is not None:
request = Request(environ, self.url_map,
session_store=self.store, cookie_name='%s_sid' % self.name)
else:
request = Request(environ, self.url_map)
# apply the request processors
for processor in self.processors:
request = processor.process_request(request)
try:
# find the callback to which the URL is mapped
callback, args = request.url_adapter.match(request.path)
except (HTTPException, RequestRedirect), e:
response = e
else:
# check all view processors
for processor in self.processors:
action = processor.process_view(request, callback, (), args)
if action is not None:
# it is overriding the default behaviour, this is
# short-circuiting the processing, so it returns here
return action(environ, start_response)
try:
response = callback(request, **args)
except Exception, exception:
# the callback raised some exception, need to process that
for processor in reversed(self.processors):
# filter it through the exception processor
action = processor.process_exception(request, exception)
if action is not None:
# the exception processor returned some action
return action(environ, start_response)
                # still not handled by an exception processor, so re-raise
raise
# apply the response processors
for processor in reversed(self.processors):
response = processor.process_response(request, response)
# return the completely processed response
return response(environ, start_response)
def config_session(self, store, expiration='session'):
"""
Configures the setting for cookies. You can also disable cookies by
setting store to None.
"""
self.store = store
# expiration=session is the default anyway
# TODO: add settings to define the expiration date, the domain, the
        # path, and maybe the secure parameter.
class TemplateNotFound(IOError, LookupError):
"""
A template was not found by the template loader.
"""
def __init__(self, name):
IOError.__init__(self, name)
self.name = name
class TemplateLoader(object):
"""
A simple loader interface for the werkzeug minitmpl
template language.
"""
def __init__(self, search_path, encoding='utf-8'):
self.search_path = path.abspath(search_path)
self.encoding = encoding
def get_template(self, name):
"""Get a template from a given name."""
filename = path.join(self.search_path, *[p for p in name.split('/')
if p and p[0] != '.'])
if not path.exists(filename):
raise TemplateNotFound(name)
return Template.from_file(filename, self.encoding)
def render_to_response(self, *args, **kwargs):
"""Load and render a template into a response object."""
return Response(self.render_to_string(*args, **kwargs))
def render_to_string(self, *args, **kwargs):
"""Load and render a template into a unicode string."""
try:
template_name, args = args[0], args[1:]
except IndexError:
raise TypeError('name of template required')
return self.get_template(template_name).render(*args, **kwargs)
class GenshiTemplateLoader(TemplateLoader):
"""A unified interface for loading Genshi templates. Actually a quite thin
wrapper for Genshi's TemplateLoader.
It sets some defaults that differ from the Genshi loader, most notably
    auto_reload is active. All important options can be passed through to
Genshi.
The default output type is 'html', but can be adjusted easily by changing
the `output_type` attribute.
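    Example (the search path and template name are illustrative)::
        loader = GenshiTemplateLoader('templates')
        response = loader.render_to_response('index.html', {'title': 'Home'})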
"""
def __init__(self, search_path, encoding='utf-8', **kwargs):
TemplateLoader.__init__(self, search_path, encoding)
# import Genshi here, because we don't want a general Genshi
# dependency, only a local one
from genshi.template import TemplateLoader as GenshiLoader
from genshi.template.loader import TemplateNotFound
self.not_found_exception = TemplateNotFound
# set auto_reload to True per default
reload_template = kwargs.pop('auto_reload', True)
        # get rid of default_encoding as this template loader overwrites it
# with the value of encoding
kwargs.pop('default_encoding', None)
# now, all arguments are clean, pass them on
self.loader = GenshiLoader(search_path, default_encoding=encoding,
auto_reload=reload_template, **kwargs)
# the default output is HTML but can be overridden easily
self.output_type = 'html'
self.encoding = encoding
def get_template(self, template_name):
"""Get the template which is at the given name"""
try:
return self.loader.load(template_name, encoding=self.encoding)
except self.not_found_exception, e:
# catch the exception raised by Genshi, convert it into a werkzeug
# exception (for the sake of consistency)
raise TemplateNotFound(template_name)
def render_to_string(self, template_name, context=None):
"""Load and render a template into an unicode string"""
# create an empty context if no context was specified
context = context or {}
tmpl = self.get_template(template_name)
# render the template into a unicode string (None means unicode)
return tmpl. \
generate(**context). \
render(self.output_type, encoding=None)
|
AntonioMtn/NZBMegaSearch
|
werkzeug/contrib/kickstart.py
|
Python
|
gpl-2.0
| 11,308 | 0.000354 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2014 AvanzOSC (Daniel). All Rights Reserved
# Date: 20/02/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import wizard
|
avanzosc/avanzosc6.1
|
avanzosc_stpick_expectdate/__init__.py
|
Python
|
agpl-3.0
| 1,019 | 0.003925 |
def itemTemplate():
return ['object/tangible/loot/npc_loot/shared_software_module_orange_generic.iff']
def customItemName():
return 'Mark V Vocab Module'
def lootDescriptor():
return 'customattributes'
def customizationAttributes():
return ['/private/index_color_1']
def customizationValues():
return [0]
def stackable():
return 1
def junkDealerPrice():
return 28
def junkType():
return 0
|
ProjectSWGCore/NGECore2
|
scripts/loot/lootItems/re_junk/mark_v_vocab_module.py
|
Python
|
lgpl-3.0
| 418 | 0.064593 |
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add start points for existing sandboxes
"""
import os, os.path
import sys
import logging
BINDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.join(BINDIR, "..", "..", "lib", "python2.6"))
import aquilon.aqdb.depends
import aquilon.worker.depends
from aquilon.config import Config
from sqlalchemy.orm import defer
from sqlalchemy.sql import text
from sqlalchemy.exc import DatabaseError
from aquilon.aqdb.model import Base, Sandbox, Domain
from aquilon.aqdb.db_factory import DbFactory
from aquilon.worker.processes import run_git
db = DbFactory()
Base.metadata.bind = db.engine
session = db.Session()
config = Config()
def main():
print "Calculating sandbox base commits. This may take around 10 minutes."
logging.basicConfig(level=logging.WARNING)
kingdir = config.get("broker", "kingdir")
domains = session.query(Domain).all()
# Define preference order when multiple domains have the same commits.
# This is just cosmetics, but makes it easier to verify the output.
for idx, domain in enumerate(("prod", "qa", "secure-aquilon-prod",
"secure-aquilon-qa")):
dbdom = Domain.get_unique(session, domain, compel=True)
domains.remove(dbdom)
domains.insert(idx, dbdom)
base_commits = {}
q = session.query(Sandbox)
q = q.order_by('name')
# The base_commit column does not exist yet...
q = q.options(defer("base_commit"))
for sandbox in q:
base_domain = None
base_commit = None
min_ahead = None
commits = run_git(["rev-list", "refs/heads/" + sandbox.name], path=kingdir).split("\n")
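        # rev-list returns commits newest-first, so the index of the merge
        # base in this list is the number of commits the sandbox is ahead
        # of that domain.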
for domain in domains:
merge_base = run_git(["merge-base", "refs/heads/" + sandbox.name,
"refs/heads/" + domain.name],
path=kingdir).strip()
# Number of commits since branching from the given domain
ahead = commits.index(merge_base)
if base_domain is None or ahead < min_ahead:
base_domain = domain
base_commit = merge_base
min_ahead = ahead
if min_ahead == 0:
break
print "{0: <40}: {1.name} (ahead {2})".format(sandbox, base_domain,
min_ahead)
base_commits[sandbox.name] = base_commit
session.expunge_all()
try:
if session.bind.dialect.name == 'oracle':
query = text("""
ALTER TABLE sandbox ADD base_commit VARCHAR2(40 CHAR)
""")
elif session.bind.dialect.name == 'postgresql':
query = text("""
ALTER TABLE sandbox ADD base_commit CHARACTER VARYING (40)
""")
print "\nExecuting: %s" % query
session.execute(query)
session.commit()
except DatabaseError:
# Allow the script to be re-run by not failing if the column already
# exists. If the column does not exist, then trying to update it will
# fail anyway.
print """
WARNING: Adding the sandbox.base_commit column has failed. If you're running
this script for the second time, then that's likely OK, otherwise you should
verify and correct the schema manually.
"""
session.rollback()
for sandbox in q:
sandbox.base_commit = base_commits[sandbox.name]
session.commit()
try:
if session.bind.dialect.name == 'oracle':
query = text("""
ALTER TABLE sandbox MODIFY (base_commit VARCHAR2(40 CHAR)
CONSTRAINT sandbox_base_commit_nn NOT NULL)
""")
elif session.bind.dialect.name == 'postgresql':
query = text("""
ALTER TABLE sandbox ALTER COLUMN base_commit SET NOT NULL
""")
print "\nExecuting: %s" % query
session.execute(query)
session.commit()
except DatabaseError:
print """
WARNING: Enabling the NOT NULL constraint for sandbox.base_commit column has
failed. If you're running this script for the second time, then that's likely
OK, otherwise you should verify and correct the schema manually.
"""
session.rollback()
if __name__ == '__main__':
main()
|
stdweird/aquilon
|
upgrade/1.8.18/add_sandbox_startpoint.py
|
Python
|
apache-2.0
| 4,924 | 0.002437 |
import pytest
from canon.seq.seqreader import SeqReader
from .. import resource
def test_read_seq():
reader = SeqReader(resource('seq/Quartz_500Mpa_.SEQ'))
reader.get_Om()
Z, _, N = reader.get_Zmap('orsnr___')
def test_merge_Zmap():
reader = SeqReader()
reader.read_seq(resource('seq/au30_a1_.SEQ'))
Z1, _, N1 = reader.get_Zmap('orsnr___')
reader.read_seq(resource('seq/au30_m1_.SEQ'))
Z2, _, N2 = reader.get_Zmap('orsnr___')
Z, N = SeqReader.merge_Zmap(Z1, Z2, N1, N2)
if __name__ == '__main__':
pytest.main()
|
structrans/Canon
|
test/seq/test_seqreader.py
|
Python
|
mit
| 560 | 0 |
# -*- coding: utf-8 -*-
"""
blohg.vcs_backends.git.changectx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Model with classes to represent Git change context.
:copyright: (c) 2010-2013 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
import time
from flask.helpers import locked_cached_property
from pygit2 import Repository, GIT_OBJ_BLOB, GIT_OBJ_TREE
from zlib import adler32
from blohg.vcs_backends.git.filectx import FileCtx
from blohg.vcs import ChangeCtx
class ChangeCtxDefault(ChangeCtx):
"""Class with the specific implementation details for the change context
of the default revision state of the repository. It inherits the common
implementation from the class :class:`ChangeCtxBase`.
"""
def __init__(self, repo_path):
self._repo_path = repo_path
self._repo = Repository(self._repo_path)
self._ctx = self._repo[self.revision_id]
@locked_cached_property
def files(self):
def r(_files, repo, tree, prefix=None):
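            # Recursively walk the tree, collecting the repository-relative
            # paths of all blobs (files).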
for entry in tree:
obj = repo[entry.oid]
filename = prefix and (prefix + '/' + entry.name) or entry.name
if obj.type == GIT_OBJ_TREE:
r(_files, repo, obj, filename)
elif obj.type == GIT_OBJ_BLOB:
_files.append(filename)
else:
raise RuntimeError('Invalid object: %s' % filename)
f = []
r(f, self._repo, self._ctx.tree)
return sorted(f)
@locked_cached_property
def revision_id(self):
"""This property should be cached because the lookup_reference method
reloads itself.
"""
try:
ref = self._repo.lookup_reference('refs/heads/master')
except Exception:
raise RuntimeError('Branch "master" not found!')
return ref.target
def needs_reload(self):
try:
ref = self._repo.lookup_reference('refs/heads/master')
except Exception:
return True
return self.revision_id != ref.target
def filectx_needs_reload(self, filectx):
try:
ref = self._repo.lookup_reference('refs/heads/master')
except Exception:
raise RuntimeError('Branch "master" not found!')
return filectx._changectx.oid != ref.target
def published(self, date, now):
return date <= now
def etag(self, filectx):
return 'blohg-%i-%i-%s' % (filectx.mdate or filectx.date,
len(filectx.data),
adler32(filectx.path.encode('utf-8'))
& 0xffffffff)
def get_filectx(self, path):
return FileCtx(self._repo, self._ctx, path)
class ChangeCtxWorkingDir(ChangeCtxDefault):
"""Class with the specific implementation details for the change context
of the working dir of the repository. It inherits the common implementation
from the class :class:`ChangeCtxBase`.
"""
@locked_cached_property
def revision_id(self):
if self._repo.workdir is None:
raise RuntimeError('Bare repositories should be deployed with '
'REVISION_DEFAULT change context')
try:
return self._repo.head.target
except Exception:
raise RuntimeError('HEAD reference not found! Please do your '
'first commit.')
@locked_cached_property
def files(self):
return [entry.path for entry in self._repo.index]
def needs_reload(self):
"""This change context is mainly used by the command-line tool, and
        doesn't provide any reliable way to evaluate its "freshness". Always
reload.
"""
return True
def filectx_needs_reload(self, filectx):
return True
def published(self, date, now):
return True
def etag(self, filectx):
return 'blohg-%i-%i-%s' % (time.time(), len(filectx.data),
adler32(filectx.path.encode('utf-8'))
& 0xffffffff)
def get_filectx(self, path):
return FileCtx(self._repo, self._ctx, path, use_index=True)
|
rafaelmartins/blohg
|
blohg/vcs_backends/git/changectx.py
|
Python
|
gpl-2.0
| 4,270 | 0 |
from __future__ import unicode_literals
from django.apps import AppConfig
class GooglemapstweetConfig(AppConfig):
name = 'googleMapsTweet'
|
anshulsharmanyu/twitter_plot
|
Twitter Map Cloud Assignment/googleMapsTweet/apps.py
|
Python
|
gpl-3.0
| 145 | 0.006897 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reading and writing variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
class VariableOpsTest(xla_test.XLATestCase):
"""Test cases for resource variable operators."""
def testWriteEmptyShape(self):
# Verifies that we can pass an uninitialized variable with an empty shape,
# assign it a value, and successfully return it.
for dtype in self.numeric_types:
with self.test_session() as sess, self.test_scope():
zeros = np.zeros([3, 0], dtype=dtype)
v = resource_variable_ops.ResourceVariable(zeros)
p = array_ops.placeholder(dtype)
x = v.assign(p)
with ops.control_dependencies([x]):
y = v.read_value()
self.assertAllClose(zeros, sess.run(y, {p: zeros}))
def testOneWriteOneOutput(self):
# Regression test for a bug where computations with one non-constant
# output and one variable update were mishandled.
for dtype in self.numeric_types:
init = np.array([[1, 2j], [3, 4]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
p = array_ops.placeholder(dtype)
x = v.assign_add(p)
with ops.control_dependencies([x]):
y = v.read_value()
self.assertAllClose(
np.array([[2, 1 + 2j], [4, 5]]).astype(dtype), sess.run(y, {p: 1}))
def testSparseRead0DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8j, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read(2)
self.assertAllClose(
np.array([8j, 9, 10, 11]).astype(dtype), self.evaluate(x))
def testSparseRead1DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6j, 7], [8, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([2, 1])
self.assertAllClose(
np.array([[8, 9, 10, 11], [4, 5, 6j, 7]]).astype(dtype),
self.evaluate(x))
def testSparseRead2DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2j, 3], [4, 5, 6, 7], [8, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([[2, 1], [0, 2]])
self.assertAllClose(
np.array([[[8, 9, 10, 11], [4, 5, 6, 7]],
[[0, 1, 2j, 3], [8, 9, 10, 11]]]).astype(dtype),
self.evaluate(x))
def testSparseRead2DIndices3DTensor(self):
for dtype in self.numeric_types:
init = np.array([[[0, 1, 2], [3, 4, 5]], [[10, 11, 12], [13, 14, 15]],
[[20, 21, 22], [23, 24j, 25]],
[[30, 31, 32], [33, 34, 35]]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([[2, 1], [3, 0]])
self.assertAllClose(
np.array(
[[[[20, 21, 22], [23, 24j, 25]], [[10, 11, 12], [13, 14, 15]]],
[[[30, 31, 32], [33, 34, 35]], [[0, 1, 2], [3, 4, 5]]]
],).astype(dtype), self.evaluate(x))
def testShape(self):
for dtype in self.numeric_types:
init = np.ones([2, 3]).astype(dtype)
with self.test_session() as session, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
session.run(variables.variables_initializer([v]))
h = v.handle
s32, s64 = session.run([
resource_variable_ops.variable_shape(h),
resource_variable_ops.variable_shape(h, out_type=dtypes.int64)
])
self.assertEqual(s32.dtype, np.int32)
self.assertEqual(s64.dtype, np.int64)
self.assertAllEqual(s32, [2, 3])
self.assertAllEqual(s64, [2, 3])
def testReadWrite(self):
"""Tests initialization, reading, and writing a resource variable."""
for dtype in self.numeric_types:
with self.test_session() as session:
with self.test_scope():
with variable_scope.variable_scope("ascope", use_resource=True):
x = variable_scope.get_variable(
"x",
shape=[],
dtype=dtype,
initializer=init_ops.constant_initializer(2))
a = x.read_value()
with ops.control_dependencies([a]):
b = state_ops.assign(x, dtype(47))
with ops.control_dependencies([b]):
c = x.read_value()
with ops.control_dependencies([c]):
d = state_ops.assign_add(x, np.array(6 + 2j).astype(dtype))
with ops.control_dependencies([d]):
e = state_ops.assign_sub(x, dtype(3))
with ops.control_dependencies([e]):
f = x.read_value()
session.run(variables.global_variables_initializer())
v1, v2, v3 = session.run([a, c, f])
self.assertAllClose(dtype(2), v1)
self.assertAllClose(dtype(47), v2)
self.assertAllClose(np.array(50 + 2j).astype(dtype), v3)
def testTraining(self):
"""Tests a gradient descent step for a simple model."""
with self.test_session() as session:
with self.test_scope():
with variable_scope.variable_scope("ascope", use_resource=True):
w = variable_scope.get_variable(
"w",
shape=[4, 2],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32)))
b = variable_scope.get_variable(
"b",
shape=[2],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
np.array([2, 3], dtype=np.float32)))
x = array_ops.placeholder(dtypes.float32, shape=[1, 4])
y = math_ops.matmul(x, w) + b
loss = math_ops.reduce_sum(y)
optimizer = GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
session.run(variables.global_variables_initializer())
session.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
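      # With loss = sum(x*w + b), dloss/dw broadcasts x across both output
      # columns and dloss/db is all ones, so one step with learning rate 0.1
      # gives w -= 0.1 * [[7,7],[3,3],[5,5],[9,9]] and b -= 0.1 * [1, 1],
      # which yields the expected values checked below.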
vw, vb = session.run([w, b])
self.assertAllClose(
np.array(
[[0.3, 1.3], [2.7, 3.7], [4.5, 5.5], [6.1, 7.1]],
dtype=np.float32),
vw,
rtol=1e-4)
self.assertAllClose(np.array([1.9, 2.9], dtype=np.float32), vb, rtol=1e-4)
def testWriteOfAliasedTensor(self):
for dtype in self.numeric_types:
init = np.array([[1, 2j], [3, 4]]).astype(dtype)
update = np.array([[7, 1j], [2, 11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
p = array_ops.placeholder(dtype)
q = array_ops.identity(p)
x = v.read_value()
# Writes the value of 'p' to 'v', but keeps a reference to the original
# value of 'v' so the variable update cannot reuse its buffer.
with ops.control_dependencies([x]):
y = v.assign(q)
result = sess.run([x, y, q], {p: update})
self.assertAllClose(init, result[0])
self.assertAllClose(update, result[1])
self.assertAllClose(update, result[2])
def testScatterAdd(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[2, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1], [7]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertAllEqual(self.evaluate(read), [[3], [7]])
def testScatterSub(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[2, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[4], [1]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_sub(
handle, [1], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertAllEqual(self.evaluate(read), [[4], [-1]])
def testScatterMul(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant([[5]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testScatterDiv(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertAllEqual(self.evaluate(read), [[2]])
def testScatterMin(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testScatterMax(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
def testScatterUpdate(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_update(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testScatterAddScalar(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testScatterSubScalar(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
def testScatterMulScalar(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testScatterDivScalar(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
def testScatterMinScalar(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testScatterMaxScalar(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
sess.run(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
def testScatterNdAddOps(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.float32, shape=[8])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1] * 8, dtype=dtypes.float32)))
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
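      # Starting from all ones, the adds land at indices 4, 3, 1 and 7:
      # 1+9=10, 1+10=11, 1+11=12 and 1+12=13, giving the expected vector.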
expected = np.array([1, 12, 1, 11, 10, 1, 1, 13])
sess.run(gen_state_ops.resource_scatter_nd_add(handle, indices, updates))
read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.float32)
self.assertAllClose(expected, self.evaluate(read))
def testScatterNdUpdateAddOps(self):
with self.test_session() as sess, self.test_scope():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.float32, shape=[8])
sess.run(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1] * 8, dtype=dtypes.float32)))
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
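      # Here the updates overwrite instead of accumulate: indices 4, 3, 1 and
      # 7 become 9, 10, 11 and 12, while the untouched entries stay at 1.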
expected = np.array([1, 11, 1, 10, 9, 1, 1, 12])
sess.run(
gen_state_ops.resource_scatter_nd_update(handle, indices, updates))
read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.float32)
self.assertAllClose(expected, self.evaluate(read))
class StridedSliceAssignChecker(object):
"""Compares the results of a slice assignment using Tensorflow and numpy."""
def __init__(self, test, x, dtype):
self.dtype = dtype
self.test = test
self.x_np = np.array(x).astype(dtype)
# Randomly start on mode 0 or 1.
self.which_mode = np.random.randint(2, size=1)[0]
def __setitem__(self, index, value):
self.which_mode = 1 - self.which_mode
value = np.array(value).astype(self.dtype)
with self.test.test_session() as sess, self.test.test_scope():
x = constant_op.constant(self.x_np, dtype=self.dtype)
var = resource_variable_ops.ResourceVariable(x)
sess.run(variables.variables_initializer([var]))
if self.which_mode == 0:
val = sess.run(var[index].assign(value))
else:
assert self.which_mode == 1
val = sess.run(state_ops.assign(var[index], value))
valnp = np.copy(self.x_np)
valnp[index] = np.array(value)
self.test.assertAllEqual(val, valnp)
class SliceAssignTest(xla_test.XLATestCase):
def testSliceAssign(self):
for dtype in self.numeric_types:
checker = StridedSliceAssignChecker(
self, [[1, 2, 3], [4, 5, 6]], dtype=dtype)
# No-op assignment
checker[:] = [[10, 20, 30], [40, 50, 60]]
# Checks trivial (1,1) shape tensor
checker[1:2, 1:2] = [[66]]
# shrink shape changes
checker[1:2, 1] = [66]
checker[1, 1:2] = [66]
if dtype != dtypes.bfloat16.as_numpy_dtype:
# TODO(b/68813416): valnp call above results in an ndarray and not a
# number for bfloat16s.
checker[1, 1] = 66
# newaxis shape changes
checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
# shrink and newaxis
checker[None, None, 0, 0:1] = [[[99]]]
# Non unit strides
checker[::1, 1::-1] = [[3, 33], [4, 44]]
# degenerate interval
checker[8:10, 0] = []
checker[8:10, 8:10] = [[]]
# Assign vector to scalar (rank-0) using newaxis
checker2 = StridedSliceAssignChecker(self, 222, dtype=dtype)
if dtype != dtypes.bfloat16.as_numpy_dtype:
# TODO(b/68813416): valnp call above results in an ndarray and not a
# number for bfloat16s.
checker2[()] = 6 # no indices
checker2[...] = 6 # ellipsis
checker2[None] = [6] # new axis
def testUninitialized(self):
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"uninitialized variable"):
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable([1, 2])
sess.run(v[:].assign([1, 2]))
if __name__ == "__main__":
googletest.main()
|
jbedorf/tensorflow
|
tensorflow/compiler/tests/variable_ops_test.py
|
Python
|
apache-2.0
| 21,657 | 0.008635 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Inmarsat Playback
# Generated: Mon Aug 21 21:42:34 2017
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import mapper
import math
import sip
import sys
from gnuradio import qtgui
class inmarsat_playback(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Inmarsat Playback")
Qt.QWidget.__init__(self)
self.setWindowTitle("Inmarsat Playback")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "inmarsat_playback")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 250e3
self.interp = interp = 48
self.decim = decim = 250
self.baud = baud = 1200
self.samp_per_sym = samp_per_sym = (samp_rate/decim*interp)/baud
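        # With the defaults this is (250e3 / 250 * 48) / 1200 = 40 samples per
        # symbol after the rational resampler.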
self.alpha = alpha = 0.5
self.xlate_filter_taps = xlate_filter_taps = firdes.low_pass(1,samp_rate, samp_rate/2, 1000, firdes.WIN_HAMMING, 6.76)
self.rrc_filter_taps = rrc_filter_taps = firdes.root_raised_cosine(32, 1.0, 1.0/(samp_per_sym*32), alpha, int(samp_per_sym*32))
self.loop_bw = loop_bw = 300
self.freq_correct = freq_correct = -28.7e3
self.delay = delay = 0
self.cutoff = cutoff = 4800
self.cols = cols = 54
##################################################
# Blocks
##################################################
self._freq_correct_tool_bar = Qt.QToolBar(self)
self._freq_correct_tool_bar.addWidget(Qt.QLabel("freq_correct"+": "))
self._freq_correct_line_edit = Qt.QLineEdit(str(self.freq_correct))
self._freq_correct_tool_bar.addWidget(self._freq_correct_line_edit)
self._freq_correct_line_edit.returnPressed.connect(
lambda: self.set_freq_correct(eng_notation.str_to_num(str(self._freq_correct_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._freq_correct_tool_bar, 12,0,1,2)
self._delay_tool_bar = Qt.QToolBar(self)
self._delay_tool_bar.addWidget(Qt.QLabel("delay"+": "))
self._delay_line_edit = Qt.QLineEdit(str(self.delay))
self._delay_tool_bar.addWidget(self._delay_line_edit)
self._delay_line_edit.returnPressed.connect(
lambda: self.set_delay(int(str(self._delay_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._delay_tool_bar, 12,2,1,2)
self._cutoff_tool_bar = Qt.QToolBar(self)
self._cutoff_tool_bar.addWidget(Qt.QLabel("cutoff"+": "))
self._cutoff_line_edit = Qt.QLineEdit(str(self.cutoff))
self._cutoff_tool_bar.addWidget(self._cutoff_line_edit)
self._cutoff_line_edit.returnPressed.connect(
lambda: self.set_cutoff(eng_notation.str_to_num(str(self._cutoff_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._cutoff_tool_bar, 13,0,1,2)
self._cols_range = Range(1, 500, 1, 54, 200)
self._cols_win = RangeWidget(self._cols_range, self.set_cols, "cols", "counter_slider", float)
self.top_grid_layout.addWidget(self._cols_win, 12,4,1,4)
self.rational_resampler_xxx_0_0 = filter.rational_resampler_ccc(
interpolation=interp,
decimation=decim,
taps=None,
fractional_bw=None,
)
self.rational_resampler_xxx_0 = filter.rational_resampler_ccc(
interpolation=1,
decimation=1,
taps=None,
fractional_bw=None,
)
self.qtgui_waterfall_sink_x_0_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate/decim*interp, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0.set_update_time(0.01)
self.qtgui_waterfall_sink_x_0_0.enable_grid(True)
self.qtgui_waterfall_sink_x_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0.set_intensity_range(-60, -10)
self._qtgui_waterfall_sink_x_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_0_win, 4,4,4,4)
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
4096, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.01)
self.qtgui_waterfall_sink_x_0.enable_grid(True)
self.qtgui_waterfall_sink_x_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_win, 4,0,4,4)
self.qtgui_time_raster_sink_x_0 = qtgui.time_raster_sink_b(
samp_rate,
20,
cols,
([]),
([]),
"",
1,
)
self.qtgui_time_raster_sink_x_0.set_update_time(0.10)
self.qtgui_time_raster_sink_x_0.set_intensity_range(-1, 1)
self.qtgui_time_raster_sink_x_0.enable_grid(False)
self.qtgui_time_raster_sink_x_0.enable_axis_labels(True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_raster_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_raster_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_raster_sink_x_0.set_color_map(i, colors[i])
self.qtgui_time_raster_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_raster_sink_x_0_win = sip.wrapinstance(self.qtgui_time_raster_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_raster_sink_x_0_win, 8,4,4,4)
self.qtgui_freq_sink_x_1 = qtgui.freq_sink_f(
4096, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate/decim*interp, #bw
"baud", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_1.set_update_time(0.10)
self.qtgui_freq_sink_x_1.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_1.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1.enable_autoscale(False)
self.qtgui_freq_sink_x_1.enable_grid(False)
self.qtgui_freq_sink_x_1.set_fft_average(1.0)
self.qtgui_freq_sink_x_1.enable_axis_labels(True)
self.qtgui_freq_sink_x_1.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_1.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_freq_sink_x_1.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_1.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_freq_sink_x_1_win)
self.qtgui_freq_sink_x_0_0_0 = qtgui.freq_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate/decim*interp, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0_0.set_update_time(0.01)
self.qtgui_freq_sink_x_0_0_0.set_y_axis(-60, -10)
self.qtgui_freq_sink_x_0_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_0.enable_grid(True)
self.qtgui_freq_sink_x_0_0_0.set_fft_average(0.2)
self.qtgui_freq_sink_x_0_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0_0.set_plot_pos_half(not True)
labels = ['', 'corrected', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_0_0_win, 0,4,4,4)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
4096, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"pre-d", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.01)
self.qtgui_freq_sink_x_0.set_y_axis(-100, -20)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(True)
self.qtgui_freq_sink_x_0.set_fft_average(0.2)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['pre-d', 'corrected', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_win, 0,0,4,4)
self.qtgui_const_sink_x_0 = qtgui.const_sink_c(
1024, #size
"", #name
1 #number of inputs
)
self.qtgui_const_sink_x_0.set_update_time(0.10)
self.qtgui_const_sink_x_0.set_y_axis(-2, 2)
self.qtgui_const_sink_x_0.set_x_axis(-2, 2)
self.qtgui_const_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0.enable_autoscale(False)
self.qtgui_const_sink_x_0.enable_grid(False)
self.qtgui_const_sink_x_0.enable_axis_labels(True)
if not True:
self.qtgui_const_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_win = sip.wrapinstance(self.qtgui_const_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_win, 8,0,1,4)
self.mapper_demapper_soft_0 = mapper.demapper_soft(mapper.BPSK, ([0,1]))
self.low_pass_filter_0_0 = filter.fir_filter_ccf(1, firdes.low_pass(
1, samp_rate/decim*interp, cutoff, 1000, firdes.WIN_HAMMING, 6.76))
self.freq_xlating_fir_filter_xxx_0_1 = filter.freq_xlating_fir_filter_ccc(1, (xlate_filter_taps), freq_correct, samp_rate)
self.freq_xlating_fir_filter_xxx_0 = filter.freq_xlating_fir_filter_ccc(1, (xlate_filter_taps), 0, samp_rate)
self.digital_diff_decoder_bb_0 = digital.diff_decoder_bb(2)
self.digital_costas_loop_cc_0 = digital.costas_loop_cc(math.pi*2/loop_bw, 2, False)
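        # M&M clock recovery: omega is the nominal samples per symbol (~40
        # here), followed by the omega gain, initial mu, mu gain and the
        # relative limit on omega deviation, per the clock_recovery_mm_cc
        # argument order.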
self.digital_clock_recovery_mm_xx_0 = digital.clock_recovery_mm_cc(samp_per_sym*(1+0.0), 0.25*0.175*0.175, 0.5, 0.175, 0.005)
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, '/home/zleffke/github/eclipse2017/waveforms/playback/GS1_4F3_20170821_151602.312844_UTC_250k.fc32', True)
self.blocks_delay_1 = blocks.delay(gr.sizeof_char*1, delay)
self.blocks_delay_0 = blocks.delay(gr.sizeof_gr_complex*1, 1)
self.blocks_conjugate_cc_0 = blocks.conjugate_cc()
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)
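        # The delay-by-one-sample / conjugate / multiply / magnitude chain that
        # feeds the "baud" frequency sink acts as a delay-and-multiply detector,
        # whose spectrum should show a component near the symbol rate, which is
        # useful for confirming the 1200 baud assumption.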
self.analog_agc2_xx_0 = analog.agc2_cc(1e-4, 1e-2, 1.0, 1.0)
self.analog_agc2_xx_0.set_max_gain(65536)
##################################################
# Connections
##################################################
self.connect((self.analog_agc2_xx_0, 0), (self.rational_resampler_xxx_0_0, 0))
self.connect((self.blocks_complex_to_mag_0, 0), (self.qtgui_freq_sink_x_1, 0))
self.connect((self.blocks_conjugate_cc_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.blocks_delay_0, 0), (self.blocks_conjugate_cc_0, 0))
self.connect((self.blocks_delay_1, 0), (self.qtgui_time_raster_sink_x_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.freq_xlating_fir_filter_xxx_0, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.digital_diff_decoder_bb_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.mapper_demapper_soft_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.qtgui_const_sink_x_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.blocks_delay_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.digital_clock_recovery_mm_xx_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.rational_resampler_xxx_0, 0))
self.connect((self.digital_diff_decoder_bb_0, 0), (self.blocks_delay_1, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.freq_xlating_fir_filter_xxx_0_1, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.qtgui_waterfall_sink_x_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0_1, 0), (self.analog_agc2_xx_0, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.digital_costas_loop_cc_0, 0))
self.connect((self.mapper_demapper_soft_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.rational_resampler_xxx_0, 0), (self.qtgui_waterfall_sink_x_0_0, 0))
self.connect((self.rational_resampler_xxx_0_0, 0), (self.low_pass_filter_0_0, 0))
self.connect((self.rational_resampler_xxx_0_0, 0), (self.qtgui_freq_sink_x_0_0_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "inmarsat_playback")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.set_xlate_filter_taps(firdes.low_pass(1,self.samp_rate, self.samp_rate/2, 1000, firdes.WIN_HAMMING, 6.76))
self.set_samp_per_sym((self.samp_rate/self.decim*self.interp)/self.baud)
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.qtgui_waterfall_sink_x_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_1.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)
self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate/self.decim*self.interp, self.cutoff, 1000, firdes.WIN_HAMMING, 6.76))
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
def get_interp(self):
return self.interp
def set_interp(self, interp):
self.interp = interp
self.set_samp_per_sym((self.samp_rate/self.decim*self.interp)/self.baud)
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.qtgui_freq_sink_x_1.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate/self.decim*self.interp, self.cutoff, 1000, firdes.WIN_HAMMING, 6.76))
def get_decim(self):
return self.decim
def set_decim(self, decim):
self.decim = decim
self.set_samp_per_sym((self.samp_rate/self.decim*self.interp)/self.baud)
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.qtgui_freq_sink_x_1.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/self.decim*self.interp)
self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate/self.decim*self.interp, self.cutoff, 1000, firdes.WIN_HAMMING, 6.76))
def get_baud(self):
return self.baud
def set_baud(self, baud):
self.baud = baud
self.set_samp_per_sym((self.samp_rate/self.decim*self.interp)/self.baud)
def get_samp_per_sym(self):
return self.samp_per_sym
def set_samp_per_sym(self, samp_per_sym):
self.samp_per_sym = samp_per_sym
self.digital_clock_recovery_mm_xx_0.set_omega(self.samp_per_sym*(1+0.0))
def get_alpha(self):
return self.alpha
def set_alpha(self, alpha):
self.alpha = alpha
def get_xlate_filter_taps(self):
return self.xlate_filter_taps
def set_xlate_filter_taps(self, xlate_filter_taps):
self.xlate_filter_taps = xlate_filter_taps
self.freq_xlating_fir_filter_xxx_0_1.set_taps((self.xlate_filter_taps))
self.freq_xlating_fir_filter_xxx_0.set_taps((self.xlate_filter_taps))
def get_rrc_filter_taps(self):
return self.rrc_filter_taps
def set_rrc_filter_taps(self, rrc_filter_taps):
self.rrc_filter_taps = rrc_filter_taps
def get_loop_bw(self):
return self.loop_bw
def set_loop_bw(self, loop_bw):
self.loop_bw = loop_bw
self.digital_costas_loop_cc_0.set_loop_bandwidth(math.pi*2/self.loop_bw)
def get_freq_correct(self):
return self.freq_correct
def set_freq_correct(self, freq_correct):
self.freq_correct = freq_correct
Qt.QMetaObject.invokeMethod(self._freq_correct_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.freq_correct)))
self.freq_xlating_fir_filter_xxx_0_1.set_center_freq(self.freq_correct)
def get_delay(self):
return self.delay
def set_delay(self, delay):
self.delay = delay
Qt.QMetaObject.invokeMethod(self._delay_line_edit, "setText", Qt.Q_ARG("QString", str(self.delay)))
self.blocks_delay_1.set_dly(self.delay)
def get_cutoff(self):
return self.cutoff
def set_cutoff(self, cutoff):
self.cutoff = cutoff
Qt.QMetaObject.invokeMethod(self._cutoff_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.cutoff)))
self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate/self.decim*self.interp, self.cutoff, 1000, firdes.WIN_HAMMING, 6.76))
def get_cols(self):
return self.cols
def set_cols(self, cols):
self.cols = cols
self.qtgui_time_raster_sink_x_0.set_num_cols(self.cols)
def main(top_block_cls=inmarsat_playback, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
|
Eclipse-2017/waveforms
|
playback/inmarsat_playback.py
|
Python
|
gpl-3.0
| 26,718 | 0.010517 |
from django.contrib.auth.models import User
from django.test import TestCase
import autofixture
autofixture.autodiscover()
class AutodiscoverTestCase(TestCase):
def test_builtin_fixtures(self):
from autofixture.autofixtures import UserFixture
self.assertEqual(autofixture.REGISTRY[User], UserFixture)
|
gregmuellegger/django-autofixture
|
autofixture_tests/tests/test_autodiscover.py
|
Python
|
bsd-3-clause
| 325 | 0 |
from __future__ import print_function, division, absolute_import
import unittest
from cu2qu.pens import Cu2QuPen, Cu2QuPointPen
from . import CUBIC_GLYPHS, QUAD_GLYPHS
from .utils import DummyGlyph, DummyPointGlyph
from .utils import DummyPen, DummyPointPen
from fontTools.misc.loggingTools import CapturingLogHandler
from textwrap import dedent
import logging
MAX_ERR = 1.0
class _TestPenMixin(object):
"""Collection of tests that are shared by both the SegmentPen and the
PointPen test cases, plus some helper methods.
"""
maxDiff = None
def diff(self, expected, actual):
import difflib
expected = str(self.Glyph(expected)).splitlines(True)
actual = str(self.Glyph(actual)).splitlines(True)
diff = difflib.unified_diff(
expected, actual, fromfile='expected', tofile='actual')
return "".join(diff)
def convert_glyph(self, glyph, **kwargs):
# draw source glyph onto a new glyph using a Cu2Qu pen and return it
converted = self.Glyph()
pen = getattr(converted, self.pen_getter_name)()
quadpen = self.Cu2QuPen(pen, MAX_ERR, **kwargs)
getattr(glyph, self.draw_method_name)(quadpen)
return converted
def expect_glyph(self, source, expected):
converted = self.convert_glyph(source)
self.assertNotEqual(converted, source)
if not converted.approx(expected):
print(self.diff(expected, converted))
self.fail("converted glyph is different from expected")
def test_convert_simple_glyph(self):
self.expect_glyph(CUBIC_GLYPHS['a'], QUAD_GLYPHS['a'])
self.expect_glyph(CUBIC_GLYPHS['A'], QUAD_GLYPHS['A'])
def test_convert_composite_glyph(self):
source = CUBIC_GLYPHS['Aacute']
converted = self.convert_glyph(source)
# components don't change after quadratic conversion
self.assertEqual(converted, source)
def test_convert_mixed_glyph(self):
# this contains a mix of contours and components
self.expect_glyph(CUBIC_GLYPHS['Eacute'], QUAD_GLYPHS['Eacute'])
def test_reverse_direction(self):
for name in ('a', 'A', 'Eacute'):
source = CUBIC_GLYPHS[name]
normal_glyph = self.convert_glyph(source)
reversed_glyph = self.convert_glyph(source, reverse_direction=True)
            # the number of commands is the same, just their order is inverted
            self.assertEqual(
                len(normal_glyph.outline), len(reversed_glyph.outline))
self.assertNotEqual(normal_glyph, reversed_glyph)
def test_stats(self):
stats = {}
for name in CUBIC_GLYPHS.keys():
source = CUBIC_GLYPHS[name]
self.convert_glyph(source, stats=stats)
self.assertTrue(stats)
self.assertTrue('1' in stats)
self.assertEqual(type(stats['1']), int)
def test_addComponent(self):
pen = self.Pen()
quadpen = self.Cu2QuPen(pen, MAX_ERR)
quadpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
# components are passed through without changes
self.assertEqual(str(pen).splitlines(), [
"pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
])
class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
def __init__(self, *args, **kwargs):
super(TestCu2QuPen, self).__init__(*args, **kwargs)
self.Glyph = DummyGlyph
self.Pen = DummyPen
self.Cu2QuPen = Cu2QuPen
self.pen_getter_name = 'getPen'
self.draw_method_name = 'draw'
def test__check_contour_is_open(self):
msg = "moveTo is required"
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
with self.assertRaisesRegex(AssertionError, msg):
quadpen.lineTo((0, 0))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.qCurveTo((0, 0), (1, 1))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.curveTo((0, 0), (1, 1), (2, 2))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.closePath()
with self.assertRaisesRegex(AssertionError, msg):
quadpen.endPath()
quadpen.moveTo((0, 0)) # now it works
quadpen.lineTo((1, 1))
quadpen.qCurveTo((2, 2), (3, 3))
quadpen.curveTo((4, 4), (5, 5), (6, 6))
quadpen.closePath()
def test__check_contour_closed(self):
msg = "closePath or endPath is required"
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.moveTo((1, 1))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.addComponent("a", (1, 0, 0, 1, 0, 0))
# it works if contour is closed
quadpen.closePath()
quadpen.moveTo((1, 1))
quadpen.endPath()
quadpen.addComponent("a", (1, 0, 0, 1, 0, 0))
def test_qCurveTo_no_points(self):
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(
AssertionError, "illegal qcurve segment point count: 0"):
quadpen.qCurveTo()
def test_qCurveTo_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.lineTo((1, 1))",
])
def test_qCurveTo_more_than_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1), (2, 2))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((1, 1), (2, 2))",
])
def test_curveTo_no_points(self):
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(
AssertionError, "illegal curve segment point count: 0"):
quadpen.curveTo()
def test_curveTo_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.lineTo((1, 1))",
])
def test_curveTo_2_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((1, 1), (2, 2))",
])
def test_curveTo_3_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((0.75, 0.75), (2.25, 2.25), (3, 3))",
])
def test_curveTo_more_than_3_points(self):
# a 'SuperBezier' as described in fontTools.basePen.AbstractPen
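        # (four points after the current point), which gets split into two
        # cubic segments, joining at (2, 2) for these collinear points; each
        # half is then converted to its own quadratic spline, hence the two
        # qCurveTo calls asserted below.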
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3), (4, 4))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((0.75, 0.75), (1.625, 1.625), (2, 2))",
"pen.qCurveTo((2.375, 2.375), (3.25, 3.25), (4, 4))",
])
def test_addComponent(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
# components are passed through without changes
self.assertEqual(str(pen).splitlines(), [
"pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
])
def test_ignore_single_points(self):
pen = DummyPen()
try:
logging.captureWarnings(True)
with CapturingLogHandler("py.warnings", level="WARNING") as log:
quadpen = Cu2QuPen(pen, MAX_ERR, ignore_single_points=True)
finally:
logging.captureWarnings(False)
quadpen.moveTo((0, 0))
quadpen.endPath()
quadpen.moveTo((1, 1))
quadpen.closePath()
self.assertGreaterEqual(len(log.records), 1)
self.assertIn("ignore_single_points is deprecated",
log.records[0].args[0])
# single-point contours were ignored, so the pen commands are empty
self.assertFalse(pen.commands)
# redraw without ignoring single points
quadpen.ignore_single_points = False
quadpen.moveTo((0, 0))
quadpen.endPath()
quadpen.moveTo((1, 1))
quadpen.closePath()
self.assertTrue(pen.commands)
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.endPath()",
"pen.moveTo((1, 1))",
"pen.closePath()"
])
class TestCu2QuPointPen(unittest.TestCase, _TestPenMixin):
def __init__(self, *args, **kwargs):
super(TestCu2QuPointPen, self).__init__(*args, **kwargs)
self.Glyph = DummyPointGlyph
self.Pen = DummyPointPen
self.Cu2QuPen = Cu2QuPointPen
self.pen_getter_name = 'getPointPen'
self.draw_method_name = 'drawPoints'
def test_super_bezier_curve(self):
pen = DummyPointPen()
quadpen = Cu2QuPointPen(pen, MAX_ERR)
quadpen.beginPath()
quadpen.addPoint((0, 0), segmentType="move")
quadpen.addPoint((1, 1))
quadpen.addPoint((2, 2))
quadpen.addPoint((3, 3))
quadpen.addPoint(
(4, 4), segmentType="curve", smooth=False, name="up", selected=1)
quadpen.endPath()
self.assertEqual(str(pen).splitlines(), """\
pen.beginPath()
pen.addPoint((0, 0), name=None, segmentType='move', smooth=False)
pen.addPoint((0.75, 0.75), name=None, segmentType=None, smooth=False)
pen.addPoint((1.625, 1.625), name=None, segmentType=None, smooth=False)
pen.addPoint((2, 2), name=None, segmentType='qcurve', smooth=True)
pen.addPoint((2.375, 2.375), name=None, segmentType=None, smooth=False)
pen.addPoint((3.25, 3.25), name=None, segmentType=None, smooth=False)
pen.addPoint((4, 4), name='up', segmentType='qcurve', selected=1, smooth=False)
pen.endPath()""".splitlines())
def test__flushContour_restore_starting_point(self):
pen = DummyPointPen()
quadpen = Cu2QuPointPen(pen, MAX_ERR)
# collect the output of _flushContour before it's sent to _drawPoints
new_segments = []
def _drawPoints(segments):
new_segments.extend(segments)
Cu2QuPointPen._drawPoints(quadpen, segments)
quadpen._drawPoints = _drawPoints
# a closed path (ie. no "move" segmentType)
quadpen._flushContour([
("curve", [
((2, 2), False, None, {}),
((1, 1), False, None, {}),
((0, 0), False, None, {}),
]),
("curve", [
((1, 1), False, None, {}),
((2, 2), False, None, {}),
((3, 3), False, None, {}),
]),
])
# the original starting point is restored: the last segment has become
# the first
self.assertEqual(new_segments[0][1][-1][0], (3, 3))
self.assertEqual(new_segments[-1][1][-1][0], (0, 0))
new_segments = []
# an open path (ie. starting with "move")
quadpen._flushContour([
("move", [
((0, 0), False, None, {}),
]),
("curve", [
((1, 1), False, None, {}),
((2, 2), False, None, {}),
((3, 3), False, None, {}),
]),
])
# the segment order stays the same before and after _flushContour
self.assertEqual(new_segments[0][1][-1][0], (0, 0))
self.assertEqual(new_segments[-1][1][-1][0], (3, 3))
def test_quad_no_oncurve(self):
"""When passed a contour which has no on-curve points, the
Cu2QuPointPen will treat it as a special quadratic contour whose
first point has 'None' coordinates.
"""
self.maxDiff = None
pen = DummyPointPen()
quadpen = Cu2QuPointPen(pen, MAX_ERR)
quadpen.beginPath()
quadpen.addPoint((1, 1))
quadpen.addPoint((2, 2))
quadpen.addPoint((3, 3))
quadpen.endPath()
self.assertEqual(
str(pen),
dedent(
"""\
pen.beginPath()
pen.addPoint((1, 1), name=None, segmentType=None, smooth=False)
pen.addPoint((2, 2), name=None, segmentType=None, smooth=False)
pen.addPoint((3, 3), name=None, segmentType=None, smooth=False)
pen.endPath()"""
)
)
if __name__ == "__main__":
unittest.main()
|
googlefonts/cu2qu
|
tests/pens_test.py
|
Python
|
apache-2.0
| 13,027 | 0.000077 |
# See http://zulip.readthedocs.io/en/latest/events-system.html for
# high-level documentation on how this system works.
from __future__ import absolute_import
from typing import cast, AbstractSet, Any, Callable, Dict, List, \
Mapping, MutableMapping, Optional, Iterable, Sequence, Set, Text, Union
from django.utils.translation import ugettext as _
from django.conf import settings
from django.utils.timezone import now as timezone_now
from collections import deque
import datetime
import os
import time
import socket
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.autoreload
import tornado.ioloop
import random
import traceback
from zerver.models import UserProfile, Client
from zerver.decorator import RespondAsynchronously
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_restart
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
import copy
import six
requests_client = requests.Session()
for host in ['127.0.0.1', 'localhost']:
if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
        # Setting trust_env to False seems to be the only reliable way to make
        # the requests library ignore the proxy environment variables here.
requests_client.trust_env = False
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5
# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events(). The actual timeout value is randomized for each
# client connection based on the below value. We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
class ClientDescriptor(object):
def __init__(self, user_profile_id, user_profile_email, realm_id, event_queue,
event_types, client_type_name, apply_markdown=True,
all_public_streams=False, lifespan_secs=0, narrow=[]):
# type: (int, Text, int, EventQueue, Optional[Sequence[str]], Text, bool, bool, int, Iterable[Sequence[Text]]) -> None
# These objects are serialized on shutdown and restored on restart.
# If fields are added or semantics are changed, temporary code must be
# added to load_event_queues() to update the restored objects.
# Additionally, the to_dict and from_dict methods must be updated
self.user_profile_id = user_profile_id
self.user_profile_email = user_profile_email
self.realm_id = realm_id
self.current_handler_id = None # type: Optional[int]
self.current_client_name = None # type: Optional[Text]
self.event_queue = event_queue
self.queue_timeout = lifespan_secs
self.event_types = event_types
self.last_connection_time = time.time()
self.apply_markdown = apply_markdown
self.all_public_streams = all_public_streams
self.client_type_name = client_type_name
self._timeout_handle = None # type: Any # TODO: should be return type of ioloop.add_timeout
self.narrow = narrow
self.narrow_filter = build_narrow_filter(narrow)
# Clamp queue_timeout to between minimum and maximum timeouts
self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS, min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))
def to_dict(self):
# type: () -> Dict[str, Any]
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(user_profile_id=self.user_profile_id,
user_profile_email=self.user_profile_email,
realm_id=self.realm_id,
event_queue=self.event_queue.to_dict(),
queue_timeout=self.queue_timeout,
event_types=self.event_types,
last_connection_time=self.last_connection_time,
apply_markdown=self.apply_markdown,
all_public_streams=self.all_public_streams,
narrow=self.narrow,
client_type_name=self.client_type_name)
def __repr__(self):
# type: () -> str
return "ClientDescriptor<%s>" % (self.event_queue.id,)
@classmethod
def from_dict(cls, d):
# type: (MutableMapping[str, Any]) -> ClientDescriptor
if 'user_profile_email' not in d:
# Temporary migration for the addition of the new user_profile_email field
from zerver.models import get_user_profile_by_id
d['user_profile_email'] = get_user_profile_by_id(d['user_profile_id']).email
if 'client_type' in d:
# Temporary migration for the rename of client_type to client_type_name
d['client_type_name'] = d['client_type']
ret = cls(d['user_profile_id'], d['user_profile_email'], d['realm_id'],
EventQueue.from_dict(d['event_queue']), d['event_types'],
d['client_type_name'], d['apply_markdown'], d['all_public_streams'],
d['queue_timeout'], d.get('narrow', []))
ret.last_connection_time = d['last_connection_time']
return ret
def prepare_for_pickling(self):
# type: () -> None
self.current_handler_id = None
self._timeout_handle = None
def add_event(self, event):
# type: (Dict[str, Any]) -> None
if self.current_handler_id is not None:
handler = get_handler_by_id(self.current_handler_id)
async_request_restart(handler._request)
self.event_queue.push(event)
self.finish_current_handler()
def finish_current_handler(self):
# type: () -> bool
if self.current_handler_id is not None:
err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
try:
finish_handler(self.current_handler_id, self.event_queue.id,
self.event_queue.contents(), self.apply_markdown)
except Exception:
logging.exception(err_msg)
finally:
self.disconnect_handler()
return True
return False
def accepts_event(self, event):
# type: (Mapping[str, Any]) -> bool
if self.event_types is not None and event["type"] not in self.event_types:
return False
if event["type"] == "message":
return self.narrow_filter(event)
return True
# TODO: Refactor so we don't need this function
def accepts_messages(self):
# type: () -> bool
return self.event_types is None or "message" in self.event_types
def idle(self, now):
# type: (float) -> bool
if not hasattr(self, 'queue_timeout'):
self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS
return (self.current_handler_id is None and
now - self.last_connection_time >= self.queue_timeout)
def connect_handler(self, handler_id, client_name):
# type: (int, Text) -> None
self.current_handler_id = handler_id
self.current_client_name = client_name
set_descriptor_by_handler_id(handler_id, self)
self.last_connection_time = time.time()
def timeout_callback():
# type: () -> None
self._timeout_handle = None
# All clients get heartbeat events
self.add_event(dict(type='heartbeat'))
ioloop = tornado.ioloop.IOLoop.instance()
heartbeat_time = time.time() + HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
if self.client_type_name != 'API: heartbeat test':
self._timeout_handle = ioloop.add_timeout(heartbeat_time, timeout_callback)
def disconnect_handler(self, client_closed=False):
# type: (bool) -> None
if self.current_handler_id:
clear_descriptor_by_handler_id(self.current_handler_id, None)
clear_handler_by_id(self.current_handler_id)
if client_closed:
logging.info("Client disconnected for queue %s (%s via %s)" %
(self.event_queue.id, self.user_profile_email,
self.current_client_name))
self.current_handler_id = None
self.current_client_name = None
if self._timeout_handle is not None:
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self._timeout_handle)
self._timeout_handle = None
def cleanup(self):
# type: () -> None
# Before we can GC the event queue, we need to disconnect the
# handler and notify the client (or connection server) so that
# they can cleanup their own state related to the GC'd event
# queue. Finishing the handler before we GC ensures the
# invariant that event queues are idle when passed to
# `do_gc_event_queues` is preserved.
self.finish_current_handler()
do_gc_event_queues({self.event_queue.id}, {self.user_profile_id},
{self.realm_id})
def compute_full_event_type(event):
# type: (Mapping[str, Any]) -> str
if event["type"] == "update_message_flags":
if event["all"]:
# Put the "all" case in its own category
return "all_flags/%s/%s" % (event["flag"], event["operation"])
return "flags/%s/%s" % (event["operation"], event["flag"])
return event["type"]
class EventQueue(object):
def __init__(self, id):
# type: (str) -> None
        self.queue = deque() # type: ignore # type signature should be Deque[Dict[str, Any]] but we need https://github.com/python/mypy/pull/2845 to be merged
self.next_event_id = 0 # type: int
self.id = id # type: str
self.virtual_events = {} # type: Dict[str, Dict[str, Any]]
def to_dict(self):
# type: () -> Dict[str, Any]
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(id=self.id,
next_event_id=self.next_event_id,
queue=list(self.queue),
virtual_events=self.virtual_events)
@classmethod
def from_dict(cls, d):
# type: (Dict[str, Any]) -> EventQueue
ret = cls(d['id'])
ret.next_event_id = d['next_event_id']
ret.queue = deque(d['queue'])
ret.virtual_events = d.get("virtual_events", {})
return ret
def push(self, event):
# type: (Dict[str, Any]) -> None
event['id'] = self.next_event_id
self.next_event_id += 1
full_event_type = compute_full_event_type(event)
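        # "Virtual" events: pointer moves, restarts, and flag updates are
        # coalesced below into a single event per type, so an undrained
        # queue doesn't accumulate an unbounded number of them.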
if (full_event_type in ["pointer", "restart"] or
full_event_type.startswith("flags/")):
if full_event_type not in self.virtual_events:
self.virtual_events[full_event_type] = copy.deepcopy(event)
return
# Update the virtual event with the values from the event
virtual_event = self.virtual_events[full_event_type]
virtual_event["id"] = event["id"]
if "timestamp" in event:
virtual_event["timestamp"] = event["timestamp"]
if full_event_type == "pointer":
virtual_event["pointer"] = event["pointer"]
elif full_event_type == "restart":
virtual_event["server_generation"] = event["server_generation"]
elif full_event_type.startswith("flags/"):
virtual_event["messages"] += event["messages"]
else:
self.queue.append(event)
# Note that pop ignores virtual events. This is fine in our
# current usage since virtual events should always be resolved to
# a real event before being given to users.
def pop(self):
# type: () -> Dict[str, Any]
return self.queue.popleft()
def empty(self):
# type: () -> bool
return len(self.queue) == 0 and len(self.virtual_events) == 0
# See the comment on pop; that applies here as well
def prune(self, through_id):
# type: (int) -> None
while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
self.pop()
def contents(self):
# type: () -> List[Dict[str, Any]]
contents = [] # type: List[Dict[str, Any]]
virtual_id_map = {} # type: Dict[str, Dict[str, Any]]
for event_type in self.virtual_events:
virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
virtual_ids = sorted(list(virtual_id_map.keys()))
# Merge the virtual events into their final place in the queue
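        # Each virtual event is spliced in just before the first real event
        # with a larger id, so the returned list stays ordered by event id.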
index = 0
length = len(virtual_ids)
for event in self.queue:
while index < length and virtual_ids[index] < event["id"]:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
contents.append(event)
while index < length:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
self.virtual_events = {}
self.queue = deque(contents)
return contents
# maps queue ids to client descriptors
clients = {} # type: Dict[str, ClientDescriptor]
# maps user id to list of client descriptors
user_clients = {} # type: Dict[int, List[ClientDescriptor]]
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {} # type: Dict[int, List[ClientDescriptor]]
# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id that is about to be deleted
gc_hooks = [] # type: List[Callable[[int, ClientDescriptor, bool], None]]
next_queue_id = 0
def add_client_gc_hook(hook):
# type: (Callable[[int, ClientDescriptor, bool], None]) -> None
gc_hooks.append(hook)
def get_client_descriptor(queue_id):
# type: (str) -> ClientDescriptor
return clients.get(queue_id)
def get_client_descriptors_for_user(user_profile_id):
# type: (int) -> List[ClientDescriptor]
return user_clients.get(user_profile_id, [])
def get_client_descriptors_for_realm_all_streams(realm_id):
# type: (int) -> List[ClientDescriptor]
return realm_clients_all_streams.get(realm_id, [])
def add_to_client_dicts(client):
# type: (ClientDescriptor) -> None
user_clients.setdefault(client.user_profile_id, []).append(client)
if client.all_public_streams or client.narrow != []:
realm_clients_all_streams.setdefault(client.realm_id, []).append(client)
def allocate_client_descriptor(new_queue_data):
# type: (MutableMapping[str, Any]) -> ClientDescriptor
global next_queue_id
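    # Prefix the queue id with the server generation so ids can never
    # collide with queues created before a Tornado restart.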
queue_id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
next_queue_id += 1
new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
client = ClientDescriptor.from_dict(new_queue_data)
clients[queue_id] = client
add_to_client_dicts(client)
return client
def do_gc_event_queues(to_remove, affected_users, affected_realms):
# type: (AbstractSet[str], AbstractSet[int], AbstractSet[int]) -> None
def filter_client_dict(client_dict, key):
# type: (MutableMapping[int, List[ClientDescriptor]], int) -> None
if key not in client_dict:
return
new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
if len(new_client_list) == 0:
del client_dict[key]
else:
client_dict[key] = new_client_list
for user_id in affected_users:
filter_client_dict(user_clients, user_id)
for realm_id in affected_realms:
filter_client_dict(realm_clients_all_streams, realm_id)
for id in to_remove:
for cb in gc_hooks:
cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
del clients[id]
def gc_event_queues():
# type: () -> None
start = time.time()
to_remove = set() # type: Set[str]
affected_users = set() # type: Set[int]
affected_realms = set() # type: Set[int]
for (id, client) in six.iteritems(clients):
if client.idle(start):
to_remove.add(id)
affected_users.add(client.user_profile_id)
affected_realms.add(client.realm_id)
# We don't need to call e.g. finish_current_handler on the clients
# being removed because they are guaranteed to be idle and thus
# not have a current handler.
do_gc_event_queues(to_remove, affected_users, affected_realms)
logging.info(('Tornado removed %d idle event queues owned by %d users in %.3fs.' +
' Now %d active queues, %s')
% (len(to_remove), len(affected_users), time.time() - start,
len(clients), handler_stats_string()))
statsd.gauge('tornado.active_queues', len(clients))
statsd.gauge('tornado.active_users', len(user_clients))
def dump_event_queues():
# type: () -> None
start = time.time()
with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "w") as stored_queues:
ujson.dump([(qid, client.to_dict()) for (qid, client) in six.iteritems(clients)],
stored_queues)
logging.info('Tornado dumped %d event queues in %.3fs'
% (len(clients), time.time() - start))
def load_event_queues():
# type: () -> None
global clients
start = time.time()
# ujson chokes on bad input pretty easily. We separate out the actual
# file reading from the loading so that we don't silently fail if we get
# bad input.
try:
with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
json_data = stored_queues.read()
try:
clients = dict((qid, ClientDescriptor.from_dict(client))
for (qid, client) in ujson.loads(json_data))
except Exception:
logging.exception("Could not deserialize event queues")
except (IOError, EOFError):
pass
for client in six.itervalues(clients):
# Put code for migrations due to event queue data format changes here
add_to_client_dicts(client)
logging.info('Tornado loaded %d event queues in %.3fs'
% (len(clients), time.time() - start))
def send_restart_events(immediate=False):
# type: (bool) -> None
event = dict(type='restart', server_generation=settings.SERVER_GENERATION) # type: Dict[str, Any]
if immediate:
event['immediate'] = True
for client in six.itervalues(clients):
if client.accepts_event(event):
client.add_event(event.copy())
def setup_event_queue():
# type: () -> None
if not settings.TEST_SUITE:
load_event_queues()
atexit.register(dump_event_queues)
# Make sure we dump event queues even if we exit via signal
signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1)) # type: ignore # https://github.com/python/mypy/issues/2955
tornado.autoreload.add_reload_hook(dump_event_queues) # type: ignore # TODO: Fix missing tornado.autoreload stub
try:
os.rename(settings.JSON_PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.json.last")
except OSError:
pass
# Set up event queue garbage collection
ioloop = tornado.ioloop.IOLoop.instance()
pc = tornado.ioloop.PeriodicCallback(gc_event_queues,
EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
pc.start()
send_restart_events(immediate=settings.DEVELOPMENT)
def fetch_events(query):
# type: (Mapping[str, Any]) -> Dict[str, Any]
queue_id = query["queue_id"] # type: str
dont_block = query["dont_block"] # type: bool
last_event_id = query["last_event_id"] # type: int
user_profile_id = query["user_profile_id"] # type: int
new_queue_data = query.get("new_queue_data") # type: Optional[MutableMapping[str, Any]]
user_profile_email = query["user_profile_email"] # type: Text
client_type_name = query["client_type_name"] # type: Text
handler_id = query["handler_id"] # type: int
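    # Two cases below: if queue_id is None we register a brand-new event
    # queue (only allowed with dont_block); otherwise we look up the
    # existing queue and return any events newer than last_event_id.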
try:
was_connected = False
orig_queue_id = queue_id
extra_log_data = ""
if queue_id is None:
if dont_block:
client = allocate_client_descriptor(new_queue_data)
queue_id = client.event_queue.id
else:
raise JsonableError(_("Missing 'queue_id' argument"))
else:
if last_event_id is None:
raise JsonableError(_("Missing 'last_event_id' argument"))
client = get_client_descriptor(queue_id)
if client is None:
raise JsonableError(_("Bad event queue id: %s") % (queue_id,))
if user_profile_id != client.user_profile_id:
raise JsonableError(_("You are not authorized to get events from this queue"))
client.event_queue.prune(last_event_id)
was_connected = client.finish_current_handler()
if not client.event_queue.empty() or dont_block:
response = dict(events=client.event_queue.contents(),
handler_id=handler_id) # type: Dict[str, Any]
if orig_queue_id is None:
response['queue_id'] = queue_id
if len(response["events"]) == 1:
extra_log_data = "[%s/%s/%s]" % (queue_id, len(response["events"]),
response["events"][0]["type"])
else:
extra_log_data = "[%s/%s]" % (queue_id, len(response["events"]))
if was_connected:
extra_log_data += " [was connected]"
return dict(type="response", response=response, extra_log_data=extra_log_data)
# After this point, dont_block=False, the queue is empty, and we
# have a pre-existing queue, so we wait for new events.
if was_connected:
logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_email,
client_type_name))
except JsonableError as e:
if hasattr(e, 'to_json_error_msg') and callable(e.to_json_error_msg):
return dict(type="error", handler_id=handler_id,
message=e.to_json_error_msg())
raise e
client.connect_handler(handler_id, client_type_name)
return dict(type="async")
# The following functions are called from Django
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
# type: (requests.Response) -> Dict[str, Any]
if requests_json_is_function:
return resp.json()
else:
return resp.json # type: ignore # mypy trusts the stub, not the runtime type checking of this fn
def request_event_queue(user_profile, user_client, apply_markdown,
queue_lifespan_secs, event_types=None, all_public_streams=False,
narrow=[]):
# type: (UserProfile, Client, bool, int, Optional[Iterable[str]], bool, Iterable[Sequence[Text]]) -> Optional[str]
if settings.TORNADO_SERVER:
req = {'dont_block': 'true',
'apply_markdown': ujson.dumps(apply_markdown),
'all_public_streams': ujson.dumps(all_public_streams),
'client': 'internal',
'user_client': user_client.name,
'narrow': ujson.dumps(narrow),
'lifespan_secs': queue_lifespan_secs}
if event_types is not None:
req['event_types'] = ujson.dumps(event_types)
try:
resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(
user_profile.email, user_profile.api_key),
params=req)
except requests.adapters.ConnectionError:
logging.error('Tornado server does not seem to be running, check %s '
'and %s for more information.' %
(settings.ERROR_FILE_LOG_PATH, "tornado.log"))
raise requests.adapters.ConnectionError(
"Django cannot connect to Tornado server (%s); try restarting" %
(settings.TORNADO_SERVER))
resp.raise_for_status()
return extract_json_response(resp)['queue_id']
return None
def get_user_events(user_profile, queue_id, last_event_id):
# type: (UserProfile, str, int) -> List[Dict]
if settings.TORNADO_SERVER:
resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(
user_profile.email, user_profile.api_key),
params={'queue_id': queue_id,
'last_event_id': last_event_id,
'dont_block': 'true',
'client': 'internal'})
resp.raise_for_status()
return extract_json_response(resp)['events']
return []
# Send email notifications to idle users
# after they are idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id, message_id):
# type: (int, int) -> Dict[str, Any]
return {"user_profile_id": user_profile_id,
"message_id": message_id,
"timestamp": time.time()}
def missedmessage_hook(user_profile_id, queue, last_for_client):
# type: (int, ClientDescriptor, bool) -> None
# Only process missedmessage hook when the last queue for a
# client has been garbage collected
if not last_for_client:
return
message_ids_to_notify = [] # type: List[Dict[str, Any]]
for event in queue.event_queue.contents():
if not event['type'] == 'message' or not event['flags']:
continue
if 'mentioned' in event['flags'] and 'read' not in event['flags']:
notify_info = dict(message_id=event['message']['id'])
if not event.get('push_notified', False):
notify_info['send_push'] = True
if not event.get('email_notified', False):
notify_info['send_email'] = True
message_ids_to_notify.append(notify_info)
for notify_info in message_ids_to_notify:
msg_id = notify_info['message_id']
notice = build_offline_notification(user_profile_id, msg_id)
if notify_info.get('send_push', False):
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
if notify_info.get('send_email', False):
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
def receiver_is_idle(user_profile_id, realm_presences):
# type: (int, Optional[Dict[int, Dict[Text, Dict[str, Any]]]]) -> bool
# If a user has no message-receiving event queues, they've got no open zulip
# session so we notify them
all_client_descriptors = get_client_descriptors_for_user(user_profile_id)
message_event_queues = [client for client in all_client_descriptors if client.accepts_messages()]
off_zulip = len(message_event_queues) == 0
# It's possible a recipient is not in the realm of a sender. We don't have
# presence information in this case (and it's hard to get without an additional
# db query) so we simply don't try to guess if this cross-realm recipient
# has been idle for too long
if realm_presences is None or user_profile_id not in realm_presences:
return off_zulip
# We want to find the newest "active" presence entity and compare that to the
# activity expiry threshold.
user_presence = realm_presences[user_profile_id]
latest_active_timestamp = None
idle = False
for client, status in six.iteritems(user_presence):
if (latest_active_timestamp is None or status['timestamp'] > latest_active_timestamp) and \
status['status'] == 'active':
latest_active_timestamp = status['timestamp']
if latest_active_timestamp is None:
idle = True
else:
active_datetime = timestamp_to_datetime(latest_active_timestamp)
# 140 seconds is consistent with presence.js:OFFLINE_THRESHOLD_SECS
idle = timezone_now() - active_datetime > datetime.timedelta(seconds=140)
return off_zulip or idle
def process_message_event(event_template, users):
# type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
realm_presences = {int(k): v for k, v in event_template['presences'].items()} # type: Dict[int, Dict[Text, Dict[str, Any]]]
sender_queue_id = event_template.get('sender_queue_id', None) # type: Optional[str]
message_dict_markdown = event_template['message_dict_markdown'] # type: Dict[str, Any]
message_dict_no_markdown = event_template['message_dict_no_markdown'] # type: Dict[str, Any]
sender_id = message_dict_markdown['sender_id'] # type: int
message_id = message_dict_markdown['id'] # type: int
message_type = message_dict_markdown['type'] # type: str
sending_client = message_dict_markdown['client'] # type: Text
# To remove duplicate clients: Maps queue ID to {'client': Client, 'flags': flags}
send_to_clients = {} # type: Dict[str, Dict[str, Any]]
# Extra user-specific data to include
extra_user_data = {} # type: Dict[int, Any]
if 'stream_name' in event_template and not event_template.get("invite_only"):
for client in get_client_descriptors_for_realm_all_streams(event_template['realm_id']):
send_to_clients[client.event_queue.id] = {'client': client, 'flags': None}
if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
send_to_clients[client.event_queue.id]['is_sender'] = True
for user_data in users:
user_profile_id = user_data['id'] # type: int
flags = user_data.get('flags', []) # type: Iterable[str]
for client in get_client_descriptors_for_user(user_profile_id):
send_to_clients[client.event_queue.id] = {'client': client, 'flags': flags}
if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
send_to_clients[client.event_queue.id]['is_sender'] = True
        # If the recipient was offline and the message was a single or group PM
        # to them, or they were @-notified, potentially notify them more immediately.
received_pm = message_type == "private" and user_profile_id != sender_id
mentioned = 'mentioned' in flags
idle = receiver_is_idle(user_profile_id, realm_presences)
always_push_notify = user_data.get('always_push_notify', False)
if (received_pm or mentioned) and (idle or always_push_notify):
notice = build_offline_notification(user_profile_id, message_id)
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
notified = dict(push_notified=True) # type: Dict[str, bool]
# Don't send missed message emails if always_push_notify is True
if idle:
# We require RabbitMQ to do this, as we can't call the email handler
# from the Tornado process. So if there's no rabbitmq support do nothing
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
notified['email_notified'] = True
extra_user_data[user_profile_id] = notified
for client_data in six.itervalues(send_to_clients):
client = client_data['client']
flags = client_data['flags']
is_sender = client_data.get('is_sender', False) # type: bool
extra_data = extra_user_data.get(client.user_profile_id, None) # type: Optional[Mapping[str, bool]]
if not client.accepts_messages():
# The actual check is the accepts_event() check below;
# this line is just an optimization to avoid copying
# message data unnecessarily
continue
if client.apply_markdown:
message_dict = message_dict_markdown
else:
message_dict = message_dict_no_markdown
# Make sure Zephyr mirroring bots know whether stream is invite-only
if "mirror" in client.client_type_name and event_template.get("invite_only"):
message_dict = message_dict.copy()
message_dict["invite_only_stream"] = True
if flags is not None:
message_dict['is_mentioned'] = 'mentioned' in flags
user_event = dict(type='message', message=message_dict, flags=flags) # type: Dict[str, Any]
if extra_data is not None:
user_event.update(extra_data)
if is_sender:
local_message_id = event_template.get('local_id', None)
if local_message_id is not None:
user_event["local_message_id"] = local_message_id
if not client.accepts_event(user_event):
continue
# The below prevents (Zephyr) mirroring loops.
if ('mirror' in sending_client and
sending_client.lower() == client.client_type_name.lower()):
continue
client.add_event(user_event)
def process_event(event, users):
# type: (Mapping[str, Any], Iterable[int]) -> None
for user_profile_id in users:
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(event):
client.add_event(dict(event))
def process_userdata_event(event_template, users):
# type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
for user_data in users:
user_profile_id = user_data['id']
user_event = dict(event_template) # shallow copy, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
client.add_event(user_event)
def process_notification(notice):
# type: (Mapping[str, Any]) -> None
event = notice['event'] # type: Mapping[str, Any]
users = notice['users'] # type: Union[Iterable[int], Iterable[Mapping[str, Any]]]
if event['type'] in ["update_message", "delete_message"]:
process_userdata_event(event, cast(Iterable[Mapping[str, Any]], users))
elif event['type'] == "message":
process_message_event(event, cast(Iterable[Mapping[str, Any]], users))
else:
process_event(event, cast(Iterable[int], users))
# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.
def send_notification_http(data):
# type: (Mapping[str, Any]) -> None
if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
requests_client.post(settings.TORNADO_SERVER + '/notify_tornado', data=dict(
data = ujson.dumps(data),
secret = settings.SHARED_SECRET))
else:
process_notification(data)
def send_notification(data):
# type: (Mapping[str, Any]) -> None
queue_json_publish("notify_tornado", data, send_notification_http)
def send_event(event, users):
# type: (Mapping[str, Any], Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None
"""`users` is a list of user IDs, or in the case of `message` type
events, a list of dicts describing the users and metadata about
the user/message pair."""
queue_json_publish("notify_tornado",
dict(event=event, users=users),
send_notification_http)
|
jrowan/zulip
|
zerver/tornado/event_queue.py
|
Python
|
apache-2.0
| 37,008 | 0.002972 |
# Custom Sphinx object types used in the toolchain documentation.
import docutils
def setup(app):
    app.add_object_type("asmdirective", "asmdir")
    app.add_object_type("asminstruction", "asminst")
    app.add_object_type("ppexpressionop", "ppexprop")
    app.add_object_type("ppdirective", "ppdir")
    app.add_object_type("literal", "lit")
|
DCPUTeam/DCPUToolchain
|
docs/sphinxext/toolchain.py
|
Python
|
mit
| 335 | 0.020896 |
import os
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest
redshift_username = os.environ.get("REDSHIFT_USERNAME")
redshift_password = os.environ.get("REDSHIFT_PASSWORD")
redshift_host = os.environ.get("REDSHIFT_HOST")
redshift_port = os.environ.get("REDSHIFT_PORT")
redshift_database = os.environ.get("REDSHIFT_DATABASE")
redshift_sslmode = os.environ.get("REDSHIFT_SSLMODE")
CONNECTION_STRING = f"postgresql+psycopg2://{redshift_username}:{redshift_password}@{redshift_host}:{redshift_port}/{redshift_database}?sslmode={redshift_sslmode}"
# This utility is not for general use. It is only to support testing.
from tests.test_utils import load_data_into_test_database
load_data_into_test_database(
table_name="taxi_data",
csv_path="./data/yellow_tripdata_sample_2019-01.csv",
connection_string=CONNECTION_STRING,
)
context = ge.get_context()
datasource_yaml = f"""
name: my_redshift_datasource
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: postgresql+psycopg2://<USER_NAME>:<PASSWORD>@<HOST>:<PORT>/<DATABASE>?sslmode=<SSLMODE>
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetSqlDataConnector
include_schema_name: true
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"postgresql+psycopg2://<USER_NAME>:<PASSWORD>@<HOST>:<PORT>/<DATABASE>?sslmode=<SSLMODE>",
CONNECTION_STRING,
)
context.test_yaml_config(datasource_yaml)
context.add_datasource(**yaml.load(datasource_yaml))
# First test for RuntimeBatchRequest using a query
batch_request = RuntimeBatchRequest(
datasource_name="my_redshift_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_name", # this can be anything that identifies this data
runtime_parameters={"query": "SELECT * from taxi_data LIMIT 10"},
batch_identifiers={"default_identifier_name": "default_identifier"},
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
# Second test for BatchRequest naming a table
batch_request = BatchRequest(
datasource_name="my_redshift_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="taxi_data", # this is the name of the table you want to retrieve
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["my_redshift_datasource"]
assert "taxi_data" in set(
context.get_available_data_asset_names()["my_redshift_datasource"][
"default_inferred_data_connector_name"
]
)
|
great-expectations/great_expectations
|
tests/integration/docusaurus/connecting_to_your_data/database/redshift_yaml_example.py
|
Python
|
apache-2.0
| 3,566 | 0.001963 |
from django.conf.urls import patterns, url
from django.views.decorators.csrf import csrf_exempt
from getpaid.backends.eservice.views import PendingView, SuccessView, FailureView
urlpatterns = patterns('',
url(r'^pending/$', csrf_exempt(PendingView.as_view()), name='getpaid-eservice-pending'),
url(r'^success/$', csrf_exempt(SuccessView.as_view()), name='getpaid-eservice-success'),
url(r'^failure/$', csrf_exempt(FailureView.as_view()), name='getpaid-eservice-failure'),
)
|
mionch/django-getpaid
|
getpaid/backends/eservice/urls.py
|
Python
|
mit
| 488 | 0.010246 |
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
class FeedParserDict(dict):
keymap = {
'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail',
}
def __getitem__(self, key):
"""
:return: A :class:`FeedParserDict`.
"""
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError("object doesn't have key 'category'")
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name, value) for (name, value) in link.items() if name != 'rel'])
return [
norel(link)
for link in dict.__getitem__(self, 'links')
if link['rel'] == 'enclosure'
]
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel'] == 'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if (
not dict.__contains__(self, 'updated')
and dict.__contains__(self, 'published')
):
warnings.warn(
"To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.",
DeprecationWarning,
)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if (
not dict.__contains__(self, 'updated_parsed')
and dict.__contains__(self, 'published_parsed')
):
warnings.warn(
"To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning,
)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
"""
:return: A :class:`FeedParserDict`.
"""
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, k, default):
if k not in self:
self[k] = default
return default
return self[k]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("object has no attribute '%s'" % key)
def __hash__(self):
# This is incorrect behavior -- dictionaries shouldn't be hashable.
# Note to self: remove this behavior in the future.
return id(self)
|
rembo10/headphones
|
lib/feedparser/util.py
|
Python
|
gpl-3.0
| 6,490 | 0.000308 |
"""
/***************************************************************************
Name : GdalTools
Description : Integrate gdal tools into qgis
Date : 17/Sep/09
copyright : (C) 2009 by Lorenzo Masini and Giuseppe Sucameli (Faunalia)
email : lorenxo86@gmail.com - brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
# load GdalTools class from file GdalTools
from GdalTools import GdalTools
return GdalTools(iface)
|
sebastic/QGIS
|
python/plugins/GdalTools/__init__.py
|
Python
|
gpl-2.0
| 1,251 | 0.000799 |
import numpy as np
def decay_mean(ratings, years, half_life=5, today=2017):
    # Weight each rating by an exponential function of how far its year is
    # from `today`, then return the weighted mean of the ratings.
    elapsed_time = years - today
    weights = np.e ** -(elapsed_time * half_life)
    print(weights)
    print(weights / sum(weights))
    return sum(ratings * weights) / sum(weights)
def main():
    years = np.array([2012, 2013, 2014, 2016, 2017])
    ratings = np.array([9, 11, 14, 11, 4])
    print(decay_mean(ratings, years, half_life=2))
    print(decay_mean(ratings, years, half_life=5))
    print(ratings.mean())
if __name__ == '__main__':
    main()
|
noelevans/sandpit
|
decay_fns/decay_weightings.py
|
Python
|
mit
| 574 | 0.020906 |
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import numpy as np
import cgi
import cgitb
cgitb.enable()
class genericBatchInputPage(webapp.RequestHandler):
def get(self):
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'generic','page':'batchinput'})
        html = html + template.render(templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberbatchinput.html', {
'model':'generic',
'model_attributes':'generic Batch Input'})
html = html + template.render(templatepath + '04uberbatchinput_jquery.html', {})
html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', genericBatchInputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
|
puruckertom/poptox
|
poptox/generic/generic_batchinput.py
|
Python
|
unlicense
| 1,372 | 0.019679 |
from django.conf.urls import patterns, url
from django.utils.translation import ugettext_lazy as _
from . import category_views
from . import feed
urlpatterns = patterns('',
url(r'(\d+)$', category_views.main_view, name='category'),
url(r'(\d+)/%s$' % _('contracts'), category_views.contracts, name='category_contracts'),
url(r'(\d+)/%s/rss$' % _('contracts'), feed.CategoryContractsFeed(), name='category_contracts_rss'),
url(r'(\d+)/%s$' % _('contractors'), category_views.contractors, name='category_contractors'),
url(r'(\d+)/%s$' % _('contracted'), category_views.contracted, name='category_contracted'),
url(r'(\d+)/%s$' % _('tenders'), category_views.tenders, name='category_tenders'),
url(r'(\d+)/%s/rss$' % _('tenders'), feed.CategoryTendersFeed(), name='category_tenders_rss'),
)
|
jorgecarleitao/public-contracts
|
contracts/category_urls.py
|
Python
|
bsd-3-clause
| 977 | 0.007165 |
from __future__ import division, print_function, absolute_import
from os import path
import warnings
DATA_PATH = path.join(path.dirname(__file__), 'data')
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal, run_module_suite,
assert_)
from scipy.io.idl import readsav
def object_array(*args):
"""Constructs a numpy array of objects"""
array = np.empty(len(args), dtype=object)
for i in range(len(args)):
array[i] = args[i]
return array
def assert_identical(a, b):
"""Assert whether value AND type are the same"""
assert_equal(a, b)
if type(b) is str:
assert_equal(type(a), type(b))
else:
assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
def assert_array_identical(a, b):
"""Assert whether values AND type are the same"""
assert_array_equal(a, b)
assert_equal(a.dtype.type, b.dtype.type)
# Define vectorized ID function for pointer arrays
vect_id = np.vectorize(id)
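# Applied to an array read from a pointer variable, vect_id yields the id()
# of every element; the pointer tests below use this to check that all
# elements reference the same underlying Python object.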
class TestIdict:
def test_idict(self):
custom_dict = {'a': np.int16(999)}
original_id = id(custom_dict)
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
assert_equal(original_id, id(s))
assert_('a' in s)
assert_identical(s['a'], np.int16(999))
assert_identical(s['i8u'], np.uint8(234))
class TestScalars:
# Test that scalar values are read in with the correct value and type
def test_byte(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_int16(self):
s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
assert_identical(s.i16s, np.int16(-23456))
def test_int32(self):
s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
assert_identical(s.i32s, np.int32(-1234567890))
def test_float32(self):
s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
assert_identical(s.f32, np.float32(-3.1234567e+37))
def test_float64(self):
s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
def test_complex32(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
def test_bytes(self):
s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
def test_structure(self):
pass
def test_complex64(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
def test_heap_pointer(self):
pass
def test_object_reference(self):
pass
def test_uint16(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
assert_identical(s.i16u, np.uint16(65511))
def test_uint32(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
assert_identical(s.i32u, np.uint32(4294967233))
def test_int64(self):
s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
assert_identical(s.i64s, np.int64(-9223372036854774567))
def test_uint64(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
assert_identical(s.i64u, np.uint64(18446744073709529285))
class TestCompressed(TestScalars):
# Test that compressed .sav files can be read in
def test_compressed(self):
s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
assert_identical(s.f32, np.float32(-3.1234567e+37))
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
class TestArrayDimensions:
# Test that multi-dimensional arrays are read in with the correct dimensions
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
class TestStructures:
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
assert_identical(s.scalars.a, np.array(np.int16(1)))
assert_identical(s.scalars.b, np.array(np.int32(2)))
assert_identical(s.scalars.c, np.array(np.float32(3.)))
assert_identical(s.scalars.d, np.array(np.float64(4.)))
assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
def test_scalars_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))
def test_scalars_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
def test_arrays_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.a.dtype.type is np.object_)
assert_(s.arrays_rep.b.dtype.type is np.object_)
assert_(s.arrays_rep.c.dtype.type is np.object_)
assert_(s.arrays_rep.d.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.a.shape, (5, ))
assert_equal(s.arrays_rep.b.shape, (5, ))
assert_equal(s.arrays_rep.c.shape, (5, ))
assert_equal(s.arrays_rep.d.shape, (5, ))
# Check values
for i in range(5):
assert_array_identical(s.arrays_rep.a[i],
np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays_rep.b[i],
np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays_rep.c[i],
np.array([np.complex64(1+2j),
np.complex64(7+8j)]))
assert_array_identical(s.arrays_rep.d[i],
np.array([b"cheese", b"bacon", b"spam"],
dtype=object))
def test_arrays_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.a.dtype.type is np.object_)
assert_(s.arrays_rep.b.dtype.type is np.object_)
assert_(s.arrays_rep.c.dtype.type is np.object_)
assert_(s.arrays_rep.d.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
assert_equal(s.arrays_rep.d.shape, (4, 3, 2))
# Check values
for i in range(4):
for j in range(3):
for k in range(2):
assert_array_identical(s.arrays_rep.a[i, j, k],
np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays_rep.b[i, j, k],
np.array([4., 5., 6., 7.],
dtype=np.float32))
assert_array_identical(s.arrays_rep.c[i, j, k],
np.array([np.complex64(1+2j),
np.complex64(7+8j)]))
assert_array_identical(s.arrays_rep.d[i, j, k],
np.array([b"cheese", b"bacon", b"spam"],
dtype=object))
def test_inheritance(self):
s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
assert_identical(s.fc.x, np.array([0], dtype=np.int16))
assert_identical(s.fc.y, np.array([0], dtype=np.int16))
assert_identical(s.fc.r, np.array([0], dtype=np.int16))
assert_identical(s.fc.c, np.array([4], dtype=np.int16))
def test_arrays_corrupt_idl80(self):
# test byte arrays with missing nbyte information from IDL 8.0 .sav file
with warnings.catch_warnings():
warnings.simplefilter('ignore')
s = readsav(path.join(DATA_PATH,'struct_arrays_byte_idl80.sav'),
verbose=False)
assert_identical(s.y.x[0], np.array([55,66], dtype=np.uint8))
class TestPointers:
# Check that pointers in .sav files produce references to the same object in Python
def test_pointers(self):
s = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False)
assert_identical(s.c64_pointer1, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_identical(s.c64_pointer2, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_(s.c64_pointer1 is s.c64_pointer2)
class TestPointerArray:
# Test that pointers in arrays are correctly read in
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
assert_(np.all(s.array1d == np.float32(4.)))
assert_(np.all(vect_id(s.array1d) == id(s.array1d[0])))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
assert_(np.all(s.array2d == np.float32(4.)))
assert_(np.all(vect_id(s.array2d) == id(s.array2d[0,0])))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
assert_(np.all(s.array3d == np.float32(4.)))
assert_(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0])))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
assert_(np.all(s.array4d == np.float32(4.)))
assert_(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0])))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_(np.all(s.array5d == np.float32(4.)))
assert_(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0])))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
assert_(np.all(s.array6d == np.float32(4.)))
assert_(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0])))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
assert_(np.all(s.array7d == np.float32(4.)))
assert_(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0])))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
assert_(np.all(s.array8d == np.float32(4.)))
assert_(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0])))
class TestPointerStructures:
# Test that structures are correctly read in
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
assert_(id(s.pointers.g[0]) == id(s.pointers.h[0]))
def test_pointers_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_))
assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_))
assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
def test_pointers_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)
assert_identical(s.pointers_rep.g, s_expect)
assert_identical(s.pointers_rep.h, s_expect)
assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
def test_arrays_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.g.dtype.type is np.object_)
assert_(s.arrays_rep.h.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.g.shape, (5, ))
assert_equal(s.arrays_rep.h.shape, (5, ))
# Check values
for i in range(5):
assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))
def test_arrays_replicated_3d(self):
pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
s = readsav(pth, verbose=False)
# Check column types
assert_(s.arrays_rep.g.dtype.type is np.object_)
assert_(s.arrays_rep.h.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
assert_equal(s.arrays_rep.h.shape, (4, 3, 2))
# Check values
for i in range(4):
for j in range(3):
for k in range(2):
assert_array_identical(s.arrays_rep.g[i, j, k],
np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays_rep.h[i, j, k],
np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
class TestTags:
'''Test that sav files with description tag read at all'''
def test_description(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_null_pointer():
# Regression test for null pointers.
s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
assert_identical(s.point, None)
assert_identical(s.check, np.int16(5))
def test_invalid_pointer():
# Regression test for invalid pointers (gh-4613).
# In some files in the wild, pointers can sometimes refer to a heap
# variable that does not exist. In that case, we now gracefully fail for
# that variable and replace the variable with None and emit a warning.
# Since it's difficult to artificially produce such files, the file used
# here has been edited to force the pointer reference to be invalid.
with warnings.catch_warnings(record=True) as w:
s = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
assert_(len(w) == 1)
assert_(str(w[0].message) == ("Variable referenced by pointer not found in "
"heap: variable will be set to None"))
assert_identical(s['a'], np.array([None, None]))
if __name__ == "__main__":
run_module_suite()
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/scipy/io/tests/test_idl.py
|
Python
|
mit
| 19,614 | 0.005302 |
from __future__ import unicode_literals
import sys
import os
import re
import mimetypes
from copy import copy
from importlib import import_module
from io import BytesIO
from django.conf import settings
from django.contrib.auth import authenticate, login, logout, get_user_model
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (request_started, request_finished,
got_request_exception)
from django.db import close_old_connections
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import force_bytes, force_str
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils import six
from django.utils.six.moves.urllib.parse import unquote, urlparse, urlsplit
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
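# Illustrative sketch, not part of the original module: it demonstrates the
# one-way, length-limited read semantics FakePayload enforces. The helper is
# defined but never called; all names other than FakePayload are local.
def _fakepayload_example():
    payload = FakePayload(b"abcdef")
    assert len(payload) == 6
    assert payload.read(4) == b"abcd"  # bytes are consumed as they are read
    assert payload.read() == b"ef"     # read() with no argument drains the rest
    # Writing now would raise ValueError, mirroring a real network stream that
    # cannot be rewound once the request body has been consumed.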
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
"""
    An HTTP Handler that can be used for testing purposes.
Uses the WSGI interface to compose requests, but returns
the raw HttpResponse object
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
response = self.get_response(request)
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
]])
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
]])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
if hasattr(file, 'content_type'):
content_type = file.content_type
else:
content_type = mimetypes.guess_type(file.name)[0]
if content_type is None:
content_type = 'application/octet-stream'
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, os.path.basename(file.name))),
to_bytes('Content-Type: %s' % content_type),
b'',
file.read()
]
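# Illustrative sketch (assumption: Django settings are configured so that
# settings.DEFAULT_CHARSET is available; the file object here is a stand-in).
# It shows how encode_multipart() combines a plain form field and a file-like
# object into one multipart body using the helpers above.
def _encode_multipart_example():
    class _Upload(BytesIO):
        pass  # subclass only so a .name attribute can be attached
    upload = _Upload(b"file contents")
    upload.name = "notes.txt"          # encode_file() derives the content type from this
    body = encode_multipart(BOUNDARY, {
        'title': 'hello',              # ordinary form value
        'attachment': upload,          # file-like object, sent via encode_file()
    })
    assert ('--%s--' % BOUNDARY).encode('ascii') in body
    return body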
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
    def _encode_data(self, data, content_type):
        if content_type == MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = unquote(path)
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
path = path.encode('utf-8').decode('iso-8859-1')
return path
def get(self, path, data={}, **extra):
"Construct a GET request."
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, **r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
**extra):
"Construct a POST request."
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type, **extra)
def head(self, path, data={}, **extra):
"Construct a HEAD request."
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, **r)
def options(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type, **extra)
def put(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type, **extra)
def generic(self, method, path,
data='', content_type='application/octet-stream', **extra):
"""Constructs an arbitrary HTTP request."""
parsed = urlparse(path)
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
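# Illustrative sketch (assumptions: Django settings are configured and
# `my_view` is a hypothetical view callable supplied by the caller). It shows
# the intended use of RequestFactory: build a bare request and call the view
# directly, with no middleware or URL resolution involved.
def _request_factory_example(my_view):
    rf = RequestFactory()
    request = rf.post('/submit/', {'name': 'fred'})
    response = my_view(request)        # invoke the view like a plain function
    return response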
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data={}, follow=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(
path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(
path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
        Sets the Client to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if (user and user.is_active and
'django.contrib.sessions' in settings.INSTALLED_APPS):
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
UserModel = get_user_model()
if self.session:
request.session = self.session
uid = self.session.get("_auth_user_id")
if uid:
request.user = UserModel._default_manager.get(pk=uid)
else:
request.session = engine.SessionStore()
logout(request)
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((url, response.status_code))
url = urlsplit(url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
# Prevent loops
if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
break
return response
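# Illustrative sketch (assumptions: Django settings are configured, the auth
# and sessions apps are installed, and the credentials and URL used here exist
# in the project under test). It demonstrates the stateful behaviour described
# above: cookies set by login() persist across requests, and follow=True
# records each redirect hop on response.redirect_chain.
def _client_example():
    client = Client()
    if client.login(username='fred', password='secret'):
        response = client.get('/accounts/profile/', follow=True)
        for url, status_code in response.redirect_chain:
            print('%s -> %s' % (status_code, url))
        client.logout()
        return response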
|
ericholscher/django
|
django/test/client.py
|
Python
|
bsd-3-clause
| 22,448 | 0.000757 |
# Copyright (c) 2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Error Injection EINJ module."""
from __future__ import print_function
import acpi
import bits
import contextlib
from cpudetect import cpulib
import ctypes
import functools
import ttypager
# Create constants for each value in these dictionaries for readability. These
# names are too generic to put in the acpi module's namespace, but they make
# sense in the einj module.
globals().update(map(reversed, acpi._error_injection_action.iteritems()))
globals().update(map(reversed, acpi._error_injection_instruction.iteritems()))
read_mem = {
1: bits.readb,
2: bits.readw,
3: bits.readl,
4: bits.readq,
}
write_mem = {
1: bits.writeb,
2: bits.writew,
3: bits.writel,
4: bits.writeq,
}
out_port = {
1: bits.outb,
2: bits.outw,
3: bits.outl,
}
error_injection_command_status = {
0x0: 'SUCCESS',
0x1: 'UNKNOWN_FAILURE',
0x2: 'INVALID_ACCESS',
}
globals().update(map(reversed, error_injection_command_status.iteritems()))
# List of actions that can be executed with no custom processing
_action_simple = [
BEGIN_INJECTION_OPERATION,
END_OPERATION,
EXECUTE_OPERATION,
CHECK_BUSY_STATUS,
GET_COMMAND_STATUS,
]
def _execute_action(entry, value=None):
print("entry.injection_action = {:#x} ({})".format(entry.injection_action, acpi._error_injection_action.get(entry.injection_action, "Unknown")))
if entry.injection_action in _action_simple:
return _execute_instruction(entry)
elif entry.injection_action == GET_TRIGGER_ERROR_ACTION_TABLE:
return acpi.trigger_error_action(_execute_instruction(entry))
elif entry.injection_action == SET_ERROR_TYPE:
if value is None:
raise ValueError("action SET_ERROR_TYPE but no input parameter provided")
return _execute_instruction(entry, value.data)
elif entry.injection_action == GET_ERROR_TYPE:
_execute_instruction(entry)
return acpi.error_type_flags.from_address(entry.register_region.address)
elif entry.injection_action == SET_ERROR_TYPE_WITH_ADDRESS:
if value is None:
raise ValueError("action SET_ERROR_TYPE_WITH_ADDRESS but no input paramters provided")
error_type = value[0]
if error_type.processor_correctable or error_type.processor_uncorrectable_non_fatal or error_type.processor_uncorrectable_fatal:
error_type, flags, apicid = value
cpu_error = acpi.set_error_type_with_addr.from_address(entry.register_region.address)
if cpu_error.error_type.vendor_defined and cpu_error.vendor_error_type_extension_structure_offset:
vendor_err_addr = entry.register_region.address + cpu_error.vendor_error_type_extension_structure_offset
vendor_error_type_extension = acpi.set_error_type_with_addr.from_address(vendor_err_addr)
print(vendor_error_type_extension)
print('WRITE_REGISTER SET_ERROR_TYPE_WITH_ADDRESS address - {0:#x}'.format(entry.register_region.address))
cpu_error.error_type = error_type
cpu_error.flags = flags
cpu_error.apicid = apicid
print(cpu_error)
elif error_type.memory_correctable or error_type.memory_uncorrectable_non_fatal or error_type.memory_uncorrectable_fatal:
error_type, flags, mem_addr, mem_addr_range = value
mem_error = acpi.set_error_type_with_addr.from_address(entry.register_region.address)
print('WRITE_REGISTER SET_ERROR_TYPE_WITH_ADDRESS address - {0:#x}'.format(entry.register_region.address))
mem_error.error_type = error_type
mem_error.flags = flags
mem_error.memory_address = mem_addr
mem_error.memory_address_range = mem_addr_range
print(mem_error)
elif error_type.pci_express_correctable or error_type.pci_express_uncorrectable_non_fatal or error_type.pci_express_uncorrectable_fatal:
error_type, flags, segment, bus, device, function = value
pcie_error = acpi.set_error_type_with_addr.from_address(entry.register_region.address)
print('WRITE_REGISTER SET_ERROR_TYPE_WITH_ADDRESS address - {0:#x}'.format(entry.register_region.address))
pcie_error.error_type = error_type
pcie_error.flags = flags
pcie_error.pcie_sbdf.bits.function_num = function
pcie_error.pcie_sbdf.bits.device_num = device
pcie_error.pcie_sbdf.bits.bus_num = bus
pcie_error.pcie_sbdf.bits.pcie_segment = segment
print(pcie_error)
else:
raise ValueError("action SET_ERROR_TYPE_WITH_ADDRESS has unsupported error_type {}".format(error_type))
elif entry.injection_action == TRIGGER_ERROR:
# Execute the actions specified in the trigger action table.
trigger_table = get_trigger_action_table_op()
for entry in trigger_table.entries:
_execute_instruction(entry)
else:
raise ValueError("action is unsupported")
def _execute_instruction(entry, value=None):
print("entry.instruction = {:#x} ({})".format(entry.instruction, acpi._error_injection_instruction.get(entry.instruction, "Unknown")))
    if entry.instruction == READ_REGISTER:
        return _read_register(entry)
    elif entry.instruction == READ_REGISTER_VALUE:
        return _read_register_value(entry)
    elif entry.instruction == WRITE_REGISTER_VALUE:
        return _write_register(entry)
    elif entry.instruction == WRITE_REGISTER:
        return _write_register(entry, value)
    elif entry.instruction == NOOP:
return None
def _read_register(entry):
if entry.register_region.address_space_id == acpi.ASID_SYSTEM_MEMORY:
print('READ_REGISTER address - {:#x}'.format(entry.register_region.address))
value = read_mem[entry.register_region.access_size](entry.register_region.address)
value = value >> entry.register_region.register_bit_offset
value = value & entry.mask
print('READ_REGISTER value - {:#x}'.format(value))
return value
return None
def _read_register_value(entry):
read_value = _read_register(entry)
read_value = read_value >> entry.register_region.register_bit_offset
read_value = read_value & entry.mask
print('entry.value - {:#x}'.format(entry.value))
return read_value == entry.value
def _write_register(entry, value=None):
    if value is None:
value = entry.value
if entry.register_region.address_space_id == acpi.ASID_SYSTEM_MEMORY:
print('WRITE_REGISTER address - {:#x}'.format(entry.register_region.address))
read_value = read_mem[entry.register_region.access_size](entry.register_region.address)
print('WRITE_REGISTER before value - {:#x}'.format(read_value))
if entry.flags.bits.preserve_register:
read_value = read_value & ~(entry.mask << entry.register_region.register_bit_offset)
value = value | read_value
write_mem[entry.register_region.access_size](entry.register_region.address, value)
read_value = read_mem[entry.register_region.access_size](entry.register_region.address)
print('WRITE_REGISTER after value - {:#x}'.format(read_value))
elif entry.register_region.address_space_id == acpi.ASID_SYSTEM_IO:
print('WRITE_REGISTER_VALUE IO address - {:#x}'.format(entry.register_region.address))
print('WRITE_REGISTER_VALUE value to write - {:#x}'.format(entry.value))
out_port[entry.register_region.access_size](entry.register_region.address, value)
else:
raise ValueError("Unsupported address_space_id: {}".format(entry.register_region.address_space_id))
def _write_register_value(entry, value):
_write_register(entry, value)
def get_action(action):
einj = acpi.parse_einj()
if einj is None:
raise RuntimeError("No ACPI EINJ table found")
for entry in einj.entries:
if entry.injection_action == action:
return entry
def get_and_execute_op(action, value=None):
entry = get_action(action)
if entry is None:
print('Error: Unexpected Action')
return
return _execute_action(entry, value)
def begin_inject_op():
return get_and_execute_op(BEGIN_INJECTION_OPERATION)
def get_trigger_action_table_op():
return get_and_execute_op(GET_TRIGGER_ERROR_ACTION_TABLE)
def set_error_type_op(error_type):
return get_and_execute_op(SET_ERROR_TYPE, error_type)
def get_error_type_op():
return get_and_execute_op(GET_ERROR_TYPE)
def end_inject_op():
return get_and_execute_op(END_OPERATION)
def execute_inject_op():
return get_and_execute_op(EXECUTE_OPERATION)
def _execute_trigger_error_op():
# Create an Trigger Error action to execute
entry = acpi.InjectionInstructionEntry()
entry.injection_action = TRIGGER_ERROR
return _execute_action(entry)
def check_busy_status_op():
busy_status = get_and_execute_op(CHECK_BUSY_STATUS)
print('busy_status = {}'.format('Busy' if busy_status else 'Not Busy'))
return busy_status
def get_cmd_status_op():
cmd_status = get_and_execute_op(GET_COMMAND_STATUS)
print('cmd_status = {:#x} ({})'.format(cmd_status, error_injection_command_status.get(cmd_status, 'Unknown')))
return cmd_status
# This routine is specific to setting a memory error
def _set_error_type_with_addr_op_mem(error_type, flags, mem_addr=None, mem_addr_range=None):
return get_and_execute_op(SET_ERROR_TYPE_WITH_ADDRESS, (error_type, flags, mem_addr, mem_addr_range))
# This routine is specific to setting a processor error
def _set_error_type_with_addr_op_cpu(error_type, flags, apicid=None):
return get_and_execute_op(SET_ERROR_TYPE_WITH_ADDRESS, (error_type, flags, apicid))
# This routine is specific to setting a PCIE error
def _set_error_type_with_addr_op_pcie(error_type, flags, segment=None, bus=None, device=None, function=None):
return get_and_execute_op(SET_ERROR_TYPE_WITH_ADDRESS, (error_type, flags, (segment, bus, device, function)))
def einj_cpu_init():
"""Return the error injection cpu init method.
Returns the cpu-specific method if available, otherwise default.
Computed on first call, and cached for subsequent return."""
global einj_cpu_init
@contextlib.contextmanager
def default_cpu_init():
yield
try:
local_einj_cpu_init = cpulib.quirk_einj_cpu_init
print("QUIRK: Setting processor-specific error injection init")
except AttributeError:
local_einj_cpu_init = default_cpu_init
old_func = einj_cpu_init
def einj_cpu_init():
return local_einj_cpu_init()
functools.update_wrapper(einj_cpu_init, old_func)
return local_einj_cpu_init()
@contextlib.contextmanager
def _error_injection_op():
with einj_cpu_init():
begin_inject_op()
yield
execute_inject_op()
while check_busy_status_op():
continue
cmd_status = get_cmd_status_op()
if cmd_status != SUCCESS:
return
_execute_trigger_error_op()
end_inject_op()
@contextlib.contextmanager
def _inject_memory_error(address=None, mask=None):
# Constructor creates a structure with all zero init
error_type = acpi.error_type_flags()
yield error_type
if (address is not None) and (mask is not None):
# Constructor creates a structure with all zero init
flags = acpi.set_error_type_with_addr_flags()
flags.memory_addr_and_mask_valid = 1
_set_error_type_with_addr_op_mem(error_type, flags, address, mask)
else:
set_error_type_op(error_type)
def inject_memory_correctable_err(address=None, mask=None):
""" Inject memory correctable error.
If address and mask are provided, then SET_ERROR_TYPE_WITH_ADDRESS
Error Injection Action is used. Otherwise, SET_ERROR_TYPE is used."""
if get_error_type_op().memory_correctable == 0:
print('Memory Correctable error injection is not supported')
return
with _error_injection_op():
with _inject_memory_error(address, mask) as error_type:
error_type.memory_correctable = 1
def inject_memory_unc_nonfatal_err(address=None, mask=None):
"""Inject memory uncorrectable non-fatal error.
If address and mask are provided, then SET_ERROR_TYPE_WITH_ADDRESS
Error Injection Action is used. Otherwise, SET_ERROR_TYPE is used."""
if get_error_type_op().memory_uncorrectable_non_fatal == 0:
print('Memory Uncorrectable non-Fatal error injection is not supported')
return
with _error_injection_op():
with _inject_memory_error(address, mask) as error_type:
error_type.memory_uncorrectable_non_fatal = 1
def inject_memory_unc_fatal_err(address=None, mask=None):
"""Inject memory uncorrectable fatal error.
If address and mask are provided, then SET_ERROR_TYPE_WITH_ADDRESS
Error Injection Action is used. Otherwise, SET_ERROR_TYPE is used."""
if get_error_type_op().memory_uncorrectable_fatal == 0:
print('Memory Uncorrectable Fatal error injection is not supported')
return
with _error_injection_op():
with _inject_memory_error(address, mask) as error_type:
error_type.memory_uncorrectable_fatal = 1
@contextlib.contextmanager
def _inject_processor_error(apicid=None):
# Constructor creates a structure with all zero init
error_type = acpi.error_type_flags()
yield error_type
if apicid is not None:
# Constructor creates a structure with all zero init
flags = acpi.set_error_type_with_addr_flags()
flags.processor_apic_valid = 1
_set_error_type_with_addr_op_cpu(error_type, flags, apicid)
else:
set_error_type_op(error_type)
def inject_processor_correctable_err(apicid=None):
""" Inject processor correctable error.
If apicid is provided, then SET_ERROR_TYPE_WITH_ADDRESS Error
Injection Action is used. Otherwise, SET_ERROR_TYPE is used."""
if get_error_type_op().processor_correctable == 0:
print('Processor Correctable error injection is not supported')
return
with _error_injection_op():
with _inject_processor_error(apicid) as error_type:
error_type.processor_correctable = 1
def inject_processor_unc_nonfatal_err(apicid=None):
"""Inject processor uncorrectable non-fatal error.
If apicid is provided, then SET_ERROR_TYPE_WITH_ADDRESS Error
Injection Action is used. Otherwise, SET_ERROR_TYPE is used."""
if get_error_type_op().processor_uncorrectable_non_fatal == 0:
print('Processor Uncorrectable non-Fatal error injection is not supported')
return
with _error_injection_op():
with _inject_processor_error(apicid) as error_type:
error_type.processor_uncorrectable_non_fatal = 1
def inject_processor_unc_fatal_err(apicid=None):
    """Inject processor uncorrectable fatal error.
If apicid is provided, then SET_ERROR_TYPE_WITH_ADDRESS Error
Injection Action is used. Otherwise, SET_ERROR_TYPE is used."""
if get_error_type_op().processor_uncorrectable_fatal == 0:
print('Processor Uncorrectable Fatal error injection is not supported')
return
with _error_injection_op():
with _inject_processor_error(apicid) as error_type:
error_type.processor_uncorrectable_fatal = 1
@contextlib.contextmanager
def _inject_pcie_error(segment=None, bus=None, device=None, function=None):
# Constructor creates a structure with all zero init
error_type = acpi.error_type_flags()
yield error_type
if all(x is not None for x in (segment, bus, device, function)):
# Constructor creates a structure with all zero init
flags = acpi.set_error_type_with_addr_flags()
flags.pcie_sbdf_valid = 1
_set_error_type_with_addr_op_pcie(error_type, flags, segment, bus, device, function)
else:
set_error_type_op(error_type)
def inject_pcie_correctable_err(segment=None, bus=None, device=None, function=None):
""" Inject PCIE correctable error.
If segment, bus, device and function are provided, then
SET_ERROR_TYPE_WITH_ADDRESS Error Injection Action is used.
Otherwise, SET_ERROR_TYPE is used."""
if get_error_type_op().pci_express_correctable == 0:
print('PCI Express Correctable error injection is not supported')
return
with _error_injection_op():
        with _inject_pcie_error(segment, bus, device, function) as error_type:
            error_type.pci_express_correctable = 1
def inject_pcie_unc_nonfatal_err(segment=None, bus=None, device=None, function=None):
"""Inject PCIE uncorrectable non-fatal error.
If segment, bus, device and function are provided, then
SET_ERROR_TYPE_WITH_ADDRESS Error Injection Action is used.
Otherwise, SET_ERROR_TYPE is used."""
    if get_error_type_op().pci_express_uncorrectable_non_fatal == 0:
print('PCI Express Uncorrectable non-Fatal error injection is not supported')
return
with _error_injection_op():
        with _inject_pcie_error(segment, bus, device, function) as error_type:
            error_type.pci_express_uncorrectable_non_fatal = 1
def inject_pcie_unc_fatal_err(segment=None, bus=None, device=None, function=None):
"""Inject PCIE uncorrectable fatal error.
If segment, bus, device and function are provided, then
SET_ERROR_TYPE_WITH_ADDRESS Error Injection Action is used.
Otherwise, SET_ERROR_TYPE is used."""
if get_error_type_op().pci_express_uncorrectable_fatal == 0:
print('PCIE Uncorrectable Fatal error injection is not supported')
return
with _error_injection_op():
        with _inject_pcie_error(segment, bus, device, function) as error_type:
            error_type.pci_express_uncorrectable_fatal = 1
@contextlib.contextmanager
def _inject_platform_error():
# Constructor creates a structure with all zero init
error_type = acpi.error_type_flags()
yield error_type
set_error_type_op(error_type)
def inject_platform_correctable_err():
""" Inject platform correctable error."""
if get_error_type_op().platform_correctable == 0:
print('Platform Correctable error injection is not supported')
return
with _error_injection_op():
with _inject_platform_error() as error_type:
error_type.platform_correctable = 1
def inject_platform_unc_nonfatal_err():
"""Inject platform uncorrectable non-fatal error."""
if get_error_type_op().platform_uncorrectable_non_fatal == 0:
print('Platform Uncorrectable non-Fatal error injection is not supported')
return
with _error_injection_op():
with _inject_platform_error() as error_type:
error_type.platform_uncorrectable_non_fatal = 1
def inject_platform_unc_fatal_err():
"""Inject platform uncorrectable fatal error."""
if get_error_type_op().platform_uncorrectable_fatal == 0:
print('Platform Uncorrectable Fatal error injection is not supported')
return
with _error_injection_op():
with _inject_platform_error() as error_type:
error_type.platform_uncorrectable_fatal = 1
def display_einj_address():
address = acpi.get_table_addr("EINJ", 0)
if address is not None:
print('EINJ address {0:#x}'.format(address))
def display_supported_errors():
print(get_error_type_op())
def display_triggers():
with ttypager.page():
print(get_trigger_action_table_op())
def display_vendor_error_type_extension():
with ttypager.page():
entry = get_action(SET_ERROR_TYPE_WITH_ADDRESS)
set_err = acpi.set_error_type_with_addr.from_address(entry.register_region.address)
vendor_err_addr = entry.register_region.address + set_err.vendor_error_type_extension_structure_offset
vendor_err = acpi.vendor_error_type_extension.from_address(vendor_err_addr)
print(vendor_err)
def display_einj():
with ttypager.page():
einj = acpi.parse_einj()
if einj is None:
raise RuntimeError("No ACPI EINJ table found")
print(einj)
def demo():
unc_methods = [
inject_memory_unc_nonfatal_err,
inject_memory_unc_fatal_err,
inject_processor_unc_nonfatal_err,
inject_processor_unc_fatal_err,
inject_pcie_unc_nonfatal_err,
inject_pcie_unc_fatal_err,
inject_platform_unc_nonfatal_err,
inject_platform_unc_fatal_err,
]
corr_methods = [
inject_memory_correctable_err,
inject_processor_correctable_err,
inject_pcie_correctable_err,
inject_platform_correctable_err,
]
display_methods = [
display_einj,
display_einj_address,
display_supported_errors,
display_triggers,
display_vendor_error_type_extension,
]
with ttypager.page():
for item in display_methods:
print("\n\n\nMethod name: {}".format(item.__name__))
print("Method doc:\n{}\n\n".format(item.__doc__ if item.__doc__ else "No documentation for this method"))
item()
for item in corr_methods:
print("\n\nMethod name: {}".format(item.__name__))
print("Method doc: {}".format(item.__doc__ if item.__doc__ else "No documentation for this method"))
item()
for item in unc_methods:
print("\n\n\nMethod name: {}".format(item.__name__))
print("Method doc: {}\n\n".format(item.__doc__ if item.__doc__ else "No documentation for this method"))
print("Based on the name and documentation of this item, it is likely to be fatal.")
print("Execute it directly from the python command line.")
print("Your mileage may vary and if it breaks, you get to keep all the pieces.")
|
ii0/bits
|
python/einj.py
|
Python
|
bsd-3-clause
| 23,447 | 0.004478 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions for use in the Placement API."""
# NOTE(cdent): The exceptions are copied from nova.exception, where they
# were originally used. To prepare for extracting placement to its own
# repository we wish to no longer do that. Instead, exceptions used by
# placement should be in the placement hierarchy.
from oslo_log import log as logging
from nova.i18n import _
LOG = logging.getLogger(__name__)
class _BaseException(Exception):
"""Base Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# NOTE(melwitt): This is done in a separate method so it can be
# monkey-patched during testing to make it a hard failure.
self._log_exception()
message = self.msg_fmt
self.message = message
super(_BaseException, self).__init__(message)
def _log_exception(self):
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in self.kwargs.items():
LOG.error("%s: %s" % (name, value)) # noqa
def format_message(self):
# Use the first argument to the python Exception object which
# should be our full exception message, (see __init__).
return self.args[0]
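# Illustrative sketch, not part of the placement exception hierarchy: a
# subclass only needs to define msg_fmt, and keyword arguments fill the
# printf-style placeholders when the exception is raised.
class _ExampleQuotaExceeded(_BaseException):
    msg_fmt = _("Quota of %(limit)d exceeded for project %(project)s.")
# _ExampleQuotaExceeded(limit=10, project='demo').format_message() returns
# "Quota of 10 exceeded for project demo."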
class NotFound(_BaseException):
msg_fmt = _("Resource could not be found.")
class Exists(_BaseException):
msg_fmt = _("Resource already exists.")
class InvalidInventory(_BaseException):
msg_fmt = _("Inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s' invalid.")
class CannotDeleteParentResourceProvider(_BaseException):
msg_fmt = _("Cannot delete resource provider that is a parent of "
"another. Delete child providers first.")
class ConcurrentUpdateDetected(_BaseException):
msg_fmt = _("Another thread concurrently updated the data. "
"Please retry your update")
class ResourceProviderConcurrentUpdateDetected(ConcurrentUpdateDetected):
msg_fmt = _("Another thread concurrently updated the resource provider "
"data. Please retry your update")
class InvalidAllocationCapacityExceeded(InvalidInventory):
msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. The requested "
"amount would exceed the capacity.")
class InvalidAllocationConstraintsViolated(InvalidInventory):
msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. The requested "
"amount would violate inventory constraints.")
class InvalidInventoryCapacity(InvalidInventory):
msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. "
"The reserved value is greater than or equal to total.")
class InvalidInventoryCapacityReservedCanBeTotal(InvalidInventoryCapacity):
msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. "
"The reserved value is greater than total.")
# An exception with this name is used on both sides of the placement/
# nova interaction.
class InventoryInUse(InvalidInventory):
# NOTE(mriedem): This message cannot change without impacting the
# nova.scheduler.client.report._RE_INV_IN_USE regex.
msg_fmt = _("Inventory for '%(resource_classes)s' on "
"resource provider '%(resource_provider)s' in use.")
class InventoryWithResourceClassNotFound(NotFound):
msg_fmt = _("No inventory of class %(resource_class)s found.")
class MaxDBRetriesExceeded(_BaseException):
msg_fmt = _("Max retries of DB transaction exceeded attempting to "
"perform %(action)s.")
class ObjectActionError(_BaseException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class PolicyNotAuthorized(_BaseException):
msg_fmt = _("Policy does not allow %(action)s to be performed.")
class ResourceClassCannotDeleteStandard(_BaseException):
msg_fmt = _("Cannot delete standard resource class %(resource_class)s.")
class ResourceClassCannotUpdateStandard(_BaseException):
msg_fmt = _("Cannot update standard resource class %(resource_class)s.")
class ResourceClassExists(_BaseException):
msg_fmt = _("Resource class %(resource_class)s already exists.")
class ResourceClassInUse(_BaseException):
msg_fmt = _("Cannot delete resource class %(resource_class)s. "
"Class is in use in inventory.")
class ResourceClassNotFound(NotFound):
msg_fmt = _("No such resource class %(resource_class)s.")
# An exception with this name is used on both sides of the placement/
# nova interaction.
class ResourceProviderInUse(_BaseException):
msg_fmt = _("Resource provider has allocations.")
class TraitCannotDeleteStandard(_BaseException):
msg_fmt = _("Cannot delete standard trait %(name)s.")
class TraitExists(_BaseException):
msg_fmt = _("The Trait %(name)s already exists")
class TraitInUse(_BaseException):
msg_fmt = _("The trait %(name)s is in use by a resource provider.")
class TraitNotFound(NotFound):
msg_fmt = _("No such trait(s): %(names)s.")
class ProjectNotFound(NotFound):
msg_fmt = _("No such project(s): %(external_id)s.")
class ProjectExists(Exists):
msg_fmt = _("The project %(external_id)s already exists.")
class UserNotFound(NotFound):
msg_fmt = _("No such user(s): %(external_id)s.")
class UserExists(Exists):
msg_fmt = _("The user %(external_id)s already exists.")
class ConsumerNotFound(NotFound):
msg_fmt = _("No such consumer(s): %(uuid)s.")
class ConsumerExists(Exists):
msg_fmt = _("The consumer %(uuid)s already exists.")
|
mikalstill/nova
|
nova/api/openstack/placement/exception.py
|
Python
|
apache-2.0
| 6,885 | 0.000436 |
# -*- coding: utf-8 -*-
"""Assorted base data structures.
Assorted base data structures provide a generic communicator with V-REP
simulator.
"""
from vrepsim.simulator import get_default_simulator
class Communicator(object):
"""Generic communicator with V-REP simulator."""
def __init__(self, vrep_sim):
if vrep_sim is not None:
self._vrep_sim = vrep_sim
else:
self._vrep_sim = get_default_simulator(raise_on_none=True)
@property
def client_id(self):
"""Client ID."""
return self._vrep_sim.client_id
@property
def vrep_sim(self):
"""Interface to V-REP remote API server."""
return self._vrep_sim
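# Illustrative sketch, not part of the original module: concrete interfaces to
# scene models are expected to subclass Communicator so they share the
# default-simulator lookup and the client_id/vrep_sim properties. The model
# name used here is hypothetical.
class _ExampleModel(Communicator):
    """Hypothetical scene model communicating through the shared simulator."""
    def __init__(self, vrep_sim=None):
        super(_ExampleModel, self).__init__(vrep_sim)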
|
macknowak/vrepsim
|
vrepsim/base.py
|
Python
|
gpl-3.0
| 699 | 0 |
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.rds2 import exceptions
from boto.compat import json
class RDSConnection(AWSQueryConnection):
"""
Amazon Relational Database Service
Amazon Relational Database Service (Amazon RDS) is a web service
that makes it easier to set up, operate, and scale a relational
database in the cloud. It provides cost-efficient, resizable
capacity for an industry-standard relational database and manages
common database administration tasks, freeing up developers to
focus on what makes their applications and businesses unique.
Amazon RDS gives you access to the capabilities of a familiar
MySQL or Oracle database server. This means the code,
applications, and tools you already use today with your existing
MySQL or Oracle databases work with Amazon RDS without
modification. Amazon RDS automatically backs up your database and
maintains the database software that powers your DB instance.
Amazon RDS is flexible: you can scale your database instance's
compute resources and storage capacity to meet your application's
demand. As with all Amazon Web Services, there are no up-front
investments, and you pay only for the resources you use.
This is the Amazon RDS API Reference . It contains a comprehensive
description of all Amazon RDS Query APIs and data types. Note that
this API is asynchronous and some actions may require polling to
determine when an action has been applied. See the parameter
description to determine if a change is applied immediately or on
the next instance reboot or during the maintenance window. For
more information on Amazon RDS concepts and usage scenarios, go to
the `Amazon RDS User Guide`_.
"""
APIVersion = "2014-10-31"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidSubnet": exceptions.InvalidSubnet,
"DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded,
"DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists,
"DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded,
"InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded,
"InvalidRestore": exceptions.InvalidRestore,
"InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState,
"AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
"DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists,
"InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity,
"ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded,
"DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound,
"DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists,
"ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound,
"DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs,
"InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState,
"InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
"ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound,
"SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
"SNSNoAuthorization": exceptions.SNSNoAuthorization,
"SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded,
"OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded,
"DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound,
"SNSInvalidTopic": exceptions.SNSInvalidTopic,
"InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState,
"DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound,
"InvalidOptionGroupState": exceptions.InvalidOptionGroupState,
"SourceNotFound": exceptions.SourceNotFound,
"SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
"EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
"DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported,
"InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState,
"InvalidDBSubnetState": exceptions.InvalidDBSubnetState,
"InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState,
"SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
"DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded,
"ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ,
"AuthorizationNotFound": exceptions.AuthorizationNotFound,
"OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists,
"SubscriptionNotFound": exceptions.SubscriptionNotFound,
"DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure,
"PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled,
"AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
"DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded,
"OptionGroupNotFound": exceptions.OptionGroupNotFound,
"DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists,
"DBInstanceNotFound": exceptions.DBInstanceNotFound,
"ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists,
"InvalidDBInstanceState": exceptions.InvalidDBInstanceState,
"DBSnapshotNotFound": exceptions.DBSnapshotNotFound,
"DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists,
"StorageQuotaExceeded": exceptions.StorageQuotaExceeded,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(RDSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_source_identifier_to_subscription(self, subscription_name,
source_identifier):
"""
Adds a source identifier to an existing RDS event notification
subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to add a source identifier to.
:type source_identifier: string
:param source_identifier:
The identifier of the event source to be added. An identifier must
begin with a letter and must contain only ASCII letters, digits,
and hyphens; it cannot end with a hyphen or contain two consecutive
hyphens.
Constraints:
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='AddSourceIdentifierToSubscription',
verb='POST',
path='/', params=params)
def add_tags_to_resource(self, resource_name, tags):
"""
Adds metadata tags to an Amazon RDS resource. These tags can
also be used with cost allocation reporting to track cost
associated with Amazon RDS resources, or used in Condition
statement in IAM policy for Amazon RDS.
For an overview on tagging Amazon RDS resources, see `Tagging
Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be added
to. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tags: list
:param tags: The tags to be assigned to the Amazon RDS resource.
Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {'ResourceName': resource_name, }
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='AddTagsToResource',
verb='POST',
path='/', params=params)
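    # Illustrative sketch (comment only, not part of the original API
    # reference): tags are passed as a list of (key, value) tuples, so a
    # typical call might look like:
    #
    #     conn = boto.rds2.connect_to_region('us-east-1')
    #     conn.add_tags_to_resource(
    #         'arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
    #         [('env', 'prod'), ('team', 'data')])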
def authorize_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Enables ingress to a DBSecurityGroup using one of two forms of
authorization. First, EC2 or VPC security groups can be added
to the DBSecurityGroup if the application using the database
is running on EC2 or VPC instances. Second, IP ranges are
available if the application accessing your database is
running on the Internet. Required parameters for this API are
one of CIDR range, EC2SecurityGroupId for VPC, or
(EC2SecurityGroupOwnerId and either EC2SecurityGroupName or
EC2SecurityGroupId for non-VPC).
You cannot authorize ingress from an EC2 security group in one
Region to an Amazon RDS DB instance in another. You cannot
authorize ingress from a VPC security group in one VPC to an
Amazon RDS DB instance in another.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to add
authorization to.
:type cidrip: string
:param cidrip: The IP range to authorize.
:type ec2_security_group_name: string
:param ec2_security_group_name: Name of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: Id of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: AWS Account Number of the owner of
the EC2 security group specified in the EC2SecurityGroupName
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='AuthorizeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
def copy_db_snapshot(self, source_db_snapshot_identifier,
target_db_snapshot_identifier, tags=None):
"""
Copies the specified DBSnapshot. The source DBSnapshot must be
in the "available" state.
:type source_db_snapshot_identifier: string
:param source_db_snapshot_identifier: The identifier for the source DB
snapshot.
Constraints:
+ Must be the identifier for a valid system snapshot in the "available"
state.
Example: `rds:mydb-2012-04-02-00-01`
:type target_db_snapshot_identifier: string
:param target_db_snapshot_identifier: The identifier for the copied
snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-db-snapshot`
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,
'TargetDBSnapshotIdentifier': target_db_snapshot_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CopyDBSnapshot',
verb='POST',
path='/', params=params)
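# Usage sketch (hedged): copying an automated snapshot to a manual one so it
# outlives the source instance. Identifiers follow the examples in the
# docstring above and are illustrative only.
#
#   conn.copy_db_snapshot('rds:mydb-2012-04-02-00-01', 'my-db-snapshot',
#                         tags=[('environment', 'production')])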
def create_db_instance(self, db_instance_identifier, allocated_storage,
db_instance_class, engine, master_username,
master_user_password, db_name=None,
db_security_groups=None,
vpc_security_group_ids=None,
availability_zone=None, db_subnet_group_name=None,
preferred_maintenance_window=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None, port=None,
multi_az=None, engine_version=None,
auto_minor_version_upgrade=None,
license_model=None, iops=None,
option_group_name=None, character_set_name=None,
publicly_accessible=None, tags=None,
storage_encrypted=False):
"""
Creates a new DB instance.
:type db_name: string
:param db_name: The meaning of this parameter differs according to the
database engine you use.
**MySQL**
The name of the database to create when the DB instance is created. If
this parameter is not specified, no database is created in the DB
instance.
Constraints:
+ Must contain 1 to 64 alphanumeric characters
+ Cannot be a word reserved by the specified database engine
Type: String
**Oracle**
The Oracle System ID (SID) of the created DB instance.
Default: `ORCL`
Constraints:
+ Cannot be longer than 8 characters
**SQL Server**
Not applicable. Must be null.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This
parameter is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15
for SQL Server).
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
Example: `mydbinstance`
:type allocated_storage: integer
:param allocated_storage: The amount of storage (in gigabytes) to be
initially allocated for the database instance.
**MySQL**
Constraints: Must be an integer from 5 to 1024.
Type: Integer
**Oracle**
Constraints: Must be an integer from 10 to 1024.
**SQL Server**
Constraints: Must be an integer from 200 to 1024 (Standard Edition and
Enterprise Edition) or from 30 to 1024 (Express Edition and Web
Edition)
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the DB
instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
:type engine: string
:param engine: The name of the database engine to be used for this
instance.
Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` |
`sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web`
:type master_username: string
:param master_username:
The name of the master user for the client DB instance.
**MySQL**
Constraints:
+ Must be 1 to 16 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
Type: String
**Oracle**
Constraints:
+ Must be 1 to 30 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
**SQL Server**
Constraints:
+ Must be 1 to 128 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
:type master_user_password: string
:param master_user_password: The password for the master database user.
Can be any printable ASCII character except "/", '"', or "@".
Type: String
**MySQL**
Constraints: Must contain from 8 to 41 characters.
**Oracle**
Constraints: Must contain from 8 to 30 characters.
**SQL Server**
Constraints: Must contain from 8 to 128 characters.
:type db_security_groups: list
:param db_security_groups: A list of DB security groups to associate
with this DB instance.
Default: The default DB security group for the database engine.
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of EC2 VPC security groups to
associate with this DB instance.
Default: The default EC2 VPC security group for the DB subnet group's
VPC.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
Constraint: The AvailabilityZone parameter cannot be specified if the
MultiAZ parameter is set to `True`. The specified Availability Zone
must be in the same region as the current endpoint.
:type db_subnet_group_name: string
:param db_subnet_group_name: A DB subnet group to associate with this
DB instance.
If there is no DB subnet group, then it is a non-VPC DB instance.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur.
Format: `ddd:hh24:mi-ddd:hh24:mi`
Default: A 30-minute window selected at random from an 8-hour block of
time per region, occurring on a random day of the week. To see the
time blocks available, see `Adjusting the Preferred Maintenance
Window`_ in the Amazon RDS User Guide.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group to associate with this DB instance.
If this argument is omitted, the default DBParameterGroup for the
specified engine will be used.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type backup_retention_period: integer
:param backup_retention_period:
The number of days for which automated backups are retained. Setting
this parameter to a positive number enables backups. Setting this
parameter to 0 disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
:type port: integer
:param port: The port number on which the database accepts connections.
**MySQL**
Default: `3306`
Valid Values: `1150-65535`
Type: Integer
**Oracle**
Default: `1521`
Valid Values: `1150-65535`
**SQL Server**
Default: `1433`
Valid Values: `1150-65535` except for `1434` and `3389`.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
You cannot set the AvailabilityZone parameter if the MultiAZ
parameter is set to true.
:type engine_version: string
:param engine_version: The version number of the database engine to
use.
**MySQL**
Example: `5.1.42`
Type: String
**Oracle**
Example: `11.2.0.2.v2`
Type: String
**SQL Server**
Example: `10.50.2789.0.v1`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the DB instance during the
maintenance window.
Default: `True`
:type license_model: string
:param license_model: License model information for this DB instance.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type character_set_name: string
:param character_set_name: For supported engines, indicates that the DB
instance should be associated with the specified CharacterSet.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
:type storage_encrypted: boolean
:param storage_encrypted: If true, use AWS's managed encryption for RDS.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'AllocatedStorage': allocated_storage,
'DBInstanceClass': db_instance_class,
'Engine': engine,
'MasterUsername': master_username,
'MasterUserPassword': master_user_password,
'StorageEncrypted': str(storage_encrypted).lower()
}
if db_name is not None:
params['DBName'] = db_name
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if port is not None:
params['Port'] = port
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if character_set_name is not None:
params['CharacterSetName'] = character_set_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstance',
verb='POST',
path='/', params=params)
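# Usage sketch (hedged): creating a small MySQL instance with the optional
# StorageEncrypted flag this module adds. Every identifier and the password
# are placeholders, not recommendations.
#
#   conn.create_db_instance(
#       db_instance_identifier='mydbinstance',
#       allocated_storage=10,
#       db_instance_class='db.m1.small',
#       engine='MySQL',
#       master_username='master',
#       master_user_password='change-me-please',
#       db_name='mydb',
#       backup_retention_period=1,
#       multi_az=False,
#       storage_encrypted=True)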
def create_db_instance_read_replica(self, db_instance_identifier,
source_db_instance_identifier,
db_instance_class=None,
availability_zone=None, port=None,
auto_minor_version_upgrade=None,
iops=None, option_group_name=None,
publicly_accessible=None, tags=None):
"""
Creates a DB instance that acts as a read replica of a source
DB instance.
All read replica DB instances are created as Single-AZ
deployments with backups disabled. All other DB instance
attributes (including DB security groups and DB parameter
groups) are inherited from the source DB instance, except as
specified below.
The source DB instance must have backup retention enabled.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier of the read
replica. This is the unique key that identifies a DB instance. This
parameter is stored as a lowercase string.
:type source_db_instance_identifier: string
:param source_db_instance_identifier: The identifier of the DB instance
that will act as the source for the read replica. Each DB instance
can have up to five read replicas.
Constraints: Must be the identifier of an existing DB instance that is
not already a read replica DB instance.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the read
replica.
Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge
| db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
Default: Inherits from the source DB instance.
:type availability_zone: string
:param availability_zone: The Amazon EC2 Availability Zone that the
read replica will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
:type port: integer
:param port: The port number that the DB instance uses for connections.
Default: Inherits from the source DB instance
Valid Values: `1150-65535`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the read replica during the
maintenance window.
Default: Inherits from the source DB instance
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
:type option_group_name: string
:param option_group_name: The option group the DB instance will be
associated with. If omitted, the default option group for the
engine specified will be used.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'SourceDBInstanceIdentifier': source_db_instance_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if port is not None:
params['Port'] = port
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstanceReadReplica',
verb='POST',
path='/', params=params)
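# Usage sketch (hedged): creating a read replica in a specific Availability
# Zone. The source instance must already have backup retention enabled, as
# the docstring above notes; names are placeholders.
#
#   conn.create_db_instance_read_replica(
#       'mydbinstance-replica', 'mydbinstance',
#       db_instance_class='db.m1.small',
#       availability_zone='us-east-1d')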
def create_db_parameter_group(self, db_parameter_group_name,
db_parameter_group_family, description,
tags=None):
"""
Creates a new DB parameter group.
A DB parameter group is initially created with the default
parameters for the database engine used by the DB instance. To
provide custom values for any of the parameters, you must
modify the group after creating it using
ModifyDBParameterGroup . Once you've created a DB parameter
group, you need to associate it with your DB instance using
ModifyDBInstance . When you associate a new DB parameter group
with a running DB instance, you need to reboot the DB Instance
for the new DB parameter group and associated settings to take
effect.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
This value is stored as a lower-case string.
:type db_parameter_group_family: string
:param db_parameter_group_family: The DB parameter group family name. A
DB parameter group can be associated with one and only one DB
parameter group family, and can be applied only to a DB instance
running a database engine and engine version compatible with that
DB parameter group family.
:type description: string
:param description: The description for the DB parameter group.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'DBParameterGroupName': db_parameter_group_name,
'DBParameterGroupFamily': db_parameter_group_family,
'Description': description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBParameterGroup',
verb='POST',
path='/', params=params)
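# Usage sketch (hedged): creating a custom parameter group. The family name
# 'mysql5.1' is an assumption for illustration; the group still has to be
# attached via ModifyDBInstance and the instance rebooted, per the docstring.
#
#   conn.create_db_parameter_group('my-mysql-params', 'mysql5.1',
#                                  'Tuned parameters for the app database')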
def create_db_security_group(self, db_security_group_name,
db_security_group_description, tags=None):
"""
Creates a new DB security group. DB security groups control
access to a DB instance.
:type db_security_group_name: string
:param db_security_group_name: The name for the DB security group. This
value is stored as a lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
Example: `mysecuritygroup`
:type db_security_group_description: string
:param db_security_group_description: The description for the DB
security group.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'DBSecurityGroupName': db_security_group_name,
'DBSecurityGroupDescription': db_security_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSecurityGroup',
verb='POST',
path='/', params=params)
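# Usage sketch (hedged): creating a security group and then opening it to an
# office CIDR range with authorize_db_security_group_ingress above. The CIDR
# block is a documentation-range placeholder.
#
#   conn.create_db_security_group('mysecuritygroup', 'Office access')
#   conn.authorize_db_security_group_ingress('mysecuritygroup',
#                                            cidrip='203.0.113.0/24')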
def create_db_snapshot(self, db_snapshot_identifier,
db_instance_identifier, tags=None):
"""
Creates a DBSnapshot. The source DBInstance must be in
"available" state.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier for the DB snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This is the unique key that identifies a DB
instance. This parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'DBSnapshotIdentifier': db_snapshot_identifier,
'DBInstanceIdentifier': db_instance_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSnapshot',
verb='POST',
path='/', params=params)
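# Usage sketch (hedged): taking a manual snapshot before a risky change.
# Identifiers are placeholders.
#
#   conn.create_db_snapshot('my-snapshot-id', 'mydbinstance')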
def create_db_subnet_group(self, db_subnet_group_name,
db_subnet_group_description, subnet_ids,
tags=None):
"""
Creates a new DB subnet group. DB subnet groups must contain
at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'DBSubnetGroupName': db_subnet_group_name,
'DBSubnetGroupDescription': db_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSubnetGroup',
verb='POST',
path='/', params=params)
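# Usage sketch (hedged): a DB subnet group needs subnets in at least two
# Availability Zones; the subnet IDs below are placeholders.
#
#   conn.create_db_subnet_group('mysubnetgroup', 'Private app subnets',
#                               ['subnet-11111111', 'subnet-22222222'])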
def create_event_subscription(self, subscription_name, sns_topic_arn,
source_type=None, event_categories=None,
source_ids=None, enabled=None, tags=None):
"""
Creates an RDS event notification subscription. This action
requires a topic ARN (Amazon Resource Name) created by either
the RDS console, the SNS console, or the SNS API. To obtain an
ARN with SNS, you must create a topic in Amazon SNS and
subscribe to the topic. The ARN is displayed in the SNS
console.
You can specify the type of source (SourceType) you want to be
notified of, provide a list of RDS sources (SourceIds) that
triggers the events, and provide a list of event categories
(EventCategories) for events you want to be notified of. For
example, you can specify SourceType = db-instance, SourceIds =
mydbinstance1, mydbinstance2 and EventCategories =
Availability, Backup.
If you specify both the SourceType and SourceIds, such as
SourceType = db-instance and SourceIdentifier = myDBInstance1,
you will be notified of all the db-instance events for the
specified source. If you specify a SourceType but do not
specify a SourceIdentifier, you will receive notice of the
events for that source type for all your RDS sources. If you
specify neither the SourceType nor the SourceIdentifier,
you will be notified of events generated from all RDS sources
belonging to your customer account.
:type subscription_name: string
:param subscription_name: The name of the subscription.
Constraints: The name must be less than 255 characters.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type source_ids: list
:param source_ids:
The list of identifiers of the event sources for which events will be
returned. If not specified, then all sources are included in the
response. An identifier must begin with a letter and must contain
only ASCII letters, digits, and hyphens; it cannot end with a
hyphen or contain two consecutive hyphens.
Constraints:
+ If SourceIds are supplied, SourceType must also be provided.
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription, set to **false** to create the subscription but not
activate it.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'SubscriptionName': subscription_name,
'SnsTopicArn': sns_topic_arn,
}
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if source_ids is not None:
self.build_list_params(params,
source_ids,
'SourceIds.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateEventSubscription',
verb='POST',
path='/', params=params)
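# Usage sketch (hedged): routing availability and backup events from two
# instances to an existing SNS topic. The topic ARN and source IDs are
# placeholders.
#
#   conn.create_event_subscription(
#       'my-rds-events',
#       'arn:aws:sns:us-east-1:123456789012:rds-events',
#       source_type='db-instance',
#       event_categories=['availability', 'backup'],
#       source_ids=['mydbinstance1', 'mydbinstance2'],
#       enabled=True)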
def create_option_group(self, option_group_name, engine_name,
major_engine_version, option_group_description,
tags=None):
"""
Creates a new option group. You can create up to 20 option
groups.
:type option_group_name: string
:param option_group_name: Specifies the name of the option group to be
created.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `myoptiongroup`
:type engine_name: string
:param engine_name: Specifies the name of the engine that this option
group should be associated with.
:type major_engine_version: string
:param major_engine_version: Specifies the major version of the engine
that this option group should be associated with.
:type option_group_description: string
:param option_group_description: The description of the option group.
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'OptionGroupName': option_group_name,
'EngineName': engine_name,
'MajorEngineVersion': major_engine_version,
'OptionGroupDescription': option_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateOptionGroup',
verb='POST',
path='/', params=params)
def delete_db_instance(self, db_instance_identifier,
skip_final_snapshot=None,
final_db_snapshot_identifier=None):
"""
The DeleteDBInstance action deletes a previously provisioned
DB instance. A successful response from the web service
indicates the request was received correctly. When you delete
a DB instance, all automated backups for that instance are
deleted and cannot be recovered. Manual DB snapshots of the DB
instance to be deleted are not deleted.
If a final DB snapshot is requested the status of the RDS
instance will be "deleting" until the DB snapshot is created.
The API action `DescribeDBInstances` is used to monitor the
status of this operation. The action cannot be canceled or
reverted once submitted.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier for the DB instance to be deleted. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type skip_final_snapshot: boolean
:param skip_final_snapshot: Determines whether a final DB snapshot is
created before the DB instance is deleted. If `True` is specified,
no DBSnapshot is created. If false is specified, a DB snapshot is
created before the DB instance is deleted.
The FinalDBSnapshotIdentifier parameter must be specified if
SkipFinalSnapshot is `False`.
Default: `False`
:type final_db_snapshot_identifier: string
:param final_db_snapshot_identifier:
The DBSnapshotIdentifier of the new DBSnapshot created when
SkipFinalSnapshot is set to `False`.
Specifying this parameter and also setting the SkipFinalSnapshot
parameter to true results in an error.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if skip_final_snapshot is not None:
params['SkipFinalSnapshot'] = str(
skip_final_snapshot).lower()
if final_db_snapshot_identifier is not None:
params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
return self._make_request(
action='DeleteDBInstance',
verb='POST',
path='/', params=params)
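# Usage sketch (hedged): deleting an instance while keeping a final snapshot.
# When skip_final_snapshot is False, final_db_snapshot_identifier must be
# supplied, per the docstring above; names are placeholders.
#
#   conn.delete_db_instance(
#       'mydbinstance',
#       skip_final_snapshot=False,
#       final_db_snapshot_identifier='mydbinstance-final')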
def delete_db_parameter_group(self, db_parameter_group_name):
"""
Deletes a specified DBParameterGroup. The DB parameter group
to be deleted cannot be associated with any DB instances.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ You cannot delete a default DB parameter group
+ Cannot be associated with any DB instances
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
return self._make_request(
action='DeleteDBParameterGroup',
verb='POST',
path='/', params=params)
def delete_db_security_group(self, db_security_group_name):
"""
Deletes a DB security group.
The specified DB security group must not be associated with
any DB instances.
:type db_security_group_name: string
:param db_security_group_name:
The name of the DB security group to delete.
You cannot delete the default DB security group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
"""
params = {'DBSecurityGroupName': db_security_group_name, }
return self._make_request(
action='DeleteDBSecurityGroup',
verb='POST',
path='/', params=params)
def delete_db_snapshot(self, db_snapshot_identifier):
"""
Deletes a DBSnapshot.
The DBSnapshot must be in the `available` state to be deleted.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The DBSnapshot identifier.
Constraints: Must be the name of an existing DB snapshot in the
`available` state.
"""
params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
return self._make_request(
action='DeleteDBSnapshot',
verb='POST',
path='/', params=params)
def delete_db_subnet_group(self, db_subnet_group_name):
"""
Deletes a DB subnet group.
The specified database subnet group must not be associated
with any DB instances.
:type db_subnet_group_name: string
:param db_subnet_group_name:
The name of the database subnet group to delete.
You cannot delete the default subnet group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
return self._make_request(
action='DeleteDBSubnetGroup',
verb='POST',
path='/', params=params)
def delete_event_subscription(self, subscription_name):
"""
Deletes an RDS event notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to delete.
"""
params = {'SubscriptionName': subscription_name, }
return self._make_request(
action='DeleteEventSubscription',
verb='POST',
path='/', params=params)
def delete_option_group(self, option_group_name):
"""
Deletes an existing option group.
:type option_group_name: string
:param option_group_name:
The name of the option group to be deleted.
You cannot delete default option groups.
"""
params = {'OptionGroupName': option_group_name, }
return self._make_request(
action='DeleteOptionGroup',
verb='POST',
path='/', params=params)
def describe_db_engine_versions(self, engine=None, engine_version=None,
db_parameter_group_family=None,
max_records=None, marker=None,
default_only=None,
list_supported_character_sets=None):
"""
Returns a list of the available DB engines.
:type engine: string
:param engine: The database engine to return.
:type engine_version: string
:param engine_version: The database engine version to return.
Example: `5.1.49`
:type db_parameter_group_family: string
:param db_parameter_group_family:
The name of a specific DB parameter group family to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
:type default_only: boolean
:param default_only: Indicates that only the default version of the
specified engine or engine and major version combination is
returned.
:type list_supported_character_sets: boolean
:param list_supported_character_sets: If this parameter is specified,
and if the requested engine supports the CharacterSetName parameter
for CreateDBInstance, the response includes a list of supported
character sets for each engine version.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_parameter_group_family is not None:
params['DBParameterGroupFamily'] = db_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
if list_supported_character_sets is not None:
params['ListSupportedCharacterSets'] = str(
list_supported_character_sets).lower()
return self._make_request(
action='DescribeDBEngineVersions',
verb='POST',
path='/', params=params)
def describe_db_instances(self, db_instance_identifier=None,
filters=None, max_records=None, marker=None):
"""
Returns information about provisioned RDS instances. This API
supports pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
The user-supplied instance identifier. If this parameter is specified,
information from only the specific DB instance is returned. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBInstances request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords` .
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBInstances',
verb='POST',
path='/', params=params)
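# Usage sketch (hedged): paging through all instances with the marker token.
# The nested response keys follow the usual rds2 JSON layout and should be
# treated as an assumption for this fork.
#
#   marker = None
#   while True:
#       result = conn.describe_db_instances(max_records=20, marker=marker)
#       body = (result['DescribeDBInstancesResponse']
#               ['DescribeDBInstancesResult'])
#       for instance in body['DBInstances']:
#           print(instance['DBInstanceIdentifier'])
#       marker = body.get('Marker')
#       if not marker:
#           break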
def describe_db_log_files(self, db_instance_identifier,
filename_contains=None, file_last_written=None,
file_size=None, max_records=None, marker=None):
"""
Returns a list of DB log files for the DB instance.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filename_contains: string
:param filename_contains: Filters the available log files for log file
names that contain the specified string.
:type file_last_written: long
:param file_last_written: Filters the available log files for files
written since the specified date, in POSIX timestamp format.
:type file_size: long
:param file_size: Filters the available log files for files larger than
the specified size.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified, the response includes only records
beyond the marker, up to MaxRecords.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if filename_contains is not None:
params['FilenameContains'] = filename_contains
if file_last_written is not None:
params['FileLastWritten'] = file_last_written
if file_size is not None:
params['FileSize'] = file_size
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBLogFiles',
verb='POST',
path='/', params=params)
def describe_db_parameter_groups(self, db_parameter_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBParameterGroup` descriptions. If a
`DBParameterGroupName` is specified, the list will contain
only the description of the specified DB parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameterGroups` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {}
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameterGroups',
verb='POST',
path='/', params=params)
def describe_db_parameters(self, db_parameter_group_name, source=None,
max_records=None, marker=None):
"""
Returns the detailed parameter list for a particular DB
parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type source: string
:param source: The parameter types to return.
Default: All parameter types returned
Valid Values: `user | system | engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameters` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameters',
verb='POST',
path='/', params=params)
def describe_db_security_groups(self, db_security_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBSecurityGroup` descriptions. If a
`DBSecurityGroupName` is specified, the list will contain only
the descriptions of the specified DB security group.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
return details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSecurityGroups request. If this parameter is specified,
the response includes only records beyond the marker, up to the
value specified by `MaxRecords`.
"""
params = {}
if db_security_group_name is not None:
params['DBSecurityGroupName'] = db_security_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSecurityGroups',
verb='POST',
path='/', params=params)
def describe_db_snapshots(self, db_instance_identifier=None,
db_snapshot_identifier=None,
snapshot_type=None, filters=None,
max_records=None, marker=None):
"""
Returns information about DB snapshots. This API supports
pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
A DB instance identifier to retrieve the list of DB snapshots for.
Cannot be used in conjunction with `DBSnapshotIdentifier`. This
parameter is not case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier:
A specific DB snapshot identifier to describe. Cannot be used in
conjunction with `DBInstanceIdentifier`. This value is stored as a
lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ If this is the identifier of an automated snapshot, the
`SnapshotType` parameter must also be specified.
:type snapshot_type: string
:param snapshot_type: The type of snapshots that will be returned.
Values can be "automated" or "manual." If not specified, the
returned results will include all snapshot types.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBSnapshots` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if db_snapshot_identifier is not None:
params['DBSnapshotIdentifier'] = db_snapshot_identifier
if snapshot_type is not None:
params['SnapshotType'] = snapshot_type
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSnapshots',
verb='POST',
path='/', params=params)
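# Usage sketch (hedged): listing only the manual snapshots of one instance,
# using the snapshot_type filter documented above.
#
#   conn.describe_db_snapshots(db_instance_identifier='mydbinstance',
#                              snapshot_type='manual')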
def describe_db_subnet_groups(self, db_subnet_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of DBSubnetGroup descriptions. If a
DBSubnetGroupName is specified, the list will contain only the
descriptions of the specified DBSubnetGroup.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name of the DB subnet group to return
details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSubnetGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self, db_parameter_group_family,
max_records=None, marker=None):
"""
Returns the default engine and system parameter information
for the specified database engine.
:type db_parameter_group_family: string
:param db_parameter_group_family: The name of the DB parameter group
family.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeEngineDefaultParameters` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {
'DBParameterGroupFamily': db_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_event_categories(self, source_type=None):
"""
Displays a list of categories for all event source types, or,
if specified, for a particular source type. You can see a list
of the event categories and source types in the `Events`_
topic in the Amazon RDS User Guide.
:type source_type: string
:param source_type: The type of source that will be generating the
events.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
"""
params = {}
if source_type is not None:
params['SourceType'] = source_type
return self._make_request(
action='DescribeEventCategories',
verb='POST',
path='/', params=params)
def describe_event_subscriptions(self, subscription_name=None,
filters=None, max_records=None,
marker=None):
"""
Lists all the subscription descriptions for a customer
account. The description for a subscription includes
SubscriptionName, SNSTopicARN, CustomerID, SourceType,
SourceID, CreationTime, and Status.
If you specify a SubscriptionName, lists the description for
that subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to describe.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEventSubscriptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords` .
"""
params = {}
if subscription_name is not None:
params['SubscriptionName'] = subscription_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEventSubscriptions',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
event_categories=None, max_records=None, marker=None):
"""
Returns events related to DB instances, DB security groups, DB
snapshots, and DB parameter groups for the past 14 days.
Events specific to a particular DB instance, DB security
group, database snapshot, or DB parameter group can be
obtained by providing the name as a parameter. By default, the
past hour of events is returned.
:type source_identifier: string
:param source_identifier:
The identifier of the event source for which events will be returned.
If not specified, then all sources are included in the response.
Constraints:
+ If SourceIdentifier is supplied, SourceType must also be provided.
+ If the source type is `DBInstance`, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
be supplied.
+ If the source type is `DBParameterGroup`, a `DBParameterGroupName`
must be supplied.
+ If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
supplied.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format. For more information
about ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format. For more information about
ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type duration: integer
:param duration: The number of minutes to retrieve events for.
Default: 60
:type event_categories: list
:param event_categories: A list of event categories that trigger
notifications for an event notification subscription.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEvents request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
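# Usage sketch (hedged): pulling one day of backup events for a single
# instance. Timestamps are ISO 8601 strings, matching the examples in the
# docstring above.
#
#   conn.describe_events(source_identifier='mydbinstance',
#                        source_type='db-instance',
#                        start_time='2009-07-07T18:00Z',
#                        end_time='2009-07-08T18:00Z',
#                        event_categories=['backup'])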
def describe_option_group_options(self, engine_name,
major_engine_version=None,
max_records=None, marker=None):
"""
Describes all available options.
:type engine_name: string
:param engine_name: A required parameter. Options available for the
given Engine name will be described.
:type major_engine_version: string
:param major_engine_version: If specified, filters the results to
include only options for the specified major engine version.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {'EngineName': engine_name, }
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOptionGroupOptions',
verb='POST',
path='/', params=params)
def describe_option_groups(self, option_group_name=None, filters=None,
marker=None, max_records=None,
engine_name=None, major_engine_version=None):
"""
Describes the available option groups.
:type option_group_name: string
:param option_group_name: The name of the option group to describe.
Cannot be supplied together with EngineName or MajorEngineVersion.
:type filters: list
:param filters:
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOptionGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type engine_name: string
:param engine_name: Filters the list of option groups to only include
groups associated with a specific database engine.
:type major_engine_version: string
:param major_engine_version: Filters the list of option groups to only
include groups associated with a specific database engine version.
If specified, then EngineName must also be specified.
"""
params = {}
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if marker is not None:
params['Marker'] = marker
if max_records is not None:
params['MaxRecords'] = max_records
if engine_name is not None:
params['EngineName'] = engine_name
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
return self._make_request(
action='DescribeOptionGroups',
verb='POST',
path='/', params=params)
def describe_orderable_db_instance_options(self, engine,
engine_version=None,
db_instance_class=None,
license_model=None, vpc=None,
max_records=None, marker=None):
"""
Returns a list of orderable DB instance options for the
specified engine.
:type engine: string
:param engine: The name of the engine to retrieve DB instance options
for.
:type engine_version: string
:param engine_version: The engine version filter value. Specify this
parameter to show only the available offerings matching the
specified engine version.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type license_model: string
:param license_model: The license model filter value. Specify this
parameter to show only the available offerings matching the
specified license model.
:type vpc: boolean
:param vpc: The VPC filter value. Specify this parameter to show only
the available VPC or non-VPC offerings.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOrderableDBInstanceOptions request. If this parameter is
specified, the response includes only records beyond the marker, up
            to the value specified by `MaxRecords`.
"""
params = {'Engine': engine, }
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if license_model is not None:
params['LicenseModel'] = license_model
if vpc is not None:
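            # Booleans are sent as the lowercase strings 'true'/'false' in the
            # query parameters.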
params['Vpc'] = str(
vpc).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOrderableDBInstanceOptions',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances(self, reserved_db_instance_id=None,
reserved_db_instances_offering_id=None,
db_instance_class=None, duration=None,
product_description=None,
offering_type=None, multi_az=None,
filters=None, max_records=None,
marker=None):
"""
Returns information about reserved DB instances for this
account, or about a specified reserved DB instance.
:type reserved_db_instance_id: string
:param reserved_db_instance_id: The reserved DB instance identifier
filter value. Specify this parameter to show only the reservation
that matches the specified reservation ID.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only purchased
reservations matching the specified offering identifier.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only those reservations matching the
specified DB instances class.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Specify this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value.
Specify this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
        Utilization"`
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only those reservations matching the specified Multi-AZ
parameter.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstances',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances_offerings(self,
reserved_db_instances_offering_id=None,
db_instance_class=None,
duration=None,
product_description=None,
offering_type=None,
multi_az=None,
max_records=None,
marker=None):
"""
Lists available reserved DB instance offerings.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only the available
offering that matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Specify this parameter to show only reservations for this duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: Product description filter value. Specify
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
        Utilization"`
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only the available offerings matching the specified Multi-AZ
parameter.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstancesOfferings',
verb='POST',
path='/', params=params)
def download_db_log_file_portion(self, db_instance_identifier,
log_file_name, marker=None,
number_of_lines=None):
"""
        Downloads all or a portion of the specified log file.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type log_file_name: string
:param log_file_name: The name of the log file to be downloaded.
:type marker: string
:param marker: The pagination token provided in the previous request.
            If this parameter is specified, the response includes only records
            beyond the marker, up to the number of lines specified by
            `NumberOfLines`.
:type number_of_lines: integer
:param number_of_lines: The number of lines remaining to be downloaded.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'LogFileName': log_file_name,
}
if marker is not None:
params['Marker'] = marker
if number_of_lines is not None:
params['NumberOfLines'] = number_of_lines
return self._make_request(
action='DownloadDBLogFilePortion',
verb='POST',
path='/', params=params)
def list_tags_for_resource(self, resource_name):
"""
Lists all tags on an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource with tags to be listed.
This value is an Amazon Resource Name (ARN). For information about
creating an ARN, see ` Constructing an RDS Amazon Resource Name
(ARN)`_.
"""
params = {'ResourceName': resource_name, }
return self._make_request(
action='ListTagsForResource',
verb='POST',
path='/', params=params)
def modify_db_instance(self, db_instance_identifier,
allocated_storage=None, db_instance_class=None,
db_security_groups=None,
vpc_security_group_ids=None,
apply_immediately=None, master_user_password=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None,
preferred_maintenance_window=None, multi_az=None,
engine_version=None,
allow_major_version_upgrade=None,
auto_minor_version_upgrade=None, iops=None,
option_group_name=None,
new_db_instance_identifier=None):
"""
Modify settings for a DB instance. You can change one or more
database configuration parameters by specifying these
parameters and the new values in the request.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This value is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type allocated_storage: integer
:param allocated_storage: The new storage capacity of the RDS instance.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
**MySQL**
Default: Uses existing setting
Valid Values: 5-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
**Oracle**
Default: Uses existing setting
Valid Values: 10-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
**SQL Server**
Cannot be modified.
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type db_instance_class: string
:param db_instance_class: The new compute and memory capacity of the DB
instance. To determine the instance classes that are available for
a particular DB engine, use the DescribeOrderableDBInstanceOptions
action.
Passing a value for this parameter causes an outage during the change
and is applied during the next maintenance window, unless the
`ApplyImmediately` parameter is specified as `True` for this
request.
Default: Uses existing setting
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type db_security_groups: list
:param db_security_groups:
A list of DB security groups to authorize on this DB instance. Changing
this parameter does not result in an outage and the change is
asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type vpc_security_group_ids: list
:param vpc_security_group_ids:
A list of EC2 VPC security groups to authorize on this DB instance.
This change is asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type apply_immediately: boolean
:param apply_immediately: Specifies whether or not the modifications in
this request and any pending modifications are asynchronously
applied as soon as possible, regardless of the
`PreferredMaintenanceWindow` setting for the DB instance.
If this parameter is passed as `False`, changes to the DB instance are
applied on the next call to RebootDBInstance, the next maintenance
reboot, or the next failure reboot, whichever occurs first. See
each parameter to determine when a change is applied.
Default: `False`
:type master_user_password: string
:param master_user_password:
The new password for the DB instance master user. Can be any printable
ASCII character except "/", '"', or "@".
Changing this parameter does not result in an outage and the change is
asynchronously applied as soon as possible. Between the time of the
request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the
operation response.
Default: Uses existing setting
Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30
alphanumeric characters (Oracle), or 8 to 128 alphanumeric
characters (SQL Server).
Amazon RDS API actions never return the password, so this action
provides a way to regain access to a master instance user if the
password is lost.
:type db_parameter_group_name: string
:param db_parameter_group_name: The name of the DB parameter group to
apply to this DB instance. Changing this parameter does not result
in an outage and the change is applied during the next maintenance
window unless the `ApplyImmediately` parameter is set to `True` for
this request.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter
group family as this DB instance.
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Changing this parameter can result in an outage if you change from 0 to
a non-zero value or from a non-zero value to 0. These changes are
applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
you change the parameter from one non-zero value to another non-
zero value, the change is asynchronously applied as soon as
possible.
Default: Uses existing setting
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas or if the DB instance is a read replica
:type preferred_backup_window: string
:param preferred_backup_window:
The daily time range during which automated backups are created if
automated backups are enabled, as determined by the
`BackupRetentionPeriod`. Changing this parameter does not result in
an outage and the change is asynchronously applied as soon as
possible.
Constraints:
+ Must be in the format hh24:mi-hh24:mi
+ Times should be Universal Time Coordinated (UTC)
+ Must not conflict with the preferred maintenance window
+ Must be at least 30 minutes
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur, which may result in an
outage. Changing this parameter does not result in an outage,
except in the following situation, and the change is asynchronously
applied as soon as possible. If there are pending actions that
cause a reboot, and the maintenance window is changed to include
the current time, then changing this parameter will cause a reboot
of the DB instance. If moving this window to the current time,
there must be at least 30 minutes between the current time and end
of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
Constraints: Cannot be specified if the DB instance is a read replica.
:type engine_version: string
:param engine_version: The version number of the database engine to
upgrade to. Changing this parameter results in an outage and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
For major version upgrades, if a non-default DB parameter group is
currently in use, a new DB parameter group in the DB parameter
group family for the new engine version must be specified. The new
DB parameter group can be the default for that DB parameter group
family.
Example: `5.1.42`
:type allow_major_version_upgrade: boolean
:param allow_major_version_upgrade: Indicates that major version
upgrades are allowed. Changing this parameter does not result in an
outage and the change is asynchronously applied as soon as
possible.
Constraints: This parameter must be set to true when specifying a value
for the EngineVersion parameter that is a different major version
than the DB instance's current version.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window. Changing this parameter does not result in
an outage except in the following case and the change is
asynchronously applied as soon as possible. An outage will result
if this parameter is set to `True` during the maintenance window,
and a newer minor version is available, and RDS has enabled auto
patching for that engine version.
:type iops: integer
:param iops: The new Provisioned IOPS (I/O operations per second) value
for the RDS instance. Changing this parameter does not result in an
outage and the change is applied during the next maintenance window
unless the `ApplyImmediately` parameter is set to `True` for this
request.
Default: Uses existing setting
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group. Changing this parameter
does not result in an outage except in the following case and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
the parameter change results in an option group that enables OEM,
this change can cause a brief (sub-second) period during which new
connections are rejected but existing connections are not
interrupted.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type new_db_instance_identifier: string
:param new_db_instance_identifier:
The new DB instance identifier for the DB instance when renaming a DB
Instance. This value is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
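        # Only arguments that were explicitly passed are added to the request;
        # anything left as None keeps its current setting on the instance.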
if allocated_storage is not None:
params['AllocatedStorage'] = allocated_storage
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if master_user_password is not None:
params['MasterUserPassword'] = master_user_password
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if allow_major_version_upgrade is not None:
params['AllowMajorVersionUpgrade'] = str(
allow_major_version_upgrade).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if new_db_instance_identifier is not None:
params['NewDBInstanceIdentifier'] = new_db_instance_identifier
return self._make_request(
action='ModifyDBInstance',
verb='POST',
path='/', params=params)
def modify_db_parameter_group(self, db_parameter_group_name, parameters):
"""
Modifies the parameters of a DB parameter group. To modify
more than one parameter, submit a list of the following:
`ParameterName`, `ParameterValue`, and `ApplyMethod`. A
maximum of 20 parameters can be modified in a single request.
The `apply-immediate` method can be used only for dynamic
parameters; the `pending-reboot` method can be used with MySQL
and Oracle DB instances for either dynamic or static
parameters. For Microsoft SQL Server DB instances, the
`pending-reboot` method can be used only for static
parameters.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type parameters: list
:param parameters:
An array of parameter names, values, and the apply method for the
parameter update. At least one parameter name, value, and apply
method must be supplied; subsequent arguments are optional. A
maximum of 20 parameters may be modified in a single request.
Valid Values (for the application method): `immediate | pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the pending-reboot value for both dynamic and static
parameters, and changes are applied when DB instance reboots.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
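        # Each tuple in `parameters` is flattened positionally into
        # Parameters.member.N.<Field> query arguments, in the field order
        # listed below (ParameterName first, ApplyMethod last).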
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ModifyDBParameterGroup',
verb='POST',
path='/', params=params)
def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids,
db_subnet_group_description=None):
"""
Modifies an existing DB subnet group. DB subnet groups must
contain at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 subnet IDs for the DB subnet group.
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if db_subnet_group_description is not None:
params['DBSubnetGroupDescription'] = db_subnet_group_description
return self._make_request(
action='ModifyDBSubnetGroup',
verb='POST',
path='/', params=params)
def modify_event_subscription(self, subscription_name,
sns_topic_arn=None, source_type=None,
event_categories=None, enabled=None):
"""
Modifies an existing RDS event notification subscription. Note
that you cannot modify the source identifiers using this call;
to change source identifiers for a subscription, use the
AddSourceIdentifierToSubscription and
RemoveSourceIdentifierFromSubscription calls.
You can see a list of the event categories for a given
SourceType in the `Events`_ topic in the Amazon RDS User Guide
or by using the **DescribeEventCategories** action.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
            by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription.
"""
params = {'SubscriptionName': subscription_name, }
if sns_topic_arn is not None:
params['SnsTopicArn'] = sns_topic_arn
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
return self._make_request(
action='ModifyEventSubscription',
verb='POST',
path='/', params=params)
def modify_option_group(self, option_group_name, options_to_include=None,
options_to_remove=None, apply_immediately=None):
"""
Modifies an existing option group.
:type option_group_name: string
:param option_group_name: The name of the option group to be modified.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type options_to_include: list
:param options_to_include: Options in this list are added to the option
group or, if already present, the specified configuration is used
to update the existing configuration.
:type options_to_remove: list
:param options_to_remove: Options in this list are removed from the
option group.
:type apply_immediately: boolean
:param apply_immediately: Indicates whether the changes should be
applied immediately, or during the next maintenance window for each
instance associated with the option group.
"""
params = {'OptionGroupName': option_group_name, }
if options_to_include is not None:
self.build_complex_list_params(
params, options_to_include,
'OptionsToInclude.member',
('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings'))
if options_to_remove is not None:
self.build_list_params(params,
options_to_remove,
'OptionsToRemove.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
return self._make_request(
action='ModifyOptionGroup',
verb='POST',
path='/', params=params)
def promote_read_replica(self, db_instance_identifier,
backup_retention_period=None,
preferred_backup_window=None):
"""
Promotes a read replica DB instance to a standalone DB
instance.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This value
is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing read replica DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: mydbinstance
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
return self._make_request(
action='PromoteReadReplica',
verb='POST',
path='/', params=params)
def purchase_reserved_db_instances_offering(self,
reserved_db_instances_offering_id,
reserved_db_instance_id=None,
db_instance_count=None,
tags=None):
"""
Purchases a reserved DB instance offering.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The ID of the Reserved DB
instance offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type reserved_db_instance_id: string
:param reserved_db_instance_id: Customer-specified identifier to track
this reservation.
Example: myreservationID
:type db_instance_count: integer
:param db_instance_count: The number of instances to reserve.
Default: `1`
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id,
}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if db_instance_count is not None:
params['DBInstanceCount'] = db_instance_count
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='PurchaseReservedDBInstancesOffering',
verb='POST',
path='/', params=params)
def reboot_db_instance(self, db_instance_identifier, force_failover=None):
"""
Rebooting a DB instance restarts the database engine service.
A reboot also applies to the DB instance any modifications to
the associated DB parameter group that were pending. Rebooting
a DB instance results in a momentary outage of the instance,
during which the DB instance status is set to rebooting. If
the RDS instance is configured for MultiAZ, it is possible
that the reboot will be conducted through a failover. An
Amazon RDS event is created when the reboot is completed.
If your DB instance is deployed in multiple Availability
Zones, you can force a failover from one AZ to the other
during the reboot. You might force a failover to test the
availability of your DB instance deployment or to restore
operations to the original AZ after a failover occurs.
The time required to reboot is a function of the specific
database engine's crash recovery process. To improve the
reboot time, we recommend that you reduce database activities
as much as possible during the reboot process to reduce
rollback activity for in-transit transactions.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This parameter is stored as a lowercase
string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type force_failover: boolean
:param force_failover: When `True`, the reboot will be conducted
through a MultiAZ failover.
Constraint: You cannot specify `True` if the instance is not configured
for MultiAZ.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if force_failover is not None:
params['ForceFailover'] = str(
force_failover).lower()
return self._make_request(
action='RebootDBInstance',
verb='POST',
path='/', params=params)
def remove_source_identifier_from_subscription(self, subscription_name,
source_identifier):
"""
Removes a source identifier from an existing RDS event
notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to remove a source identifier from.
:type source_identifier: string
:param source_identifier: The source identifier to be removed from the
subscription, such as the **DB instance identifier** for a DB
instance or the name of a security group.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='RemoveSourceIdentifierFromSubscription',
verb='POST',
path='/', params=params)
def remove_tags_from_resource(self, resource_name, tag_keys):
"""
Removes metadata tags from an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be removed
from. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tag_keys: list
:param tag_keys: The tag key (name) of the tag to be removed.
"""
params = {'ResourceName': resource_name, }
self.build_list_params(params,
tag_keys,
'TagKeys.member')
return self._make_request(
action='RemoveTagsFromResource',
verb='POST',
path='/', params=params)
def reset_db_parameter_group(self, db_parameter_group_name,
reset_all_parameters=None, parameters=None):
"""
Modifies the parameters of a DB parameter group to the
engine/system default value. To reset specific parameters
submit a list of the following: `ParameterName` and
`ApplyMethod`. To reset the entire DB parameter group, specify
the `DBParameterGroup` name and `ResetAllParameters`
parameters. When resetting the entire group, dynamic
parameters are updated immediately and static parameters are
set to `pending-reboot` to take effect on the next DB instance
restart or `RebootDBInstance` request.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type reset_all_parameters: boolean
        :param reset_all_parameters: Specifies whether (`True`) or not
            (`False`) to reset all parameters in the DB parameter group to
            default values.
Default: `True`
:type parameters: list
:param parameters: An array of parameter names, values, and the apply
method for the parameter update. At least one parameter name,
value, and apply method must be supplied; subsequent arguments are
optional. A maximum of 20 parameters may be modified in a single
request.
**MySQL**
Valid Values (for Apply method): `immediate` | `pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the `pending-reboot` value for both dynamic and static
parameters, and changes are applied when DB instance reboots.
**Oracle**
Valid Values (for Apply method): `pending-reboot`
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
if parameters is not None:
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ResetDBParameterGroup',
verb='POST',
path='/', params=params)
def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
db_snapshot_identifier,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Creates a new DB instance from a DB snapshot. The target
database is created from the source database restore point
with the same configuration as the original source database,
except that the new RDS instance is created with the default
security group.
:type db_instance_identifier: string
:param db_instance_identifier:
        The name of the DB instance to create from the DB snapshot. This
        parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
        :param db_snapshot_identifier: The identifier for the DB snapshot to
            restore from.
Constraints:
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type port: integer
:param port: The port number on which the database accepts connections.
Default: The same port as the original DB instance
Constraints: Value must be `1150-65535`
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
        + **Default VPC:** true
        + **VPC:** false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter doesn't apply to the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: Specifies the amount of provisioned IOPS for the DB
instance, expressed in I/O operations per second. If this parameter
is not specified, the IOPS value will be taken from the backup. If
this parameter is set to 0, the new instance will be converted to a
non-PIOPS instance, which will take additional time, though your DB
instance will be available for connections before the conversion
starts.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'DBSnapshotIdentifier': db_snapshot_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceFromDBSnapshot',
verb='POST',
path='/', params=params)
def restore_db_instance_to_point_in_time(self,
source_db_instance_identifier,
target_db_instance_identifier,
restore_time=None,
use_latest_restorable_time=None,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Restores a DB instance to an arbitrary point-in-time. Users
can restore to any point in time before the
latestRestorableTime for up to backupRetentionPeriod days. The
target database is created from the source database with the
same configuration as the original database except that the DB
instance is created with the default DB security group.
:type source_db_instance_identifier: string
:param source_db_instance_identifier:
The identifier of the source DB instance from which to restore.
Constraints:
+ Must be the identifier of an existing database instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type target_db_instance_identifier: string
:param target_db_instance_identifier:
The name of the new database instance to be created.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type restore_time: timestamp
:param restore_time: The date and time to restore from.
Valid Values: Value must be a UTC time
Constraints:
+ Must be before the latest restorable time for the DB instance
+ Cannot be specified if UseLatestRestorableTime parameter is true
Example: `2009-09-07T23:45:00Z`
:type use_latest_restorable_time: boolean
        :param use_latest_restorable_time: Specifies whether (`True`) or not
            (`False`) the DB instance is restored from the latest backup time.
Default: `False`
Constraints: Cannot be specified if RestoreTime parameter is provided.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
Default: The same DBInstanceClass as the original DB instance.
:type port: integer
:param port: The port number on which the database accepts connections.
Constraints: Value must be `1150-65535`
Default: The same port as the original DB instance.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to true.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
        + **Default VPC:** true
        + **VPC:** false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter is not used for the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type tags: list
:param tags: A list of tags. Tags must be passed as tuples in the form
[('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
"""
params = {
'SourceDBInstanceIdentifier': source_db_instance_identifier,
'TargetDBInstanceIdentifier': target_db_instance_identifier,
}
if restore_time is not None:
params['RestoreTime'] = restore_time
if use_latest_restorable_time is not None:
params['UseLatestRestorableTime'] = str(
use_latest_restorable_time).lower()
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceToPointInTime',
verb='POST',
path='/', params=params)
def revoke_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Revokes ingress from a DBSecurityGroup for previously
authorized IP ranges or EC2 or VPC Security Groups. Required
parameters for this API are one of CIDRIP, EC2SecurityGroupId
for VPC, or (EC2SecurityGroupOwnerId and either
EC2SecurityGroupName or EC2SecurityGroupId).
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
revoke ingress from.
:type cidrip: string
:param cidrip: The IP range to revoke access from. Must be a valid CIDR
range. If `CIDRIP` is specified, `EC2SecurityGroupName`,
`EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be
provided.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: The id of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS Account Number of the owner
of the EC2 security group specified in the `EC2SecurityGroupName`
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='RevokeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
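        # Request a JSON body (parsed below with json.loads); API faults are
        # mapped to the matching exception class registered in self._faults,
        # falling back to the generic ResponseError.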
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
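# Illustrative usage sketch (not part of the original module). It assumes the
# usual boto.rds2.connect_to_region() entry point and AWS credentials taken
# from the environment; the region, identifier, and ARN arguments are
# placeholders supplied by the caller.
def _example_reboot_and_list_tags(instance_id, resource_arn,
                                  region='us-east-1'):
    import boto.rds2
    conn = boto.rds2.connect_to_region(region)
    # Force a Multi-AZ failover during the reboot; the boolean is lowercased
    # to 'true' by reboot_db_instance before the request is built.
    conn.reboot_db_instance(instance_id, force_failover=True)
    # Every method returns the parsed JSON document produced by _make_request.
    return conn.list_tags_for_resource(resource_arn)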
|
Asana/boto
|
boto/rds2/layer1.py
|
Python
|
mit
| 159,859 | 0.00015 |
# (c) 2012-2015, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest, mock
from ansible.errors import AnsibleError
from ansible.plugins.cache import FactCache, CachePluginAdjudicator
from ansible.plugins.cache.base import BaseCacheModule
from ansible.plugins.cache.memory import CacheModule as MemoryCache
from ansible.plugins.loader import cache_loader
import pytest
class TestCachePluginAdjudicator:
# memory plugin cache
cache = CachePluginAdjudicator()
cache['cache_key'] = {'key1': 'value1', 'key2': 'value2'}
cache['cache_key_2'] = {'key': 'value'}
def test___setitem__(self):
self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
assert self.cache['new_cache_key'] == {'new_key1': ['new_value1', 'new_value2']}
def test_inner___setitem__(self):
self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
self.cache['new_cache_key']['new_key1'][0] = 'updated_value1'
assert self.cache['new_cache_key'] == {'new_key1': ['updated_value1', 'new_value2']}
def test___contains__(self):
assert 'cache_key' in self.cache
assert 'not_cache_key' not in self.cache
def test_get(self):
assert self.cache.get('cache_key') == {'key1': 'value1', 'key2': 'value2'}
def test_get_with_default(self):
assert self.cache.get('foo', 'bar') == 'bar'
def test_get_without_default(self):
assert self.cache.get('foo') is None
def test___getitem__(self):
with pytest.raises(KeyError) as err:
self.cache['foo']
def test_pop_with_default(self):
assert self.cache.pop('foo', 'bar') == 'bar'
def test_pop_without_default(self):
with pytest.raises(KeyError) as err:
assert self.cache.pop('foo')
def test_pop(self):
v = self.cache.pop('cache_key_2')
assert v == {'key': 'value'}
assert 'cache_key_2' not in self.cache
def test_update(self):
self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
assert self.cache['cache_key']['key2'] == 'updatedvalue'
class TestFactCache(unittest.TestCase):
def setUp(self):
with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'):
self.cache = FactCache()
def test_copy(self):
self.cache['avocado'] = 'fruit'
self.cache['daisy'] = 'flower'
a_copy = self.cache.copy()
self.assertEqual(type(a_copy), dict)
self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower'))
def test_plugin_load_failure(self):
# See https://github.com/ansible/ansible/issues/18751
# Note no fact_connection config set, so this will fail
with mock.patch('ansible.constants.CACHE_PLUGIN', 'json'):
self.assertRaisesRegexp(AnsibleError,
"Unable to load the facts cache plugin.*json.*",
FactCache)
def test_update(self):
self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
assert self.cache['cache_key']['key2'] == 'updatedvalue'
def test_update_legacy(self):
self.cache.update('cache_key', {'key2': 'updatedvalue'})
assert self.cache['cache_key']['key2'] == 'updatedvalue'
def test_update_legacy_key_exists(self):
self.cache['cache_key'] = {'key': 'value', 'key2': 'value2'}
self.cache.update('cache_key', {'key': 'updatedvalue'})
assert self.cache['cache_key']['key'] == 'updatedvalue'
assert self.cache['cache_key']['key2'] == 'value2'
class TestAbstractClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_subclass_error(self):
class CacheModule1(BaseCacheModule):
pass
with self.assertRaises(TypeError):
CacheModule1() # pylint: disable=abstract-class-instantiated
class CacheModule2(BaseCacheModule):
def get(self, key):
super(CacheModule2, self).get(key)
with self.assertRaises(TypeError):
CacheModule2() # pylint: disable=abstract-class-instantiated
def test_subclass_success(self):
class CacheModule3(BaseCacheModule):
def get(self, key):
super(CacheModule3, self).get(key)
def set(self, key, value):
super(CacheModule3, self).set(key, value)
def keys(self):
super(CacheModule3, self).keys()
def contains(self, key):
super(CacheModule3, self).contains(key)
def delete(self, key):
super(CacheModule3, self).delete(key)
def flush(self):
super(CacheModule3, self).flush()
def copy(self):
super(CacheModule3, self).copy()
self.assertIsInstance(CacheModule3(), CacheModule3)
def test_memory_cachemodule(self):
self.assertIsInstance(MemoryCache(), MemoryCache)
def test_memory_cachemodule_with_loader(self):
self.assertIsInstance(cache_loader.get('memory'), MemoryCache)
|
Shaps/ansible
|
test/units/plugins/cache/test_cache.py
|
Python
|
gpl-3.0
| 5,922 | 0.001013 |
#!/usr/bin/env python2.7
import feedparser
import sys
import string
# holds the feeds
feeds = []
Feed_list = []
kill = 'g'
tags = 'g'
# multiple feed list
Feed_list = [
'http://krebsonsecurity.com/feed/',
'http://www.tripwire.com/state-of-security/feed/',
'https://threatpost.com/feed'
]
# appends list of feeds via feed parser into big list, slow to load in
for url in Feed_list:
feeds.append(feedparser.parse(url))
def full_list():
# option e feed by chunk
for feed in feeds:
for post in feed.entries:
print '++++++'
print post.title
print post.description
print post.link
print '++++++'
            kill = raw_input('next? ')
if kill == 'y':
menu()
def latest_list():
# option d - line by line*
for feed in feeds:
for post in feed.entries:
print post.title
print ' '
print post.summary
print post.link
print '+++++'
            kill = raw_input('next? ')
if kill == 'y':
menu()
def quick_list():
# option c line by line
for feed in feeds:
for post in feed.entries:
print post.title
print post.link
print '+++++'
            kill = raw_input('next? ')
if kill == 'y':
menu()
# deals with keywords
def keyword_title(term):
for feed in feeds:
for post in feed.entries:
if term in post.title:
print 'found keyword ' + term + ' on '
                print post.title + '\n' + post.link
                again1()
            if term not in post.title:
print 'not found'
again1()
def keyword_full(term):
for feed in feeds:
for post in feed.entries:
if term in post.summary:
print 'found keyword ' + term + ' on ' + post.link
again()
if term not in post.summary:
print 'not found'
again()
# tag listing
def tag_list():
for feed in feeds:
for post in feed.entries:
tags = str(post.tags)
tags = ''.join([c for c in tags if c not in ('{', '}', ':', '[', ']', ',' )])
tags = tags.replace( 'scheme', '' )
tags = tags.replace('term', '')
tags = tags.replace('scheme', '')
tags = tags.replace('None', '')
tags = tags.replace('label', '')
tags = tags.replace("u'", '')
tags = tags.replace("'", '')
tags = tags.replace(" ", '')
# {'term': u'threat intelligence', 'scheme': None, 'label': None}]
print tags
# repeaters
def again():
go_again = raw_input('again? ')
if go_again == 'y':
term = raw_input('Search term? ')
keyword_full(term)
if go_again == 'n':
menu()
def again1():
go_again1 = raw_input('again? ')
if go_again1 == 'y':
term = raw_input('Search term? ')
keyword_title(term)
if go_again1 == 'n':
menu()
def menu():
print 'Welcome Dr Falken'
print ' '
print 'a) Deep RSS Keyword Search'
print 'b) RSS title search'
print 'c) Top post of feeds'
print 'd) Summary List of Feeds'
print 'e) Full list of feeds'
print 'f) Full tag list'
choice = raw_input('please choose an option ')
print 'chosen ' + choice + ' good choice. '
if choice == 'a':
term = raw_input('Search term ?')
keyword_full(term)
if choice == 'b':
term = raw_input('Search term ?')
keyword_title(term)
if choice == 'c':
quick_list()
if choice == 'd':
latest_list()
if choice == 'e':
full_list()
if choice == 'f':
tag_list()
menu()
|
pdm126/rss-crawler
|
dev.py
|
Python
|
mit
| 4,063 | 0.03618 |
# -*- coding: utf-8 -*-
"""
Spyder Editor
Estimate liver fibrosis or cirrhosis from the APRI and FIB-4 scores.
This is a temporary script file.
"""
import math
#APRI is short for: AST to Platelet Ratio Index
#AST unit: iu/l
#PRI unit: 10**9/L
#An APRI above 2 suggests possible cirrhosis
def APRI(AST,upper_AST,PRI):
    apri=((AST*1.0/upper_AST)*100)/PRI
    return apri
#FIB-4 is short for Fibrosis-4
#age unit: years
#AST and ALT unit: U/L (U/L and iu/L are generally interchangeable; the former is the Chinese convention, the latter the international one)
def FIB4(age,AST,ALT,PRI):
    fib4=(age*AST)/(PRI*math.sqrt(ALT))
    return fib4
#Interpret the liver condition from the two scores
def Liver_condition(apri,fib4):
    if apri>2:
        print("Cirrhosis may be present")
        print("For chronic hepatitis B carriers, antiviral drug treatment should be considered")
    if fib4<1.45:
        print("No significant fibrosis, or fibrosis below stage 2 (mild fibrosis)")
    if fib4>3.25:
        print("Fibrosis stage 3-4 or above")
#Disclaimer
def Print_warming():
    print("The algorithm is continuously improved and the results are for reference only. Please follow up with an infectious disease or hepatology specialist")
def Print_unit():
    print("Biochemical values come from the liver function panel and the complete blood count")
    print("AST unit: iu/l")
    print("ALT unit: U/L")
    print("PRI unit: 10**9/L")
    print("Age unit: years")
    print("U/L and iu/L are generally interchangeable; the former is the Chinese convention, the latter the international one")
#Disclaimer
Print_warming()
#Print the units of the biochemical values
print("-"*30)
Print_unit()
print("-"*30)
print("")
print("")
#Read the input parameters
print("Please enter the following values (e.g. 10, 23.5):")
AST=float(input("Aspartate aminotransferase (AST):"))
upper_AST=float(input("Upper limit of normal for AST:"))
ALT=float(input("Alanine aminotransferase (ALT):"))
PRI=float(input("Platelet count (PRI):"))
age=float(input("Age:"))
apri=APRI(AST,upper_AST,PRI)
fib4=FIB4(age,AST,ALT,PRI)
print("-"*30)
print("")
print("")
print("Estimated result:")
#Interpret the liver condition
Liver_condition(apri,fib4)
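# Worked example (illustrative numbers only, not medical advice):
#   APRI(AST=90, upper_AST=40, PRI=100) = ((90/40)*100)/100 = 2.25
#   FIB4(age=50, AST=90, ALT=64, PRI=100) = (50*90)/(100*sqrt(64)) = 5.625
# With apri > 2 and fib4 > 3.25, Liver_condition() would report possible
# cirrhosis and stage 3-4 (or higher) fibrosis.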
|
kyokyos/bioinform
|
HBV_APRI_FIB4.py
|
Python
|
unlicense
| 1,993 | 0.038821 |
#!/usr/bin/env python
'''calibration command handling'''
import time, os
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module
class CalibrationModule(mp_module.MPModule):
def __init__(self, mpstate):
super(CalibrationModule, self).__init__(mpstate, "calibration")
self.add_command('ground', self.cmd_ground, 'do a ground start')
self.add_command('level', self.cmd_level, 'set level on a multicopter')
self.add_command('compassmot', self.cmd_compassmot, 'do compass/motor interference calibration')
self.add_command('calpress', self.cmd_calpressure,'calibrate pressure sensors')
self.add_command('accelcal', self.cmd_accelcal, 'do 3D accelerometer calibration')
self.add_command('gyrocal', self.cmd_gyrocal, 'do gyro calibration')
self.accelcal_count = -1
self.accelcal_wait_enter = False
self.compassmot_running = False
self.empty_input_count = 0
def cmd_ground(self, args):
'''do a ground start mode'''
self.master.calibrate_imu()
def cmd_level(self, args):
'''run a accel level'''
self.master.calibrate_level()
def cmd_accelcal(self, args):
'''do a full 3D accel calibration'''
mav = self.master
        # ask the APM to begin 3D calibration of accelerometers
mav.mav.command_long_send(mav.target_system, mav.target_component,
mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
0, 0, 0, 0, 1, 0, 0)
self.accelcal_count = 0
self.accelcal_wait_enter = False
def cmd_gyrocal(self, args):
'''do a full gyro calibration'''
mav = self.master
mav.mav.command_long_send(mav.target_system, mav.target_component,
mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
1, 0, 0, 0, 0, 0, 0)
def mavlink_packet(self, m):
'''handle mavlink packets'''
if self.accelcal_count != -1:
if m.get_type() == 'STATUSTEXT':
# handle accelcal packet
text = str(m.text)
if text.startswith('Place '):
self.accelcal_wait_enter = True
self.empty_input_count = self.mpstate.empty_input_count
def idle_task(self):
'''handle mavlink packets'''
if self.accelcal_count != -1:
if self.accelcal_wait_enter and self.empty_input_count != self.mpstate.empty_input_count:
self.accelcal_wait_enter = False
self.accelcal_count += 1
# tell the APM that user has done as requested
self.master.mav.command_ack_send(self.accelcal_count, 1)
if self.accelcal_count >= 6:
self.accelcal_count = -1
if self.compassmot_running:
if self.mpstate.empty_input_count != self.empty_input_count:
# user has hit enter, stop the process
self.compassmot_running = False
print("sending stop")
self.master.mav.command_ack_send(0, 1)
def cmd_compassmot(self, args):
'''do a compass/motor interference calibration'''
mav = self.master
print("compassmot starting")
mav.mav.command_long_send(mav.target_system, mav.target_component,
mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
0, 0, 0, 0, 0, 1, 0)
self.compassmot_running = True
self.empty_input_count = self.mpstate.empty_input_count
def cmd_calpressure(self, args):
'''calibrate pressure sensors'''
self.master.calibrate_pressure()
def init(mpstate):
'''initialise module'''
return CalibrationModule(mpstate)
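# Rough usage sketch (assumption, not part of the original module): once the
# module is loaded into MAVProxy (e.g. "module load calibration"), the commands
# registered in __init__ above become available from the console:
#   accelcal    # step through the 3D accelerometer calibration prompts
#   gyrocal     # recalibrate the gyros
#   compassmot  # start compass/motor interference calibration (Enter to stop)
#   calpress    # calibrate the pressure sensors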
|
dshyshov/MAVProxy
|
MAVProxy/modules/mavproxy_calibration.py
|
Python
|
gpl-3.0
| 3,874 | 0.003872 |
from openstates.utils import LXMLMixin
import datetime as dt
from pupa.scrape import Scraper, Event
from .utils import get_short_codes
from requests import HTTPError
import pytz
URL = "http://www.capitol.hawaii.gov/upcominghearings.aspx"
class HIEventScraper(Scraper, LXMLMixin):
def get_related_bills(self, href):
ret = []
try:
page = self.lxmlize(href)
except HTTPError:
return ret
bills = page.xpath(".//a[contains(@href, 'Bills')]")
for bill in bills:
try:
row = next(bill.iterancestors(tag='tr'))
except StopIteration:
continue
tds = row.xpath("./td")
descr = tds[1].text_content()
for i in ['\r\n', '\xa0']:
descr = descr.replace(i, '')
ret.append({"bill_id": bill.text_content(),
"type": "consideration",
"descr": descr})
return ret
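    # get_related_bills returns a list of plain dicts, e.g. (hypothetical
    # values): [{"bill_id": "HB 123", "type": "consideration",
    #            "descr": "RELATING TO EDUCATION"}]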
def scrape(self):
tz = pytz.timezone("US/Eastern")
get_short_codes(self)
page = self.lxmlize(URL)
table = page.xpath(
"//table[@id='ctl00_ContentPlaceHolderCol1_GridView1']")[0]
for event in table.xpath(".//tr")[1:]:
tds = event.xpath("./td")
committee = tds[0].text_content().strip()
descr = [x.text_content() for x in tds[1].xpath(".//span")]
if len(descr) != 1:
raise Exception
descr = descr[0].replace('.', '').strip()
when = tds[2].text_content().strip()
where = tds[3].text_content().strip()
notice = tds[4].xpath(".//a")[0]
notice_href = notice.attrib['href']
notice_name = notice.text
when = dt.datetime.strptime(when, "%m/%d/%Y %I:%M %p")
when = pytz.utc.localize(when)
event = Event(name=descr, start_time=when, classification='committee-meeting',
description=descr, location_name=where, timezone=tz.zone)
if "/" in committee:
committees = committee.split("/")
else:
committees = [committee]
for committee in committees:
if "INFO" not in committee:
committee = self.short_ids.get("committee", {"chamber": "unknown",
"name": committee})
else:
committee = {
"chamber": "joint",
"name": committee,
}
event.add_committee(committee['name'], note='host')
event.add_source(URL)
event.add_document(notice_name,
notice_href,
media_type='text/html')
for bill in self.get_related_bills(notice_href):
a = event.add_agenda_item(description=bill['descr'])
a.add_bill(
bill['bill_id'],
note=bill['type']
)
yield event
|
cliftonmcintosh/openstates
|
openstates/hi/events.py
|
Python
|
gpl-3.0
| 3,151 | 0.001269 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests add_loss API correctness."""
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.keras import Input
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import Model
from tensorflow.python.keras import optimizer_v2
from tensorflow.python.keras import Sequential
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import RMSPropOptimizer
MAE = losses.MeanAbsoluteError
mae = losses.mean_absolute_error
def get_ctl_train_step(model):
optimizer = optimizer_v2.gradient_descent.SGD(0.05)
def train_step(x, y, w=None):
with backprop.GradientTape() as tape:
if w is not None:
model([x, y, w])
else:
model([x, y])
loss = math_ops.reduce_sum(model.losses)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
return loss
return train_step
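# A minimal sketch of the add_loss pattern exercised by the tests below
# (mirrors test_loss_on_model_fit; `x`/`y` stand for the arrays built in setUp):
#   inputs = Input(shape=(1,))
#   targets = Input(shape=(1,))
#   outputs = testing_utils.Bias()(inputs)
#   model = Model([inputs, targets], outputs)
#   model.add_loss(MAE()(targets, outputs))
#   model.compile(optimizer_v2.gradient_descent.SGD(0.05))
#   model.fit([x, y], batch_size=3, epochs=5)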
# TODO(psv): Add tests cases where a model is used in loss function but is
# not part of the training model.
class TestAddLossCorrectness(keras_parameterized.TestCase):
def setUp(self):
super(TestAddLossCorrectness, self).setUp()
self.x = np.array([[0.], [1.], [2.]], dtype='float32')
self.y = np.array([[0.5], [2.], [3.5]], dtype='float32')
self.w = np.array([[1.25], [0.5], [1.25]], dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_loss_on_model_fit(self):
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
model.add_loss(MAE()(targets, outputs))
model.add_loss(math_ops.reduce_mean(mae(targets, outputs)))
model.compile(
optimizer_v2.gradient_descent.SGD(0.05),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit([self.x, self.y], batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_loss_callable_on_model_fit(self):
model = testing_utils.get_model_from_layers([testing_utils.Bias()],
input_shape=(1,))
def callable_loss():
return math_ops.reduce_sum(model.weights)
model.add_loss(callable_loss)
model.compile(
optimizer_v2.gradient_descent.SGD(0.1),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(self.x, batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [0., -.1, -.2, -.3, -.4], 1e-3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_loss_on_model_ctl(self):
def get_model_and_train_step():
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
model.add_loss(MAE()(targets, outputs))
model.add_loss(math_ops.reduce_mean(mae(targets, outputs)))
return get_ctl_train_step(model)
train_step = get_model_and_train_step()
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
train_step = def_function.function(get_model_and_train_step())
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_loss_callable_on_model_ctl(self):
def get_model_and_train_step():
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
def callable_loss():
return math_ops.reduce_sum(model.weights)
model.add_loss(callable_loss)
return get_ctl_train_step(model)
train_step = get_model_and_train_step()
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [0., -0.05, -0.1, -0.15, -0.2], 1e-3)
train_step = def_function.function(get_model_and_train_step())
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [0., -0.05, -0.1, -0.15, -0.2], 1e-3)
@keras_parameterized.run_all_keras_modes
def test_loss_with_sample_weight_on_model_fit(self):
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
sw = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets, sw], outputs)
model.add_loss(MAE()(targets, outputs, sw))
model.add_loss(3 * math_ops.reduce_mean(sw * mae(targets, outputs)))
model.compile(
optimizer_v2.gradient_descent.SGD(0.025),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4], 1e-3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_loss_with_sample_weight_on_model_ctl(self):
def get_model_and_train_step():
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
sw = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets, sw], outputs)
model.add_loss(MAE()(targets, outputs, sw))
model.add_loss(math_ops.reduce_mean(sw * mae(targets, outputs)))
return get_ctl_train_step(model)
train_step = get_model_and_train_step()
loss = [train_step(self.x, self.y, self.w) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
train_step = def_function.function(get_model_and_train_step())
loss = [train_step(self.x, self.y, self.w) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
@keras_parameterized.run_all_keras_modes
def test_loss_with_sample_weight_in_model_call(self):
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.bias = testing_utils.Bias()
def call(self, inputs):
outputs = self.bias(inputs[0])
self.add_loss(MAE()(inputs[1], outputs, inputs[2]))
self.add_loss(math_ops.reduce_mean(inputs[2] * mae(inputs[1], outputs)))
return outputs
model = MyModel()
model.predict([self.x, self.y, self.w])
model.compile(
optimizer_v2.gradient_descent.SGD(0.05),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
self.assertEqual(len(model.losses), 2)
self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
eval_out = model.evaluate([self.x, self.y, self.w])
self.assertAlmostEqual(eval_out, 1.0, 3)
@keras_parameterized.run_all_keras_modes
def test_loss_with_sample_weight_in_layer_call(self):
class MyLayer(layers.Layer):
def __init__(self):
super(MyLayer, self).__init__()
self.bias = testing_utils.Bias()
def call(self, inputs):
out = self.bias(inputs[0])
self.add_loss(MAE()(inputs[1], out, inputs[2]))
self.add_loss(math_ops.reduce_mean(inputs[2] * mae(inputs[1], out)))
return out
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
sw = Input(shape=(1,))
outputs = MyLayer()([inputs, targets, sw])
model = Model([inputs, targets, sw], outputs)
model.predict([self.x, self.y, self.w])
model.compile(
optimizer_v2.gradient_descent.SGD(0.05),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
output = model.evaluate([self.x, self.y, self.w])
self.assertAlmostEqual(output, 1.0, 3)
output = model.test_on_batch([self.x, self.y, self.w])
self.assertAlmostEqual(output, 1.0, 3)
@keras_parameterized.run_all_keras_modes
def test_loss_on_layer(self):
class MyLayer(layers.Layer):
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
return inputs
inputs = Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = Model(inputs, outputs)
self.assertEqual(len(model.losses), 1)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_activity_regularizer(self):
loss = {}
for reg in [None, 'l2']:
model_layers = [
layers.Dense(
10,
activation='relu',
activity_regularizer=reg,
kernel_initializer='ones',
use_bias=False),
layers.Dense(
1,
activation='sigmoid',
kernel_initializer='ones',
use_bias=False),
]
model = testing_utils.get_model_from_layers(
model_layers, input_shape=(10,))
x = np.ones((10, 10), 'float32')
y = np.zeros((10, 1), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=5)
loss[reg] = model.evaluate(x, y)
self.assertLess(loss[None], loss['l2'])
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_activity_regularizer_loss_value(self):
layer = layers.Dense(
1,
kernel_initializer='zeros',
bias_initializer='ones',
activity_regularizer='l2')
model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
x = np.ones((10, 10), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
run_eagerly=testing_utils.should_run_eagerly())
loss = model.test_on_batch(x)
self.assertAlmostEqual(0.01, loss, places=4)
@keras_parameterized.run_all_keras_modes
def test_activity_regularizer_batch_independent(self):
inputs = layers.Input(shape=(10,))
x = layers.Dense(10, activation='relu', activity_regularizer='l2')(inputs)
outputs = layers.Dense(1, activation='sigmoid')(x)
model = Model(inputs, outputs)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
run_eagerly=testing_utils.should_run_eagerly())
loss_small_batch = model.test_on_batch(np.ones((10, 10), 'float32'))
loss_big_batch = model.test_on_batch(np.ones((20, 10), 'float32'))
self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4)
@keras_parameterized.run_all_keras_modes
def test_with_shared_layer(self):
class LayerWithLoss(layers.Layer):
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs), inputs=inputs)
return inputs * 2
shared_layer = LayerWithLoss()
m = Sequential([shared_layer])
m2 = Sequential([shared_layer, m])
m2(array_ops.constant([1, 2, 3]))
self.assertEqual(len(m2.losses), 2)
self.assertAllClose(m2.losses, [6, 12])
@keras_parameterized.run_all_keras_modes
def test_with_shared_nested_layer(self):
class LayerWithLoss(layers.Layer):
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs), inputs=inputs)
return inputs * 2
class LayerWithNestedLayerWithLoss(layers.Layer):
def __init__(self):
super(LayerWithNestedLayerWithLoss, self).__init__()
self.loss_layer = LayerWithLoss()
def call(self, inputs):
return self.loss_layer(inputs)
shared_layer = LayerWithNestedLayerWithLoss()
m = Sequential([shared_layer])
m2 = Sequential([shared_layer, m])
m2(array_ops.constant([1, 2, 3]))
self.assertEqual(len(m2.losses), 2)
self.assertAllClose(m2.losses, [6, 12])
@keras_parameterized.run_all_keras_modes
def test_clear_losses(self):
class LayerWithSharedNestedLossLayer(layers.Layer):
def __init__(self):
super(LayerWithSharedNestedLossLayer, self).__init__()
self.loss_layer = layers.ActivityRegularization(l2=0.001)
self.add_weight(shape=(1,), regularizer='l2')
def call(self, x):
x = self.loss_layer(x)
return self.loss_layer(x)
inputs = Input(shape=(1,))
l = LayerWithSharedNestedLossLayer() # Weight loss + 2 activity losses.
x1 = array_ops.ones((1, 1))
_ = l(x1)
if not context.executing_eagerly():
self.assertEqual(len(l.get_losses_for(x1)), 2)
self.assertEqual(len(l.get_losses_for(None)), 1)
x2 = array_ops.ones((1, 1))
_ = l(x2)
if not context.executing_eagerly():
self.assertEqual(len(l.get_losses_for(x1)), 2)
self.assertEqual(len(l.get_losses_for(x2)), 2)
self.assertEqual(len(l.get_losses_for(None)), 1)
outputs = l(inputs)
model = Model(inputs, outputs)
if not context.executing_eagerly():
self.assertEqual(len(model.losses), 7)
self.assertEqual(len(l.get_losses_for(x1)), 2)
self.assertEqual(len(l.get_losses_for(x2)), 2)
self.assertEqual(len(l.get_losses_for(None)), 1)
x3 = array_ops.ones((1, 1))
model(x3)
x4 = array_ops.ones((1, 1))
model(x4)
if context.executing_eagerly():
# Eager losses are cleared every `__call__`.
self.assertEqual(len(model.losses), 3)
else:
self.assertEqual(len(model.losses), 11)
self.assertEqual(len(model.get_losses_for(x3)), 2)
self.assertEqual(len(model.get_losses_for(x4)), 2)
self.assertEqual(len(model.get_losses_for(None)), 1)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_invalid_constant_input(self):
inputs = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model(inputs, outputs)
with self.assertRaisesRegex(
ValueError,
'Expected a symbolic Tensors or a callable for the loss value'):
model.add_loss(1.)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_invalid_variable_input(self):
inputs = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model(inputs, outputs)
with self.assertRaisesRegex(
ValueError,
'Expected a symbolic Tensors or a callable for the loss value'):
model.add_loss(model.weights[0])
@keras_parameterized.run_all_keras_modes
def test_add_entropy_loss_on_functional_model(self):
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
model.add_loss(losses.binary_crossentropy(targets, outputs))
model.compile('sgd', run_eagerly=testing_utils.should_run_eagerly())
with test.mock.patch.object(logging, 'warning') as mock_log:
model.fit([self.x, self.y], batch_size=3, epochs=5)
self.assertNotIn('Gradients do not exist for variables',
str(mock_log.call_args))
if __name__ == '__main__':
test.main()
|
sarvex/tensorflow
|
tensorflow/python/keras/tests/add_loss_correctness_test.py
|
Python
|
apache-2.0
| 16,393 | 0.007137 |
import unittest
import config_test
from backupcmd.commands import backupCommands
class BackupCommandsTestCase(unittest.TestCase):
"""Test commands passed to main script"""
def test_hyphen_r_option(self):
print 'Pending BackupCommandsTestCase'
self.assertEqual(1,1)
|
ChinaNetCloud/nc-backup-py
|
nc-backup-py/tests/backup_main_commands_test.py
|
Python
|
apache-2.0
| 293 | 0.010239 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import gdb
import pwndbg.chain
import pwndbg.commands
import pwndbg.enhance
import pwndbg.file
import pwndbg.which
import pwndbg.wrappers.checksec
import pwndbg.wrappers.readelf
from pwndbg.color import message
parser = argparse.ArgumentParser(description='Calls mprotect. x86_64 only.')
parser.add_argument('addr', help='Page-aligned address to all mprotect on.',
type=int)
parser.add_argument('length', help='Count of bytes to call mprotect on. Needs '
'to be multiple of page size.',
type=int)
parser.add_argument('prot', help='Prot string as in mprotect(2). Eg. '
'"PROT_READ|PROT_EXEC"', type=str)
SYS_MPROTECT = 0x7d
prot_dict = {
'PROT_NONE': 0x0,
'PROT_READ': 0x1,
'PROT_WRITE': 0x2,
'PROT_EXEC': 0x4,
}
def prot_str_to_val(protstr):
'''Heuristic to convert PROT_EXEC|PROT_WRITE to integer value.'''
prot_int = 0
for k in prot_dict:
if k in protstr:
prot_int |= prot_dict[k]
return prot_int
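# For example, prot_str_to_val('PROT_READ|PROT_EXEC') yields 0x1 | 0x4 == 0x5.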
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
@pwndbg.commands.OnlyAmd64
def mprotect(addr, length, prot):
'''Only x86_64.'''
saved_rax = pwndbg.regs.rax
saved_rbx = pwndbg.regs.rbx
saved_rcx = pwndbg.regs.rcx
saved_rdx = pwndbg.regs.rdx
saved_rip = pwndbg.regs.rip
prot_int = prot_str_to_val(prot)
gdb.execute('set $rax={}'.format(SYS_MPROTECT))
gdb.execute('set $rbx={}'.format(addr))
gdb.execute('set $rcx={}'.format(length))
gdb.execute('set $rdx={}'.format(prot_int))
saved_instruction_2bytes = pwndbg.memory.read(pwndbg.regs.rip, 2)
# int 0x80
pwndbg.memory.write(pwndbg.regs.rip, b'\xcd\x80')
# execute syscall
gdb.execute('stepi')
print('mprotect returned {}'.format(pwndbg.regs.rax))
# restore registers and memory
pwndbg.memory.write(saved_rip, saved_instruction_2bytes)
gdb.execute('set $rax={}'.format(saved_rax))
gdb.execute('set $rbx={}'.format(saved_rbx))
gdb.execute('set $rcx={}'.format(saved_rcx))
gdb.execute('set $rdx={}'.format(saved_rdx))
gdb.execute('set $rip={}'.format(saved_rip))
pwndbg.regs.rax = saved_rax
pwndbg.regs.rbx = saved_rbx
pwndbg.regs.rcx = saved_rcx
pwndbg.regs.rdx = saved_rdx
pwndbg.regs.rip = saved_rip
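# Rough usage sketch from the pwndbg prompt (the address is hypothetical):
#   pwndbg> mprotect 0x555555554000 4096 PROT_READ|PROT_WRITE|PROT_EXEC
# As noted in the argparse help above, the address must be page-aligned and the
# length a multiple of the page size.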
|
anthraxx/pwndbg
|
pwndbg/commands/mprotect.py
|
Python
|
mit
| 2,402 | 0.000833 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualHardwareOption(vim, *args, **kwargs):
'''The VirtualHardwareOption data object contains the options available for all
virtual devices.'''
obj = vim.client.factory.create('ns0:VirtualHardwareOption')
# do some validation checking...
    if (len(args) + len(kwargs)) < 14:
        raise IndexError('Expected at least 14 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'deviceListReadonly', 'hwVersion', 'memoryMB', 'numCoresPerSocket', 'numCPU',
'numCpuReadonly', 'numIDEControllers', 'numPCIControllers',
'numPS2Controllers', 'numSIOControllers', 'numUSBControllers',
'numUSBXHCIControllers', 'resourceConfigOption', 'virtualDeviceOption' ]
optional = [ 'licensingLimit', 'numSupportedWwnNodes', 'numSupportedWwnPorts',
'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
|
xuru/pyvisdk
|
pyvisdk/do/virtual_hardware_option.py
|
Python
|
mit
| 1,413 | 0.012031 |
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""This package implements a very simple Qt GUI that can load a
pipeline, change its parameters based on aliases, and execute them on
the spreadsheet."""
from __future__ import division
identifier = 'org.vistrails.vistrails.pipelineedit'
name = 'Pipeline Editor'
version = '0.0.2'
old_identifiers = ['edu.utah.sci.vistrails.pipelineedit']
|
VisTrails/VisTrails
|
vistrails/packages/pipelineEdit/__init__.py
|
Python
|
bsd-3-clause
| 2,254 | 0.011979 |
# -*- coding: utf-8 -*-
import warnings
from statsmodels.compat.pandas import PD_LT_1_4
import os
import numpy as np
import pandas as pd
from statsmodels.multivariate.factor import Factor
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_raises, assert_array_equal,
assert_array_less, assert_allclose)
import pytest
try:
import matplotlib.pyplot as plt
missing_matplotlib = False
plt.switch_backend('Agg')
except ImportError:
missing_matplotlib = True
# Example data
# https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/
# viewer.htm#statug_introreg_sect012.htm
X = pd.DataFrame([['Minas Graes', 2.068, 2.070, 1.580, 1, 0],
['Minas Graes', 2.068, 2.074, 1.602, 2, 1],
['Minas Graes', 2.090, 2.090, 1.613, 3, 0],
['Minas Graes', 2.097, 2.093, 1.613, 4, 1],
['Minas Graes', 2.117, 2.125, 1.663, 5, 0],
['Minas Graes', 2.140, 2.146, 1.681, 6, 1],
['Matto Grosso', 2.045, 2.054, 1.580, 7, 0],
['Matto Grosso', 2.076, 2.088, 1.602, 8, 1],
['Matto Grosso', 2.090, 2.093, 1.643, 9, 0],
['Matto Grosso', 2.111, 2.114, 1.643, 10, 1],
['Santa Cruz', 2.093, 2.098, 1.653, 11, 0],
['Santa Cruz', 2.100, 2.106, 1.623, 12, 1],
['Santa Cruz', 2.104, 2.101, 1.653, 13, 0]],
columns=['Loc', 'Basal', 'Occ', 'Max', 'id', 'alt'])
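# A minimal sketch of the Factor workflow exercised by the tests below
# (same data and calls as test_example_compare_to_R_output):
#   mod = Factor(X.iloc[:, 1:-1], 2, smc=True)
#   res = mod.fit()
#   res.rotate('varimax')
#   print(res.summary())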
def test_auto_col_name():
# Test auto generated variable names when endog_names is None
mod = Factor(None, 2, corr=np.eye(11), endog_names=None,
smc=False)
assert_array_equal(mod.endog_names,
['var00', 'var01', 'var02', 'var03', 'var04', 'var05',
'var06', 'var07', 'var08', 'var09', 'var10'])
def test_direct_corr_matrix():
# Test specifying the correlation matrix directly
mod = Factor(None, 2, corr=np.corrcoef(X.iloc[:, 1:-1], rowvar=0),
smc=False)
results = mod.fit(tol=1e-10)
a = np.array([[0.965392158864, 0.225880658666255],
[0.967587154301, 0.212758741910989],
[0.929891035996, -0.000603217967568],
[0.486822656362, -0.869649573289374]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# Test set and get endog_names
mod.endog_names = X.iloc[:, 1:-1].columns
assert_array_equal(mod.endog_names, ['Basal', 'Occ', 'Max', 'id'])
# Test set endog_names with the wrong number of elements
assert_raises(ValueError, setattr, mod, 'endog_names',
X.iloc[:, :1].columns)
def test_unknown_fa_method_error():
# Test raise error if an unkonwn FA method is specified in fa.method
mod = Factor(X.iloc[:, 1:-1], 2, method='ab')
assert_raises(ValueError, mod.fit)
def test_example_compare_to_R_output():
# Testing basic functions and compare to R output
# R code for producing the results:
# library(psych)
# library(GPArotation)
# Basal = c(2.068, 2.068, 2.09, 2.097, 2.117, 2.14, 2.045, 2.076, 2.09, 2.111, 2.093, 2.1, 2.104)
# Occ = c(2.07, 2.074, 2.09, 2.093, 2.125, 2.146, 2.054, 2.088, 2.093, 2.114, 2.098, 2.106, 2.101)
# Max = c(1.58, 1.602, 1.613, 1.613, 1.663, 1.681, 1.58, 1.602, 1.643, 1.643, 1.653, 1.623, 1.653)
# id = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
# Y <- cbind(Basal, Occ, Max, id)
# a <- fa(Y, nfactors=2, fm="pa", rotate="none", SMC=FALSE, min.err=1e-10)
# b <- cbind(a$loadings[,1], -a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="Promax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="Varimax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="quartimax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], -a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="oblimin", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# No rotation without squared multiple correlations prior
# produce same results as in R `fa`
mod = Factor(X.iloc[:, 1:-1], 2, smc=False)
results = mod.fit(tol=1e-10)
a = np.array([[0.965392158864, 0.225880658666255],
[0.967587154301, 0.212758741910989],
[0.929891035996, -0.000603217967568],
[0.486822656362, -0.869649573289374]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# No rotation WITH squared multiple correlations prior
# produce same results as in R `fa`
mod = Factor(X.iloc[:, 1:-1], 2, smc=True)
results = mod.fit()
a = np.array([[0.97541115, 0.20280987],
[0.97113975, 0.17207499],
[0.9618705, -0.2004196],
[0.37570708, -0.45821379]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# Same as R GRArotation
results.rotate('varimax')
a = np.array([[0.98828898, -0.12587155],
[0.97424206, -0.15354033],
[0.84418097, -0.502714],
[0.20601929, -0.55558235]])
assert_array_almost_equal(results.loadings, a, decimal=8)
results.rotate('quartimax') # Same as R fa
a = np.array([[0.98935598, 0.98242714, 0.94078972, 0.33442284],
[0.117190049, 0.086943252, -0.283332952, -0.489159543]])
assert_array_almost_equal(results.loadings, a.T, decimal=8)
results.rotate('equamax') # Not the same as R fa
results.rotate('promax') # Not the same as R fa
results.rotate('biquartimin') # Not the same as R fa
results.rotate('oblimin') # Same as R fa
a = np.array([[1.02834170170, 1.00178840104, 0.71824931384,
-0.00013510048],
[0.06563421, 0.03096076, -0.39658839, -0.59261944]])
assert_array_almost_equal(results.loadings, a.T, decimal=8)
# Testing result summary string
results.rotate('varimax')
desired = (
""" Factor analysis results
=============================
Eigenvalues
-----------------------------
Basal Occ Max id
-----------------------------
2.9609 0.3209 0.0000 -0.0000
-----------------------------
-----------------------------
Communality
-----------------------------
Basal Occ Max id
-----------------------------
0.9926 0.9727 0.9654 0.3511
-----------------------------
-----------------------------
Pre-rotated loadings
-----------------------------------
factor 0 factor 1
-----------------------------------
Basal 0.9754 0.2028
Occ 0.9711 0.1721
Max 0.9619 -0.2004
id 0.3757 -0.4582
-----------------------------
-----------------------------
varimax rotated loadings
-----------------------------------
factor 0 factor 1
-----------------------------------
Basal 0.9883 -0.1259
Occ 0.9742 -0.1535
Max 0.8442 -0.5027
id 0.2060 -0.5556
=============================
""")
actual = results.summary().as_text()
actual = "\n".join(line.rstrip() for line in actual.splitlines()) + "\n"
assert_equal(actual, desired)
@pytest.mark.skipif(missing_matplotlib, reason='matplotlib not available')
def test_plots(close_figures):
mod = Factor(X.iloc[:, 1:], 3)
results = mod.fit()
results.rotate('oblimin')
fig = results.plot_scree()
fig_loadings = results.plot_loadings()
assert_equal(3, len(fig_loadings))
@pytest.mark.smoke
def test_getframe_smoke():
# mostly smoke tests for now
mod = Factor(X.iloc[:, 1:-1], 2, smc=True)
res = mod.fit()
df = res.get_loadings_frame(style='raw')
assert_(isinstance(df, pd.DataFrame))
lds = res.get_loadings_frame(style='strings', decimals=3, threshold=0.3)
# The Styler option require jinja2, skip if not available
try:
from jinja2 import Template # noqa:F401
except ImportError:
return
# TODO: separate this and do pytest.skip?
# Old implementation that warns
if PD_LT_1_4:
with warnings.catch_warnings():
warnings.simplefilter("always")
lds.to_latex()
else:
# Smoke test using new style to_latex
lds.style.to_latex()
try:
from pandas.io import formats as pd_formats
except ImportError:
from pandas import formats as pd_formats
ldf = res.get_loadings_frame(style='display')
assert_(isinstance(ldf, pd_formats.style.Styler))
assert_(isinstance(ldf.data, pd.DataFrame))
res.get_loadings_frame(style='display', decimals=3, threshold=0.2)
res.get_loadings_frame(style='display', decimals=3, color_max='GAINSBORO')
res.get_loadings_frame(style='display', decimals=3, threshold=0.45, highlight_max=False, sort_=False)
def test_factor_missing():
xm = X.iloc[:, 1:-1].copy()
nobs, k_endog = xm.shape
xm.iloc[2,2] = np.nan
mod = Factor(xm, 2)
assert_equal(mod.nobs, nobs - 1)
assert_equal(mod.k_endog, k_endog)
assert_equal(mod.endog.shape, (nobs - 1, k_endog))
def _zscore(x):
# helper function
return (x - x.mean(0)) / x.std(0)
@pytest.mark.smoke
def test_factor_scoring():
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
csv_path = os.path.join(dir_path, 'results', 'factor_data.csv')
y = pd.read_csv(csv_path)
csv_path = os.path.join(dir_path, 'results', 'factors_stata.csv')
f_s = pd.read_csv(csv_path)
# mostly smoke tests for now
mod = Factor(y, 2)
res = mod.fit(maxiter=1)
res.rotate('varimax')
f_reg = res.factor_scoring(method='reg')
assert_allclose(f_reg * [1, -1], f_s[["f1", 'f2']].values,
atol=1e-4, rtol=1e-3)
f_bart = res.factor_scoring()
assert_allclose(f_bart * [1, -1], f_s[["f1b", 'f2b']].values,
atol=1e-4, rtol=1e-3)
# check we have high correlation to ols and gls
f_ols = res.factor_scoring(method='ols')
f_gls = res.factor_scoring(method='gls')
f_reg_z = _zscore(f_reg)
f_ols_z = _zscore(f_ols)
f_gls_z = _zscore(f_gls)
assert_array_less(0.98, (f_ols_z * f_reg_z).mean(0))
assert_array_less(0.999, (f_gls_z * f_reg_z).mean(0))
# with oblique rotation
res.rotate('oblimin')
# Note: Stata has second factor with flipped sign compared to statsmodels
assert_allclose(res._corr_factors()[0, 1], (-1) * 0.25651037, rtol=1e-3)
f_reg = res.factor_scoring(method='reg')
assert_allclose(f_reg * [1, -1], f_s[["f1o", 'f2o']].values,
atol=1e-4, rtol=1e-3)
f_bart = res.factor_scoring()
assert_allclose(f_bart * [1, -1], f_s[["f1ob", 'f2ob']].values,
atol=1e-4, rtol=1e-3)
# check we have high correlation to ols and gls
f_ols = res.factor_scoring(method='ols')
f_gls = res.factor_scoring(method='gls')
f_reg_z = _zscore(f_reg)
f_ols_z = _zscore(f_ols)
f_gls_z = _zscore(f_gls)
assert_array_less(0.97, (f_ols_z * f_reg_z).mean(0))
assert_array_less(0.999, (f_gls_z * f_reg_z).mean(0))
# check provided endog
f_ols2 = res.factor_scoring(method='ols', endog=res.model.endog)
assert_allclose(f_ols2, f_ols, rtol=1e-13)
|
statsmodels/statsmodels
|
statsmodels/multivariate/tests/test_factor.py
|
Python
|
bsd-3-clause
| 11,521 | 0.000868 |
# -*- coding: utf-8 -*-
"""
This serialiser will output an RDF Graph as a JSON-LD formatted document. See:
http://json-ld.org/
Example usage::
>>> from rdflib.plugin import register, Serializer
>>> register('json-ld', Serializer, 'rdflib_jsonld.serializer', 'JsonLDSerializer')
>>> from rdflib import Graph
>>> testrdf = '''
... @prefix dc: <http://purl.org/dc/terms/> .
... <http://example.org/about>
... dc:title "Someone's Homepage"@en .
... '''
>>> g = Graph().parse(data=testrdf, format='n3')
>>> print(g.serialize(format='json-ld', indent=4).decode())
[
{
"@id": "http://example.org/about",
"http://purl.org/dc/terms/title": [
{
"@language": "en",
"@value": "Someone's Homepage"
}
]
}
]
"""
# NOTE: This code writes the entire JSON object into memory before serialising,
# but we should consider streaming the output to deal with arbitrarily large
# graphs.
import warnings
from rdflib.serializer import Serializer
from rdflib.graph import Graph
from rdflib.term import URIRef, Literal, BNode
from rdflib.namespace import RDF, XSD
from .context import Context, UNDEF
from .util import json
from .keys import CONTEXT, GRAPH, ID, VOCAB, LIST, SET, LANG
__all__ = ['JsonLDSerializer', 'from_rdf']
PLAIN_LITERAL_TYPES = set([XSD.boolean, XSD.integer, XSD.double, XSD.string])
class JsonLDSerializer(Serializer):
def __init__(self, store):
super(JsonLDSerializer, self).__init__(store)
def serialize(self, stream, base=None, encoding=None, **kwargs):
# TODO: docstring w. args and return value
encoding = encoding or 'utf-8'
if encoding not in ('utf-8', 'utf-16'):
warnings.warn("JSON should be encoded as unicode. " +
"Given encoding was: %s" % encoding)
context_data = kwargs.get('context')
use_native_types = kwargs.get('use_native_types', True),
use_rdf_type = kwargs.get('use_rdf_type', False)
auto_compact = kwargs.get('auto_compact', True)
indent = kwargs.get('indent', 2)
separators = kwargs.get('separators', (',', ': '))
sort_keys = kwargs.get('sort_keys', True)
ensure_ascii = kwargs.get('ensure_ascii', False)
obj = from_rdf(self.store, context_data, base,
use_native_types, use_rdf_type,
auto_compact=auto_compact)
data = json.dumps(obj, indent=indent, separators=separators,
sort_keys=sort_keys, ensure_ascii=ensure_ascii)
stream.write(data.encode(encoding, 'replace'))
def from_rdf(graph, context_data=None, base=None,
use_native_types=False, use_rdf_type=False,
auto_compact=False, startnode=None, index=False):
# TODO: docstring w. args and return value
# TODO: support for index and startnode
if not context_data and auto_compact:
context_data = dict(
(pfx, unicode(ns))
for (pfx, ns) in graph.namespaces() if pfx and
unicode(ns) != u"http://www.w3.org/XML/1998/namespace")
if isinstance(context_data, Context):
context = context_data
context_data = context.to_dict()
else:
context = Context(context_data, base=base)
converter = Converter(context, use_native_types, use_rdf_type)
result = converter.convert(graph)
if converter.context.active:
if isinstance(result, list):
result = {context.get_key(GRAPH): result}
result[CONTEXT] = context_data
return result
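# Rough usage sketch (assumes `g` is an rdflib Graph; the prefix mapping is
# illustrative): from_rdf(g, context_data={"dc": "http://purl.org/dc/terms/"})
# returns a JSON-LD structure whose "@context" is built from that mapping.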
class Converter(object):
def __init__(self, context, use_native_types, use_rdf_type):
self.context = context
self.use_native_types = context.active or use_native_types
self.use_rdf_type = use_rdf_type
def convert(self, graph):
# TODO: bug in rdflib dataset parsing (nquads et al):
# plain triples end up in separate unnamed graphs (rdflib issue #436)
if graph.context_aware:
default_graph = Graph()
graphs = [default_graph]
for g in graph.contexts():
if isinstance(g.identifier, URIRef):
graphs.append(g)
else:
default_graph += g
else:
graphs = [graph]
context = self.context
objs = []
for g in graphs:
obj = {}
graphname = None
if isinstance(g.identifier, URIRef):
graphname = context.shrink_iri(g.identifier)
obj[context.id_key] = graphname
nodes = self.from_graph(g)
if not graphname and len(nodes) == 1:
obj.update(nodes[0])
else:
if not nodes:
continue
obj[context.graph_key] = nodes
if objs and objs[0].get(context.get_key(ID)) == graphname:
objs[0].update(obj)
else:
objs.append(obj)
if len(graphs) == 1 and len(objs) == 1 and not self.context.active:
default = objs[0]
items = default.get(context.graph_key)
if len(default) == 1 and items:
objs = items
elif len(objs) == 1 and self.context.active:
objs = objs[0]
return objs
def from_graph(self, graph):
nodemap = {}
for s in set(graph.subjects()):
## only iri:s and unreferenced (rest will be promoted to top if needed)
if isinstance(s, URIRef) or (isinstance(s, BNode)
and not any(graph.subjects(None, s))):
self.process_subject(graph, s, nodemap)
return nodemap.values()
def process_subject(self, graph, s, nodemap):
if isinstance(s, URIRef):
node_id = self.context.shrink_iri(s)
elif isinstance(s, BNode):
node_id = s.n3()
else:
node_id = None
#used_as_object = any(graph.subjects(None, s))
if node_id in nodemap:
return None
node = {}
node[self.context.id_key] = node_id
nodemap[node_id] = node
for p, o in graph.predicate_objects(s):
self.add_to_node(graph, s, p, o, node, nodemap)
return node
def add_to_node(self, graph, s, p, o, s_node, nodemap):
context = self.context
if isinstance(o, Literal):
datatype = unicode(o.datatype) if o.datatype else None
language = o.language
term = context.find_term(unicode(p), datatype, language=language)
else:
containers = [LIST, None] if graph.value(o, RDF.first) else [None]
for container in containers:
for coercion in (ID, VOCAB, UNDEF):
term = context.find_term(unicode(p), coercion, container)
if term:
break
if term:
break
node = None
use_set = not context.active
if term:
p_key = term.name
if term.type:
node = self.type_coerce(o, term.type)
elif term.language and o.language == term.language:
node = unicode(o)
elif context.language and (
term.language is None and o.language is None):
node = unicode(o)
if term.container == SET:
use_set = True
elif term.container == LIST:
node = [self.type_coerce(v, term.type) or self.to_raw_value(graph, s, v, nodemap)
for v in self.to_collection(graph, o)]
elif term.container == LANG and language:
value = s_node.setdefault(p_key, {})
values = value.get(language)
node = unicode(o)
if values:
if not isinstance(values, list):
value[language] = values = [values]
values.append(node)
else:
value[language] = node
return
else:
p_key = context.to_symbol(p)
# TODO: for coercing curies - quite clumsy; unify to_symbol and find_term?
key_term = context.terms.get(p_key)
if key_term and (key_term.type or key_term.container):
p_key = p
if not term and p == RDF.type and not self.use_rdf_type:
if isinstance(o, URIRef):
node = context.to_symbol(o)
p_key = context.type_key
if node is None:
node = self.to_raw_value(graph, s, o, nodemap)
value = s_node.get(p_key)
if value:
if not isinstance(value, list):
value = [value]
value.append(node)
elif use_set:
value = [node]
else:
value = node
s_node[p_key] = value
def type_coerce(self, o, coerce_type):
if coerce_type == ID:
if isinstance(o, URIRef):
return self.context.shrink_iri(o)
elif isinstance(o, BNode):
return o.n3()
else:
return o
elif coerce_type == VOCAB and isinstance(o, URIRef):
return self.context.to_symbol(o)
elif isinstance(o, Literal) and unicode(o.datatype) == coerce_type:
return o
else:
return None
def to_raw_value(self, graph, s, o, nodemap):
context = self.context
coll = self.to_collection(graph, o)
if coll is not None:
coll = [self.to_raw_value(graph, s, lo, nodemap)
for lo in self.to_collection(graph, o)]
return {context.list_key: coll}
elif isinstance(o, BNode):
embed = False # TODO: self.context.active or using startnode and only one ref
onode = self.process_subject(graph, o, nodemap)
if onode:
if embed and not any(s2 for s2 in graph.subjects(None, o) if s2 != s):
return onode
else:
nodemap[onode[context.id_key]] = onode
return {context.id_key: o.n3()}
elif isinstance(o, URIRef):
# TODO: embed if o != startnode (else reverse)
return {context.id_key: context.shrink_iri(o)}
elif isinstance(o, Literal):
# TODO: if compact
native = self.use_native_types and o.datatype in PLAIN_LITERAL_TYPES
if native:
v = o.toPython()
else:
v = unicode(o)
if o.datatype:
if native:
if self.context.active:
return v
else:
return {context.value_key: v}
return {context.type_key: context.to_symbol(o.datatype),
context.value_key: v}
elif o.language and o.language != context.language:
return {context.lang_key: o.language,
context.value_key: v}
elif not context.active or context.language and not o.language:
return {context.value_key: v}
else:
return v
def to_collection(self, graph, l):
if l != RDF.nil and not graph.value(l, RDF.first):
return None
list_nodes = []
chain = set([l])
while l:
if l == RDF.nil:
return list_nodes
if isinstance(l, URIRef):
return None
first, rest = None, None
for p, o in graph.predicate_objects(l):
if not first and p == RDF.first:
first = o
elif not rest and p == RDF.rest:
rest = o
elif p != RDF.type or o != RDF.List:
return None
list_nodes.append(first)
l = rest
if l in chain:
return None
chain.add(l)
|
twamarc/schemaorg
|
lib/rdflib_jsonld/serializer.py
|
Python
|
apache-2.0
| 12,215 | 0.001555 |
# ICE Revision: $Id$
"""Read a STL file and do simple manipulations"""
from os import path
from PyFoam.Error import error
from PyFoam.ThirdParty.six import next as iterNext
class STLFile(object):
"""Store a complete STL-file and do simple manipulations with it"""
noName="<no name given>"
def __init__(self,fName=None):
"""
@param fName: filename of the STL-file. If None then an empty file is created
"""
self._fp=None
if hasattr(fName, 'read'):
# seems to be a filehandle
self._fp=fName
if hasattr(fName,'name'):
self._filename=fName.name
else:
self._filename="<filehandle>"
else:
self._filename=fName
if self._fp==None:
if fName!=None:
self._contents=[l.strip() for l in open(fName).readlines()]
else:
self._contents=[]
else:
self._contents=[l.strip() for l in self._fp.readlines()]
self.resetInfo()
def resetInfo(self):
"""Set cached info to nothing"""
self._patchInfo=None
def filename(self):
"""The filename (without the full patch)"""
if self._filename==None:
return "<no filename given>"
else:
return path.basename(self._filename)
def expectedToken(self,l,token,i):
if l.strip().find(token)!=0:
error("'%s' expected in line %d of %s" % (token,i+1,self.filename()))
def erasePatches(self,patchNames):
"""Erase the patches in the list"""
processed=[]
keep=True
currentName=None
for l in self._contents:
nextState=keep
parts=l.split()
if len(parts)>0:
if parts[0]=="endsolid":
nextState=True
if currentName!=parts[1]:
error("Patch name",parts[1],"Expected",currentName)
currentName=None
elif parts[0]=="solid":
currentName=parts[1]
if currentName in patchNames:
keep=False
nextState=False
if keep:
processed.append(l)
keep=nextState
self._contents=processed
def mergePatches(self,patchNames,targetPatchName):
"""Merge the patches in the list and put them into a new patch"""
processed=[]
saved=[]
keep=True
currentName=None
for l in self._contents:
nextState=keep
parts=l.split()
if len(parts)>0:
if parts[0]=="endsolid":
nextState=True
if currentName!=parts[1]:
error("Patch name",parts[1],"Expected",currentName)
currentName=None
elif parts[0]=="solid":
currentName=parts[1]
if currentName in patchNames:
keep=False
nextState=False
if keep:
processed.append(l)
elif len(parts)>0:
if parts[0] not in ["solid","endsolid"]:
saved.append(l)
keep=nextState
self._contents=processed
self._contents.append("solid "+targetPatchName)
self._contents+=saved
self._contents.append("endsolid "+targetPatchName)
def patchInfo(self):
"""Get info about the patches. A list of dictionaries with the relevant information"""
if self._patchInfo:
return self._patchInfo
self._patchInfo=[]
newPatch=True
e=enumerate(self._contents)
goOn=True
while goOn:
try:
i,l=iterNext(e)
if newPatch:
self.expectedToken(l,"solid",i)
info={}
if len(l.split())<2:
info["name"]=self.noName
else:
info["name"]=l.split()[1]
info["start"]=i+1
info["facets"]=0
info["min"]=[1e100]*3
info["max"]=[-1e100]*3
newPatch=False
elif l.strip().find("endsolid")==0:
info["end"]=i+1
self._patchInfo.append(info)
newPatch=True
else:
self.expectedToken(l,"facet normal",i)
i,l=iterNext(e)
self.expectedToken(l,"outer loop",i)
for v in range(3):
i,l=iterNext(e)
self.expectedToken(l,"vertex",i)
info["min"]=[min(m) for m in zip(info["min"],
[float(v) for v in l.strip().split()[1:4]])]
info["max"]=[max(m) for m in zip(info["max"],
[float(v) for v in l.strip().split()[1:4]])]
i,l=iterNext(e)
self.expectedToken(l,"endloop",i)
i,l=iterNext(e)
self.expectedToken(l,"endfacet",i)
info["facets"]+=1
except StopIteration:
goOn=False
if not newPatch:
error("File",self.filename(),"seems to be incomplete")
return self._patchInfo
def writeTo(self,fName):
"""Write to a file"""
if hasattr(fName, 'write'):
f=fName
else:
f=open(fName,"w")
f.write("\n".join(self._contents))
def __iter__(self):
for l in self._contents:
yield l
def __iadd__(self,other):
self.resetInfo()
fName=path.splitext(other.filename())[0]
moreThanOne=len(other.patchInfo())>1
nr=1
for l in other:
if l.strip().find("solid")==0 or l.strip().find("endsolid")==0:
parts=l.split()
if len(parts)==1:
l=parts[0]+" "+fName
if moreThanOne:
l+="_%04d" % nr
else:
l=parts[0]+" %s:%s" %(fName," ".join(parts[1:]))
if parts[0]=="solid":
nr+=1
self._contents.append(l)
return self
# Should work with Python3 and Python2
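# Illustrative usage (a hedged sketch: the file names are made up, but the
# methods and the patchInfo() dictionary keys come from the class above):
#
#   stl = STLFile("body.stl")
#   for info in stl.patchInfo():
#       print(info["name"], info["facets"], info["min"], info["max"])
#   stl.erasePatches(["scrap"])                        # drop unwanted patches
#   stl.mergePatches(["inlet", "outlet"], "openings")  # combine into one patch
#   stl += STLFile("wing.stl")                         # appended patches get prefixed names
#   stl.writeTo("combined.stl")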
|
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam
|
PyFoam/Basics/STLFile.py
|
Python
|
gpl-2.0
| 6,542 | 0.024152 |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem074.py
#
# Digit factorial chains
# ======================
# Published on Friday, 16th July 2004, 06:00 pm
#
# The number 145 is well known for the property that the sum of the factorial
# of its digits is equal to 145: 1! + 4! + 5! = 1 + 24 + 120 = 145 Perhaps less
# well known is 169, in that it produces the longest chain of numbers that link
# back to 169; it turns out that there are only three such loops that exist:
# 169 → 363601 → 1454 → 169, 871 → 45361 → 871, 872 → 45362 → 872. It is not
# difficult to prove that EVERY starting number will eventually get stuck in a
# loop. For example, 69 → 363600 → 1454 → 169 → 363601 (→ 1454), 78 → 45360 →
# 871 → 45361 (→ 871), 540 → 145 (→ 145). Starting with 69 produces a chain of
# five non-repeating
# terms, but the longest non-repeating chain with a starting number below one
# million is sixty terms. How many chains, with a starting number below one
# million, contain exactly sixty non-repeating terms?
import projecteuler as pe
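# Hedged sketch of one possible approach; the author's solution is only the
# empty main() stub below, and the `projecteuler` helper module's API is not
# shown here, so this sketch is self-contained. Chain lengths are memoised:
# every member of a newly found loop gets the loop's size as its length, and
# tail members add their distance to the loop.
from math import factorial
_DIGIT_FACT = [factorial(d) for d in range(10)]   # 0! .. 9!
_CHAIN_CACHE = {}                                 # number -> chain length
def digit_factorial_sum(n):
    """Sum of the factorials of the digits of n, e.g. 145 -> 1! + 4! + 5!."""
    return sum(_DIGIT_FACT[int(d)] for d in str(n))
def chain_length(n):
    """Number of non-repeating terms in the chain starting at n."""
    seen = []
    x = n
    while x not in _CHAIN_CACHE and x not in seen:
        seen.append(x)
        x = digit_factorial_sum(x)
    if x in _CHAIN_CACHE:
        base = _CHAIN_CACHE[x]
    else:
        # x closes a previously unseen loop: each loop member's chain length
        # equals the loop size.
        start = seen.index(x)
        loop_len = len(seen) - start
        for m in seen[start:]:
            _CHAIN_CACHE[m] = loop_len
        seen = seen[:start]
        base = loop_len
    for dist, m in enumerate(reversed(seen), 1):
        _CHAIN_CACHE[m] = base + dist
    return _CHAIN_CACHE[n]
def count_chains_with_sixty_terms(limit=1000000):
    """Count starting numbers below limit whose chain has exactly 60 terms."""
    return sum(1 for i in range(1, limit) if chain_length(i) == 60)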
def main():
pass
if __name__ == "__main__":
main()
|
olduvaihand/ProjectEuler
|
src/python/problem074.py
|
Python
|
mit
| 1,088 | 0.001842 |
from Queue import Queue
import elliptics
class SecondaryIndex(object):
    """Dict-like wrapper around a single elliptics secondary index: iteration
    yields the data stored for every key registered under `idx`, while item
    access reads, writes or removes the entry for one key."""
def __init__(self, idx, key_tpl, meta_session):
self.idx = idx
self.key_tpl = key_tpl
self.meta_session = meta_session
def __iter__(self):
for idx in self.meta_session.find_all_indexes([self.idx]):
yield idx.indexes[0].data
def __setitem__(self, key, val):
eid = self.meta_session.transform(self.key_tpl % key)
self.meta_session.set_indexes(eid, [self.idx], [val])
def __getitem__(self, key):
eid = self.meta_session.transform(self.key_tpl % key)
return self.meta_session.list_indexes(eid).get()[0].data
def __delitem__(self, key):
eid = self.meta_session.transform(self.key_tpl % key)
self.meta_session.set_indexes(eid, [], [])
class TagSecondaryIndex(object):
    """Secondary index whose entries can additionally be tagged: payloads are
    written under per-key ids, membership is tracked via the main index plus
    an optional per-tag index, and reads are pipelined in batches."""
BATCH_SIZE = 500
def __init__(self, main_idx, idx_tpl, key_tpl, meta_session, logger=None, namespace=None, batch_size=BATCH_SIZE):
self.main_idx = main_idx
self.idx_tpl = idx_tpl
self.key_tpl = key_tpl
self.meta_session = meta_session.clone()
if namespace:
self.meta_session.set_namespace(namespace)
self.batch_size = batch_size
self.logger = logger
def __iter__(self):
idxes = [idx.id for idx in
self.meta_session.clone().find_all_indexes([self.main_idx]).get()]
for data in self._iter_keys(idxes):
yield data
def tagged(self, tag):
idxes = [idx.id for idx in
self.meta_session.clone().find_all_indexes([self.main_idx, self.idx_tpl % tag])]
self.logger.info('Received {0} records from tagged index {1}'.format(
len(idxes), self.idx_tpl % tag))
processed = 0
for data in self._iter_keys(idxes):
processed += 1
yield data
self.logger.info('Processed {0} records from tagged index {1}'.format(
processed, self.idx_tpl % tag))
def __setitem__(self, key, val):
eid = self.meta_session.transform(self.key_tpl % key)
self.meta_session.clone().write_data(eid, val).get()
def __getitem__(self, key):
eid = self.meta_session.transform(self.key_tpl % key)
return self.meta_session.clone().read_latest(eid).get()[0].data
def set_tag(self, key, tag=None):
eid = self.meta_session.transform(self.key_tpl % key)
tags = [self.main_idx]
if tag:
tags.append(self.idx_tpl % tag)
self.meta_session.clone().set_indexes(eid, tags, [''] * len(tags))
def _fetch_response_data(self, req):
data = None
try:
result = req[1]
result.wait()
data = result.get()[0].data
except Exception as e:
self.logger.error('Failed to fetch record from tagged index: {0} ({1})'.format(req[0], e))
return data
    def _iter_keys(self, keys):
        # Keep up to `batch_size` asynchronous reads in flight: once the queue
        # is full, the oldest pending request is drained and its data yielded.
        if not keys:
            return
q = Queue(self.batch_size)
s = self.meta_session.clone()
for k in keys:
if not q.full():
q.put((k, s.read_latest(k)))
else:
data = self._fetch_response_data(q.get())
q.put((k, s.read_latest(k)))
if data:
yield data
while q.qsize():
data = self._fetch_response_data(q.get())
if data:
yield data
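# Illustrative usage (a hedged sketch: `meta_session` stands for an already
# configured elliptics session, and the index/key templates, tag name and
# helper functions are hypothetical):
#
#   jobs = TagSecondaryIndex('jobs', 'jobs:tag:%s', 'job:%s',
#                            meta_session, logger=logger)
#   jobs['job-1'] = serialized_job        # write the payload under its key
#   jobs.set_tag('job-1', 'pending')      # register it in the main and tag indexes
#   for payload in jobs.tagged('pending'):
#       handle(payload)                   # payloads are read back in batches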
|
yandex/mastermind
|
src/cocaine-app/indexes.py
|
Python
|
gpl-2.0
| 3,478 | 0.001438 |
import warnings
import os
import unittest
from test import test_support
# The warnings module isn't easily tested, because it relies on module
# globals to store configuration information. setUp() and tearDown()
# preserve the current settings to avoid bashing them while running tests.
# To capture the warning messages, a replacement for showwarning() is
# used to save warning information in a global variable.
class WarningMessage:
"Holds results of latest showwarning() call"
pass
def showwarning(message, category, filename, lineno, file=None):
msg.message = str(message)
msg.category = category.__name__
msg.filename = os.path.basename(filename)
msg.lineno = lineno
class TestModule(unittest.TestCase):
def setUp(self):
global msg
msg = WarningMessage()
self._filters = warnings.filters[:]
self._showwarning = warnings.showwarning
warnings.showwarning = showwarning
self.ignored = [w[2].__name__ for w in self._filters
if w[0]=='ignore' and w[1] is None and w[3] is None]
def tearDown(self):
warnings.filters = self._filters[:]
warnings.showwarning = self._showwarning
def test_warn_default_category(self):
for i in range(4):
text = 'multi %d' %i # Different text on each call
warnings.warn(text)
self.assertEqual(msg.message, text)
self.assertEqual(msg.category, 'UserWarning')
def test_warn_specific_category(self):
text = 'None'
# XXX OverflowWarning should go away for Python 2.5.
for category in [DeprecationWarning, FutureWarning, OverflowWarning,
PendingDeprecationWarning, RuntimeWarning,
SyntaxWarning, UserWarning, Warning]:
if category.__name__ in self.ignored:
text = 'filtered out' + category.__name__
warnings.warn(text, category)
self.assertNotEqual(msg.message, text)
else:
text = 'unfiltered %s' % category.__name__
warnings.warn(text, category)
self.assertEqual(msg.message, text)
self.assertEqual(msg.category, category.__name__)
def test_filtering(self):
warnings.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, warnings.warn, 'convert to error')
warnings.resetwarnings()
text = 'handle normally'
warnings.warn(text)
self.assertEqual(msg.message, text)
self.assertEqual(msg.category, 'UserWarning')
warnings.filterwarnings("ignore", "", Warning, "", 0)
text = 'filtered out'
warnings.warn(text)
self.assertNotEqual(msg.message, text)
warnings.resetwarnings()
warnings.filterwarnings("error", "hex*", Warning, "", 0)
self.assertRaises(UserWarning, warnings.warn, 'hex/oct')
text = 'nonmatching text'
warnings.warn(text)
self.assertEqual(msg.message, text)
self.assertEqual(msg.category, 'UserWarning')
def test_main(verbose=None):
test_support.run_unittest(TestModule)
if __name__ == "__main__":
test_main(verbose=True)
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/test/test_warnings.py
|
Python
|
gpl-2.0
| 3,221 | 0.003105 |
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A specified time period of the year, e.g., Spring, Summer, Fall, Winter
"""
# <<< imports
# @generated
from cpsm.element import Element
from cpsm.load_model import SeasonName
from google.appengine.ext import db
# >>> imports
class Season(Element):
""" A specified time period of the year, e.g., Spring, Summer, Fall, Winter
"""
# <<< season.attributes
# @generated
# Date season ends
end_date = db.DateTimeProperty()
# Date season starts
start_date = db.DateTimeProperty()
# Name of the Season
name = SeasonName
# >>> season.attributes
# <<< season.references
# @generated
# Virtual property. Schedules that use this Season.
pass # season_day_type_schedules
# >>> season.references
# <<< season.operations
# @generated
# >>> season.operations
# EOF -------------------------------------------------------------------------
|
rwl/openpowersystem
|
cpsm/load_model/season.py
|
Python
|
agpl-3.0
| 1,789 | 0.00559 |