text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---
## Close
### What is the value of the first triangle number to have over five hundred divisors?
print max([len(m) for m in map(lambda k: [n for n in range(1,(k+1)) if k%n == 0], [sum(range(n)) for n in range(1,1000)])])
| jacksarick/My-Code | Python/python challenges/euler/012_divisable_tri_nums.py | Python | mit | 219 | 0.03653 |
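The one-liner above brute-forces the divisor count of each of the first ~1000 triangle numbers by trial division over the full range 1..k and prints only the largest count it finds. A minimal standalone sketch of the same divisor-counting idea with the usual square-root cutoff, assuming Python 3 (the helper names are illustrative and not part of the original repository):

def count_divisors(k):
    # Count divisors of k by trial division up to sqrt(k); each divisor
    # i below the square root pairs with k // i above it.
    count, i = 0, 1
    while i * i <= k:
        if k % i == 0:
            count += 1 if i * i == k else 2
        i += 1
    return count

def first_triangle_with_divisors(target=500):
    # Walk triangle numbers T(n) = T(n-1) + n until one has more than
    # `target` divisors.
    n, triangle = 1, 1
    while count_divisors(triangle) <= target:
        n += 1
        triangle += n
    return triangle

if __name__ == "__main__":
    print(first_triangle_with_divisors(500))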
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Compute w/ Cells
"""
import functools
import inspect
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from nova import block_device
from nova.cells import manager
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import quota
from nova import test
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
ORIG_COMPUTE_API = None
cfg.CONF.import_opt('enable', 'nova.cells.opts', group='cells')
def stub_call_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
original_instance = kwargs.pop('original_instance', None)
if original_instance:
instance = original_instance
# Restore this in 'child cell DB'
db.instance_update(context, instance['uuid'],
dict(vm_state=instance['vm_state'],
task_state=instance['task_state']))
# Use NoopQuotaDriver in child cells.
saved_quotas = quota.QUOTAS
quota.QUOTAS = quota.QuotaEngine(
quota_driver_class=quota.NoopQuotaDriver())
compute_api.QUOTAS = quota.QUOTAS
try:
return fn(context, instance, *args, **kwargs)
finally:
quota.QUOTAS = saved_quotas
compute_api.QUOTAS = saved_quotas
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
original_instance = kwargs.pop('original_instance', None)
if original_instance:
instance = original_instance
# Restore this in 'child cell DB'
db.instance_update(context, instance['uuid'],
dict(vm_state=instance['vm_state'],
task_state=instance['task_state']))
# Use NoopQuotaDriver in child cells.
saved_quotas = quota.QUOTAS
quota.QUOTAS = quota.QuotaEngine(
quota_driver_class=quota.NoopQuotaDriver())
compute_api.QUOTAS = quota.QUOTAS
try:
fn(context, instance, *args, **kwargs)
finally:
quota.QUOTAS = saved_quotas
compute_api.QUOTAS = saved_quotas
def deploy_stubs(stubs, api, original_instance=None):
call = stub_call_to_cells
cast = stub_cast_to_cells
if original_instance:
kwargs = dict(original_instance=original_instance)
call = functools.partial(stub_call_to_cells, **kwargs)
cast = functools.partial(stub_cast_to_cells, **kwargs)
stubs.Set(api, '_call_to_cells', call)
stubs.Set(api, '_cast_to_cells', cast)
class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
def setUp(self):
super(CellsComputeAPITestCase, self).setUp()
global ORIG_COMPUTE_API
ORIG_COMPUTE_API = self.compute_api
self.flags(enable=True, group='cells')
def _fake_cell_read_only(*args, **kwargs):
return False
def _fake_validate_cell(*args, **kwargs):
return
def _nop_update(context, instance, **kwargs):
return instance
self.compute_api = compute_cells_api.ComputeCellsAPI()
self.stubs.Set(self.compute_api, '_cell_read_only',
_fake_cell_read_only)
self.stubs.Set(self.compute_api, '_validate_cell',
_fake_validate_cell)
deploy_stubs(self.stubs, self.compute_api)
def tearDown(self):
global ORIG_COMPUTE_API
self.compute_api = ORIG_COMPUTE_API
super(CellsComputeAPITestCase, self).tearDown()
def test_instance_metadata(self):
self.skipTest("Test is incompatible with cells.")
def test_evacuate(self):
self.skipTest("Test is incompatible with cells.")
def test_error_evacuate(self):
self.skipTest("Test is incompatible with cells.")
def test_delete_instance_no_cell(self):
cells_rpcapi = self.compute_api.cells_rpcapi
self.mox.StubOutWithMock(cells_rpcapi,
'instance_delete_everywhere')
inst = self._create_fake_instance_obj()
cells_rpcapi.instance_delete_everywhere(self.context,
inst, 'hard')
self.mox.ReplayAll()
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.delete(self.context, inst)
def test_delete_instance_no_cell_constraint_failure_does_not_loop(self):
with mock.patch.object(self.compute_api.cells_rpcapi,
'instance_delete_everywhere'):
inst = self._create_fake_instance_obj()
inst.cell_name = None
inst.destroy = mock.MagicMock()
inst.destroy.side_effect = exception.ObjectActionError(action='',
reason='')
inst.refresh = mock.MagicMock()
self.assertRaises(exception.ObjectActionError,
self.compute_api.delete, self.context, inst)
inst.destroy.assert_called_once_with()
def test_delete_instance_no_cell_constraint_failure_corrects_itself(self):
def add_cell_name(context, instance, delete_type):
instance.cell_name = 'fake_cell_name'
@mock.patch.object(compute_api.API, 'delete')
@mock.patch.object(self.compute_api.cells_rpcapi,
'instance_delete_everywhere', side_effect=add_cell_name)
def _test(mock_delete_everywhere, mock_compute_delete):
inst = self._create_fake_instance_obj()
inst.cell_name = None
inst.destroy = mock.MagicMock()
inst.destroy.side_effect = exception.ObjectActionError(action='',
reason='')
inst.refresh = mock.MagicMock()
self.compute_api.delete(self.context, inst)
inst.destroy.assert_called_once_with()
mock_compute_delete.assert_called_once_with(self.context, inst)
_test()
def test_delete_instance_no_cell_destroy_fails_already_deleted(self):
# If instance.destroy() is reached during _local_delete, it will
# raise ObjectActionError if the instance has already been deleted
# by an instance_destroy_at_top, and instance.refresh() will raise
# InstanceNotFound
instance = objects.Instance(uuid='fake-uuid', cell_name=None)
actionerror = exception.ObjectActionError(action='destroy', reason='')
notfound = exception.InstanceNotFound(instance_id=instance.uuid)
@mock.patch.object(compute_api.API, 'delete')
@mock.patch.object(self.compute_api.cells_rpcapi,
'instance_delete_everywhere')
@mock.patch.object(compute_api.API, '_local_delete',
side_effect=actionerror)
@mock.patch.object(instance, 'refresh', side_effect=notfound)
def _test(mock_refresh, mock_local_delete, mock_delete_everywhere,
mock_compute_delete):
self.compute_api.delete(self.context, instance)
mock_delete_everywhere.assert_called_once_with(self.context,
instance, 'hard')
mock_local_delete.assert_called_once_with(self.context,
instance, mock.ANY, 'delete', self.compute_api._do_delete)
mock_refresh.assert_called_once_with()
self.assertFalse(mock_compute_delete.called)
_test()
def test_delete_instance_no_cell_instance_not_found_already_deleted(self):
# If anything in _local_delete accesses the instance, causing a db
# lookup before instance.destroy() is reached, and the instance has
# already been deleted by an instance_destroy_at_top,
# InstanceNotFound will be raised
instance = objects.Instance(uuid='fake-uuid', cell_name=None)
notfound = exception.InstanceNotFound(instance_id=instance.uuid)
@mock.patch.object(compute_api.API, 'delete')
@mock.patch.object(self.compute_api.cells_rpcapi,
'instance_delete_everywhere')
@mock.patch.object(compute_api.API, '_local_delete',
side_effect=notfound)
def _test(mock_local_delete, mock_delete_everywhere,
mock_compute_delete):
self.compute_api.delete(self.context, instance)
mock_delete_everywhere.assert_called_once_with(self.context,
instance, 'hard')
mock_local_delete.assert_called_once_with(self.context,
instance, mock.ANY, 'delete', self.compute_api._do_delete)
self.assertFalse(mock_compute_delete.called)
_test()
def test_soft_delete_instance_no_cell(self):
cells_rpcapi = self.compute_api.cells_rpcapi
self.mox.StubOutWithMock(cells_rpcapi,
'instance_delete_everywhere')
inst = self._create_fake_instance_obj()
cells_rpcapi.instance_delete_everywhere(self.context,
inst, 'soft')
self.mox.ReplayAll()
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.soft_delete(self.context, inst)
def test_get_migrations(self):
filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
migrations = {'migrations': [{'id': 1234}]}
cells_rpcapi = self.compute_api.cells_rpcapi
self.mox.StubOutWithMock(cells_rpcapi, 'get_migrations')
cells_rpcapi.get_migrations(self.context,
filters).AndReturn(migrations)
self.mox.ReplayAll()
response = self.compute_api.get_migrations(self.context, filters)
self.assertEqual(migrations, response)
def test_create_block_device_mapping(self):
instance_type = {'swap': 1, 'ephemeral_gb': 1}
instance = self._create_fake_instance_obj()
bdms = [block_device.BlockDeviceDict({'source_type': 'image',
'destination_type': 'local',
'image_id': 'fake-image',
'boot_index': 0})]
self.compute_api._create_block_device_mapping(
instance_type, instance.uuid, bdms)
bdms = db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])
self.assertEqual(0, len(bdms))
def test_create_bdm_from_flavor(self):
self.skipTest("Test is incompatible with cells.")
@mock.patch('nova.cells.messaging._TargetedMessage')
def test_rebuild_sig(self, mock_msg):
# TODO(belliott) Cells could benefit from better testing to ensure API
# and manager signatures stay up to date
def wire(version):
# wire the rpc cast directly to the manager method to make sure
# the signature matches
cells_mgr = manager.CellsManager()
def cast(context, method, *args, **kwargs):
fn = getattr(cells_mgr, method)
fn(context, *args, **kwargs)
cells_mgr.cast = cast
return cells_mgr
cells_rpcapi = self.compute_api.cells_rpcapi
client = cells_rpcapi.client
with mock.patch.object(client, 'prepare', side_effect=wire):
inst = self._create_fake_instance_obj()
inst.cell_name = 'mycell'
cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None,
None, None, None, None,
recreate=False,
on_shared_storage=False, host='host',
preserve_ephemeral=True, kwargs=None)
# one targeted message should have been created
self.assertEqual(1, mock_msg.call_count)
class CellsConductorAPIRPCRedirect(test.NoDBTestCase):
def setUp(self):
super(CellsConductorAPIRPCRedirect, self).setUp()
self.compute_api = compute_cells_api.ComputeCellsAPI()
self.cells_rpcapi = mock.MagicMock()
self.compute_api._compute_task_api.cells_rpcapi = self.cells_rpcapi
self.context = context.RequestContext('fake', 'fake')
@mock.patch.object(compute_api.API, '_record_action_start')
@mock.patch.object(compute_api.API, '_provision_instances')
@mock.patch.object(compute_api.API, '_check_and_transform_bdm')
@mock.patch.object(compute_api.API, '_get_image')
@mock.patch.object(compute_api.API, '_validate_and_build_base_options')
def test_build_instances(self, _validate, _get_image, _check_bdm,
_provision, _record_action_start):
_get_image.return_value = (None, 'fake-image')
_validate.return_value = ({}, 1)
_check_bdm.return_value = 'bdms'
_provision.return_value = 'instances'
self.compute_api.create(self.context, 'fake-flavor', 'fake-image')
# Subsequent tests in class are verifying the hooking. We don't check
# args since this is verified in compute test code.
self.assertTrue(self.cells_rpcapi.build_instances.called)
@mock.patch.object(compute_api.API, '_record_action_start')
@mock.patch.object(compute_api.API, '_resize_cells_support')
@mock.patch.object(compute_api.API, '_reserve_quota_delta')
@mock.patch.object(compute_api.API, '_upsize_quota_delta')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(flavors, 'extract_flavor')
@mock.patch.object(compute_api.API, '_check_auto_disk_config')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def test_resize_instance(self, _bdms, _check, _extract, _save, _upsize,
_reserve, _cells, _record):
flavor = objects.Flavor(**test_flavor.fake_flavor)
_extract.return_value = flavor
orig_system_metadata = {}
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'])
instance.flavor = flavor
instance.old_flavor = instance.new_flavor = None
self.compute_api.resize(self.context, instance)
self.assertTrue(self.cells_rpcapi.resize_instance.called)
@mock.patch.object(compute_api.API, '_record_action_start')
@mock.patch.object(objects.Instance, 'save')
def test_live_migrate_instance(self, instance_save, _record):
orig_system_metadata = {}
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'])
self.compute_api.live_migrate(self.context, instance,
True, True, 'fake_dest_host')
self.assertTrue(self.cells_rpcapi.live_migrate_instance.called)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(compute_api.API, '_get_image')
@mock.patch.object(compute_api.API, '_check_auto_disk_config')
@mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
@mock.patch.object(compute_api.API, '_record_action_start')
def test_rebuild_instance(self, _record_action_start,
_checks_for_create_and_rebuild, _check_auto_disk_config,
_get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
orig_system_metadata = {}
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'])
get_flavor.return_value = ''
image_href = ''
image = {"min_ram": 10, "min_disk": 1,
"properties": {'architecture': 'x86_64'}}
admin_pass = ''
files_to_inject = []
bdms = []
_get_image.return_value = (None, image)
bdm_get_by_instance_uuid.return_value = bdms
self.compute_api.rebuild(self.context, instance, image_href,
admin_pass, files_to_inject)
self.assertTrue(self.cells_rpcapi.rebuild_instance.called)
def test_check_equal(self):
task_api = self.compute_api.compute_task_api
tests = set()
for (name, value) in inspect.getmembers(self, inspect.ismethod):
if name.startswith('test_') and name != 'test_check_equal':
tests.add(name[5:])
if tests != set(task_api.cells_compatible):
self.fail("Testcases not equivalent to cells_compatible list")
class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
def setUp(self):
super(CellsComputePolicyTestCase, self).setUp()
global ORIG_COMPUTE_API
ORIG_COMPUTE_API = self.compute_api
self.compute_api = compute_cells_api.ComputeCellsAPI()
deploy_stubs(self.stubs, self.compute_api)
def tearDown(self):
global ORIG_COMPUTE_API
self.compute_api = ORIG_COMPUTE_API
super(CellsComputePolicyTestCase, self).tearDown()
| akash1808/nova_test_latest | nova/tests/unit/compute/test_compute_cells.py | Python | apache-2.0 | 18,463 | 0.0013 |
"""
Support for KNX/IP climate devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.knx/
"""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import (
PLATFORM_SCHEMA, SUPPORT_ON_OFF, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE, STATE_HEAT,
STATE_IDLE, STATE_MANUAL, STATE_DRY,
STATE_FAN_ONLY, STATE_ECO, ClimateDevice)
from homeassistant.const import (
ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS)
from homeassistant.core import callback
from homeassistant.components.knx import DATA_KNX, ATTR_DISCOVER_DEVICES
CONF_SETPOINT_SHIFT_ADDRESS = 'setpoint_shift_address'
CONF_SETPOINT_SHIFT_STATE_ADDRESS = 'setpoint_shift_state_address'
CONF_SETPOINT_SHIFT_STEP = 'setpoint_shift_step'
CONF_SETPOINT_SHIFT_MAX = 'setpoint_shift_max'
CONF_SETPOINT_SHIFT_MIN = 'setpoint_shift_min'
CONF_TEMPERATURE_ADDRESS = 'temperature_address'
CONF_TARGET_TEMPERATURE_ADDRESS = 'target_temperature_address'
CONF_OPERATION_MODE_ADDRESS = 'operation_mode_address'
CONF_OPERATION_MODE_STATE_ADDRESS = 'operation_mode_state_address'
CONF_CONTROLLER_STATUS_ADDRESS = 'controller_status_address'
CONF_CONTROLLER_STATUS_STATE_ADDRESS = 'controller_status_state_address'
CONF_CONTROLLER_MODE_ADDRESS = 'controller_mode_address'
CONF_CONTROLLER_MODE_STATE_ADDRESS = 'controller_mode_state_address'
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = \
'operation_mode_frost_protection_address'
CONF_OPERATION_MODE_NIGHT_ADDRESS = 'operation_mode_night_address'
CONF_OPERATION_MODE_COMFORT_ADDRESS = 'operation_mode_comfort_address'
CONF_OPERATION_MODES = 'operation_modes'
CONF_ON_OFF_ADDRESS = 'on_off_address'
CONF_ON_OFF_STATE_ADDRESS = 'on_off_state_address'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
DEFAULT_NAME = 'KNX Climate'
DEFAULT_SETPOINT_SHIFT_STEP = 0.5
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEPENDENCIES = ['knx']
# Map KNX operation modes to HA modes. This list might not be full.
OPERATION_MODES = {
# Map DPT 201.100 HVAC operating modes
"Frost Protection": STATE_MANUAL,
"Night": STATE_IDLE,
"Standby": STATE_ECO,
"Comfort": STATE_HEAT,
# Map DPT 201.104 HVAC control modes
"Fan only": STATE_FAN_ONLY,
"Dehumidification": STATE_DRY
}
OPERATION_MODES_INV = dict((
reversed(item) for item in OPERATION_MODES.items()))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STEP,
default=DEFAULT_SETPOINT_SHIFT_STEP): vol.All(
float, vol.Range(min=0, max=2)),
vol.Optional(CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX):
vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN):
vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODES): vol.All(cv.ensure_list,
[vol.In(OPERATION_MODES)]),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
})
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up climate(s) for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
else:
async_add_entities_config(hass, config, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up climates for KNX platform configured within platform."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXClimate(device))
async_add_entities(entities)
@callback
def async_add_entities_config(hass, config, async_add_entities):
"""Set up climate for KNX platform configured within platform."""
import xknx
climate_mode = xknx.devices.ClimateMode(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME) + " Mode",
group_address_operation_mode=config.get(CONF_OPERATION_MODE_ADDRESS),
group_address_operation_mode_state=config.get(
CONF_OPERATION_MODE_STATE_ADDRESS),
group_address_controller_status=config.get(
CONF_CONTROLLER_STATUS_ADDRESS),
group_address_controller_status_state=config.get(
CONF_CONTROLLER_STATUS_STATE_ADDRESS),
group_address_controller_mode=config.get(
CONF_CONTROLLER_MODE_ADDRESS),
group_address_controller_mode_state=config.get(
CONF_CONTROLLER_MODE_STATE_ADDRESS),
group_address_operation_mode_protection=config.get(
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS),
group_address_operation_mode_night=config.get(
CONF_OPERATION_MODE_NIGHT_ADDRESS),
group_address_operation_mode_comfort=config.get(
CONF_OPERATION_MODE_COMFORT_ADDRESS),
operation_modes=config.get(
CONF_OPERATION_MODES))
hass.data[DATA_KNX].xknx.devices.add(climate_mode)
climate = xknx.devices.Climate(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME),
group_address_temperature=config.get(CONF_TEMPERATURE_ADDRESS),
group_address_target_temperature=config.get(
CONF_TARGET_TEMPERATURE_ADDRESS),
group_address_setpoint_shift=config.get(CONF_SETPOINT_SHIFT_ADDRESS),
group_address_setpoint_shift_state=config.get(
CONF_SETPOINT_SHIFT_STATE_ADDRESS),
setpoint_shift_step=config.get(CONF_SETPOINT_SHIFT_STEP),
setpoint_shift_max=config.get(CONF_SETPOINT_SHIFT_MAX),
setpoint_shift_min=config.get(CONF_SETPOINT_SHIFT_MIN),
group_address_on_off=config.get(
CONF_ON_OFF_ADDRESS),
group_address_on_off_state=config.get(
CONF_ON_OFF_STATE_ADDRESS),
min_temp=config.get(CONF_MIN_TEMP),
max_temp=config.get(CONF_MAX_TEMP),
mode=climate_mode)
hass.data[DATA_KNX].xknx.devices.add(climate)
async_add_entities([KNXClimate(climate)])
class KNXClimate(ClimateDevice):
"""Representation of a KNX climate device."""
def __init__(self, device):
"""Initialize of a KNX climate device."""
self.device = device
self._unit_of_measurement = TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
support = SUPPORT_TARGET_TEMPERATURE
if self.device.mode.supports_operation_mode:
support |= SUPPORT_OPERATION_MODE
if self.device.supports_on_off:
support |= SUPPORT_ON_OFF
return support
async def async_added_to_hass(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.device.register_device_updated_cb(after_update_callback)
@property
def name(self):
"""Return the name of the KNX device."""
return self.device.name
@property
def available(self):
"""Return True if entity is available."""
return self.hass.data[DATA_KNX].connected
@property
def should_poll(self):
"""No polling needed within KNX."""
return False
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self.device.temperature.value
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.device.setpoint_shift_step
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.device.target_temperature.value
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.device.target_temperature_min
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.device.target_temperature_max
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
await self.device.set_target_temperature(temperature)
await self.async_update_ha_state()
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self.device.mode.supports_operation_mode:
return OPERATION_MODES.get(self.device.mode.operation_mode.value)
return None
@property
def operation_list(self):
"""Return the list of available operation modes."""
return [OPERATION_MODES.get(operation_mode.value) for
operation_mode in
self.device.mode.operation_modes]
async def async_set_operation_mode(self, operation_mode):
"""Set operation mode."""
if self.device.mode.supports_operation_mode:
from xknx.knx import HVACOperationMode
knx_operation_mode = HVACOperationMode(
OPERATION_MODES_INV.get(operation_mode))
await self.device.mode.set_operation_mode(knx_operation_mode)
await self.async_update_ha_state()
@property
def is_on(self):
"""Return true if the device is on."""
if self.device.supports_on_off:
return self.device.is_on
return None
async def async_turn_on(self):
"""Turn on."""
await self.device.turn_on()
async def async_turn_off(self):
"""Turn off."""
await self.device.turn_off()
| PetePriority/home-assistant | homeassistant/components/knx/climate.py | Python | apache-2.0 | 11,010 | 0 |
"""Entity class that represents Z-Wave node."""
# pylint: disable=import-outside-toplevel
from itertools import count
from homeassistant.const import ATTR_BATTERY_LEVEL, ATTR_ENTITY_ID, ATTR_WAKEUP
from homeassistant.core import callback
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import async_get_registry
from .const import (
ATTR_BASIC_LEVEL,
ATTR_NODE_ID,
ATTR_SCENE_DATA,
ATTR_SCENE_ID,
COMMAND_CLASS_CENTRAL_SCENE,
COMMAND_CLASS_VERSION,
COMMAND_CLASS_WAKE_UP,
DOMAIN,
EVENT_NODE_EVENT,
EVENT_SCENE_ACTIVATED,
)
from .util import is_node_parsed, node_device_id_and_name, node_name
ATTR_QUERY_STAGE = "query_stage"
ATTR_AWAKE = "is_awake"
ATTR_READY = "is_ready"
ATTR_FAILED = "is_failed"
ATTR_PRODUCT_NAME = "product_name"
ATTR_MANUFACTURER_NAME = "manufacturer_name"
ATTR_NODE_NAME = "node_name"
ATTR_APPLICATION_VERSION = "application_version"
STAGE_COMPLETE = "Complete"
_REQUIRED_ATTRIBUTES = [
ATTR_QUERY_STAGE,
ATTR_AWAKE,
ATTR_READY,
ATTR_FAILED,
"is_info_received",
"max_baud_rate",
"is_zwave_plus",
]
_OPTIONAL_ATTRIBUTES = ["capabilities", "neighbors", "location"]
_COMM_ATTRIBUTES = [
"sentCnt",
"sentFailed",
"retries",
"receivedCnt",
"receivedDups",
"receivedUnsolicited",
"sentTS",
"receivedTS",
"lastRequestRTT",
"averageRequestRTT",
"lastResponseRTT",
"averageResponseRTT",
]
ATTRIBUTES = _REQUIRED_ATTRIBUTES + _OPTIONAL_ATTRIBUTES
class ZWaveBaseEntity(Entity):
"""Base class for Z-Wave Node and Value entities."""
def __init__(self):
"""Initialize the base Z-Wave class."""
self._update_scheduled = False
def maybe_schedule_update(self):
"""Maybe schedule state update.
If value changed after device was created but before setup_platform
was called - skip updating state.
"""
if self.hass and not self._update_scheduled:
self.hass.add_job(self._schedule_update)
@callback
def _schedule_update(self):
"""Schedule delayed update."""
if self._update_scheduled:
return
@callback
def do_update():
"""Really update."""
self.async_write_ha_state()
self._update_scheduled = False
self._update_scheduled = True
self.hass.loop.call_later(0.1, do_update)
def try_remove_and_add(self):
"""Remove this entity and add it back."""
async def _async_remove_and_add():
await self.async_remove()
self.entity_id = None
await self.platform.async_add_entities([self])
if self.hass and self.platform:
self.hass.add_job(_async_remove_and_add)
async def node_removed(self):
"""Call when a node is removed from the Z-Wave network."""
await self.async_remove()
registry = await async_get_registry(self.hass)
if self.entity_id not in registry.entities:
return
registry.async_remove(self.entity_id)
class ZWaveNodeEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node."""
def __init__(self, node, network):
"""Initialize node."""
# pylint: disable=import-error
super().__init__()
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self._network = network
self.node = node
self.node_id = self.node.node_id
self._name = node_name(self.node)
self._product_name = node.product_name
self._manufacturer_name = node.manufacturer_name
self._unique_id = self._compute_unique_id()
self._application_version = None
self._attributes = {}
self.wakeup_interval = None
self.location = None
self.battery_level = None
dispatcher.connect(
self.network_node_value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED
)
dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_NODE)
dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_NOTIFICATION)
dispatcher.connect(self.network_node_event, ZWaveNetwork.SIGNAL_NODE_EVENT)
dispatcher.connect(
self.network_scene_activated, ZWaveNetwork.SIGNAL_SCENE_EVENT
)
@property
def unique_id(self):
"""Return unique ID of Z-wave node."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
identifier, name = node_device_id_and_name(self.node)
info = {
"identifiers": {identifier},
"manufacturer": self.node.manufacturer_name,
"model": self.node.product_name,
"name": name,
}
if self.node_id > 1:
info["via_device"] = (DOMAIN, 1)
return info
def maybe_update_application_version(self, value):
"""Update application version if value is a Command Class Version, Application Value."""
if (
value
and value.command_class == COMMAND_CLASS_VERSION
and value.label == "Application Version"
):
self._application_version = value.data
def network_node_value_added(self, node=None, value=None, args=None):
"""Handle a added value to a none on the network."""
if node and node.node_id != self.node_id:
return
if args is not None and "nodeId" in args and args["nodeId"] != self.node_id:
return
self.maybe_update_application_version(value)
def network_node_changed(self, node=None, value=None, args=None):
"""Handle a changed node on the network."""
if node and node.node_id != self.node_id:
return
if args is not None and "nodeId" in args and args["nodeId"] != self.node_id:
return
# Process central scene activation
if value is not None and value.command_class == COMMAND_CLASS_CENTRAL_SCENE:
self.central_scene_activated(value.index, value.data)
self.maybe_update_application_version(value)
self.node_changed()
def get_node_statistics(self):
"""Retrieve statistics from the node."""
return self._network.manager.getNodeStatistics(
self._network.home_id, self.node_id
)
def node_changed(self):
"""Update node properties."""
attributes = {}
stats = self.get_node_statistics()
for attr in ATTRIBUTES:
value = getattr(self.node, attr)
if attr in _REQUIRED_ATTRIBUTES or value:
attributes[attr] = value
for attr in _COMM_ATTRIBUTES:
attributes[attr] = stats[attr]
if self.node.can_wake_up():
for value in self.node.get_values(COMMAND_CLASS_WAKE_UP).values():
if value.index != 0:
continue
self.wakeup_interval = value.data
break
else:
self.wakeup_interval = None
self.battery_level = self.node.get_battery_level()
self._product_name = self.node.product_name
self._manufacturer_name = self.node.manufacturer_name
self._name = node_name(self.node)
self._attributes = attributes
if not self._unique_id:
self._unique_id = self._compute_unique_id()
if self._unique_id:
# Node info parsed. Remove and re-add
self.try_remove_and_add()
self.maybe_schedule_update()
async def node_renamed(self, update_ids=False):
"""Rename the node and update any IDs."""
identifier, self._name = node_device_id_and_name(self.node)
# Set the name in the devices. If they're customised
# the customisation will not be stored as name and will stick.
dev_reg = await get_dev_reg(self.hass)
device = dev_reg.async_get_device(identifiers={identifier}, connections=set())
dev_reg.async_update_device(device.id, name=self._name)
# update sub-devices too
for i in count(2):
identifier, new_name = node_device_id_and_name(self.node, i)
device = dev_reg.async_get_device(
identifiers={identifier}, connections=set()
)
if not device:
break
dev_reg.async_update_device(device.id, name=new_name)
# Update entity ID.
if update_ids:
ent_reg = await async_get_registry(self.hass)
new_entity_id = ent_reg.async_generate_entity_id(
DOMAIN, self._name, self.platform.entities.keys() - {self.entity_id}
)
if new_entity_id != self.entity_id:
# Don't change the name attribute, it will be None unless
# customised and if it's been customised, keep the
# customisation.
ent_reg.async_update_entity(self.entity_id, new_entity_id=new_entity_id)
return
# If neither branch above returned (no entity id update was needed),
# just write the updated state.
self.async_write_ha_state()
def network_node_event(self, node, value):
"""Handle a node activated event on the network."""
if node.node_id == self.node.node_id:
self.node_event(value)
def node_event(self, value):
"""Handle a node activated event for this node."""
if self.hass is None:
return
self.hass.bus.fire(
EVENT_NODE_EVENT,
{
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node.node_id,
ATTR_BASIC_LEVEL: value,
},
)
def network_scene_activated(self, node, scene_id):
"""Handle a scene activated event on the network."""
if node.node_id == self.node.node_id:
self.scene_activated(scene_id)
def scene_activated(self, scene_id):
"""Handle an activated scene for this node."""
if self.hass is None:
return
self.hass.bus.fire(
EVENT_SCENE_ACTIVATED,
{
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node.node_id,
ATTR_SCENE_ID: scene_id,
},
)
def central_scene_activated(self, scene_id, scene_data):
"""Handle an activated central scene for this node."""
if self.hass is None:
return
self.hass.bus.fire(
EVENT_SCENE_ACTIVATED,
{
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node_id,
ATTR_SCENE_ID: scene_id,
ATTR_SCENE_DATA: scene_data,
},
)
@property
def state(self):
"""Return the state."""
if ATTR_READY not in self._attributes:
return None
if self._attributes[ATTR_FAILED]:
return "dead"
if self._attributes[ATTR_QUERY_STAGE] != "Complete":
return "initializing"
if not self._attributes[ATTR_AWAKE]:
return "sleeping"
if self._attributes[ATTR_READY]:
return "ready"
return None
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
ATTR_NODE_ID: self.node_id,
ATTR_NODE_NAME: self._name,
ATTR_MANUFACTURER_NAME: self._manufacturer_name,
ATTR_PRODUCT_NAME: self._product_name,
}
attrs.update(self._attributes)
if self.battery_level is not None:
attrs[ATTR_BATTERY_LEVEL] = self.battery_level
if self.wakeup_interval is not None:
attrs[ATTR_WAKEUP] = self.wakeup_interval
if self._application_version is not None:
attrs[ATTR_APPLICATION_VERSION] = self._application_version
return attrs
def _compute_unique_id(self):
if is_node_parsed(self.node) or self.node.is_ready:
return f"node-{self.node_id}"
return None
| tboyce021/home-assistant | homeassistant/components/zwave/node_entity.py | Python | apache-2.0 | 12,522 | 0.000878 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pytz
from openerp import SUPERUSER_ID, workflow
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record_list, browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
line_obj = self.pool['purchase.order.line']
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
line_price = line_obj._calc_line_base_price(cr, uid, line,
context=context)
line_qty = line_obj._calc_line_quantity(cr, uid, line,
context=context)
for c in self.pool['account.tax'].compute_all(
cr, uid, line.taxes_id, line_price, line_qty,
line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
pol_obj = self.pool.get('purchase.order.line')
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
pol_ids = pol_obj.search(cr, uid, [
('order_id', '=', po.id), '|', ('date_planned', '=', po.minimum_planned_date), ('date_planned', '<', value)
], context=context)
pol_obj.write(cr, uid, pol_ids, {'date_planned': value}, context=context)
self.invalidate_cache(cr, uid, context=context)
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.state == 'cancel':
continue
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.order_id, sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
purchase_order_line p on (p.id=m.purchase_line_id)
WHERE
p.order_id IN %s GROUP BY m.state, p.order_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_purchase_order(self, cr, uid, ids, context=None):
result = {}
for order in self.browse(cr, uid, ids, context=context):
result[order.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line if line.state != 'cancel')
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
type_obj = self.pool.get('stock.picking.type')
user_obj = self.pool.get('res.users')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)], context=context)
if not types:
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id', '=', False)], context=context)
if not types:
raise osv.except_osv(_('Error!'), _("Make sure you have at least an incoming picking type defined"))
return types[0]
def _get_picking_ids(self, cr, uid, ids, field_names, args, context=None):
res = {}
for po_id in ids:
res[po_id] = []
query = """
SELECT picking_id, po.id FROM stock_picking p, stock_move m, purchase_order_line pol, purchase_order po
WHERE po.id in %s and po.id = pol.order_id and pol.id = m.purchase_line_id and m.picking_id = p.id
GROUP BY picking_id, po.id
"""
cr.execute(query, (tuple(ids), ))
picks = cr.fetchall()
for pick_id, po_id in picks:
res[po_id].append(pick_id)
return res
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
return {
purchase.id: {
'shipment_count': len(purchase.picking_ids),
'invoice_count': len(purchase.invoice_ids),
}
for purchase in self.browse(cr, uid, ids, context=context)
}
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ'),
('bid', 'Bid Received'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Confirmed'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
READONLY_STATES = {
'confirmed': [('readonly', True)],
'approved': [('readonly', True)],
'done': [('readonly', True)]
}
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'approved',
'purchase.mt_rfq_done': lambda self, cr, uid, obj, ctx=None: obj.state == 'done',
},
}
_columns = {
'name': fields.char('Order Reference', required=True, select=True, copy=False,
help="Unique number of the purchase order, "
"computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', copy=False,
help="Reference of the document that generated this purchase order "
"request; a sales order or an internal procurement request."),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=False,
help="Reference of the sales order or bid sent by your supplier. "
"It's mainly used to do the matching when you receive the "
"products as this reference is usually written on the "
"delivery order sent by your supplier."),
'date_order':fields.datetime('Order Date', required=True, states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)]},
select=True, help="Depicts the date where the Quotation should be validated and converted into a Purchase Order, by default it's the creation date.",
copy=False),
'date_approve':fields.date('Date Approved', readonly=1, select=True, copy=False,
help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states=READONLY_STATES,
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states=READONLY_STATES,
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states=READONLY_STATES),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states=READONLY_STATES, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.many2one('res.currency','Currency', required=True, states=READONLY_STATES),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True,
help="The status of the purchase order or the quotation request. "
"A request for quotation is a purchase order in a 'Draft' status. "
"Then the order has to be confirmed by the user, the status switch "
"to 'Confirmed'. Then the supplier must confirm the order to change "
"the status to 'Approved'. When the purchase order is paid and "
"received, the status becomes 'Done'. If a cancel action occurs in "
"the invoice or in the receipt of goods, the status becomes "
"in exception.",
select=True, copy=False),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines',
states={'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=True),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True, copy=False),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id',
'invoice_id', 'Invoices', copy=False,
help="Invoices generated for a purchase order"),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking List', help="This is the list of receipts that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, copy=False,
help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', copy=False,
help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control / On Purchase Order lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Based on incoming shipments: let you create an invoice when receipts are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
'purchase.order': (_get_purchase_order, ['order_line'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'incoterm_id': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
'bid_date': fields.date('Bid Received On', readonly=True, help="Date on which the bid was received"),
'bid_validity': fields.date('Bid Valid Until', help="Date on which the bid expired"),
'picking_type_id': fields.many2one('stock.picking.type', 'Deliver To', help="This will determine picking type of incoming shipment", required=True,
states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)], 'done': [('readonly', True)]}),
'related_location_id': fields.related('picking_type_id', 'default_location_dest_id', type='many2one', relation='stock.location', string="Related location", store=True),
'related_usage': fields.related('location_id', 'usage', type='char'),
'shipment_count': fields.function(_count_all, type='integer', string='Incoming Shipments', multi=True),
'invoice_count': fields.function(_count_all, type='integer', string='Invoices', multi=True)
}
_defaults = {
'date_order': fields.datetime.now,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
'currency_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id,
'picking_type_id': _get_picking_in,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order', context=context) or '/'
context = dict(context or {}, mail_create_nolog=True)
order = super(purchase_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [order], body=_("RFQ created"), context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
self.signal_workflow(cr, uid, unlink_ids, 'purchase_cancel')
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
if status in ('draft', 'cancel'):
order_line_ids += [po_line.id for po_line in order.order_line]
else: # Do not change the status of already cancelled lines
order_line_ids += [po_line.id for po_line in order.order_line if po_line.state != 'cancel']
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
if order_line_ids and status == 'cancel':
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', order_line_ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
#Destination address is used when dropshipping
def onchange_dest_address_id(self, cr, uid, ids, address_id, context=None):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {}
supplier = address.browse(cr, uid, address_id, context=context)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
value = {}
if picking_type_id:
picktype = self.pool.get("stock.picking.type").browse(cr, uid, picking_type_id, context=context)
if picktype.default_location_dest_id:
value.update({'location_id': picktype.default_location_dest_id.id, 'related_usage': picktype.default_location_dest_id.usage})
value.update({'related_location_id': picktype.default_location_dest_id.id})
return {'value': value}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
fp = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, context=context)
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'], context=context)
supplier = partner.browse(cr, uid, partner_id, context=context)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': fp or supplier.property_account_position and supplier.property_account_position.id,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that displays existing invoices of the given purchase order ids. It can either be in a list or in a form view, if there is only one invoice to show.
'''
context = dict(context or {})
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line if line.state != 'cancel']})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
This function returns an action that displays existing picking orders of the given purchase order ids.
'''
if context is None:
context = {}
mod_obj = self.pool.get('ir.model.data')
dummy, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree'))
action = self.pool.get('ir.actions.act_window').read(cr, uid, action_id, context=context)
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
#override the context to get rid of the default filtering on picking type
action['context'] = {}
#choose the view_mode accordingly
if len(pick_ids) > 1:
action['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
action['views'] = [(res and res[1] or False, 'form')]
action['res_id'] = pick_ids and pick_ids[0] or False
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def wkf_bid_received(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state':'bid', 'bid_date': fields.date.context_today(self,cr,uid,context=context)})
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
if not context:
context= {}
ir_model_data = self.pool.get('ir.model.data')
try:
if context.get('send_rfq', False):
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
else:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase_done')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the request for quotation and marks it as sent, so that the next step of the workflow can be seen more easily.
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'send_rfq')
return self.pool['report'].get_action(cr, uid, ids, 'purchase.report_purchasequotation', context=context)
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not any(line.state != 'cancel' for line in po.order_line):
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
if po.invoice_method == 'picking' and not any([l.product_id and l.product_id.type in ('product', 'consu') and l.state != 'cancel' for l in po.order_line]):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm a purchase order with Invoice Control Method 'Based on incoming shipments' that doesn't contain any stockable item."))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define an expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position or False
return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
'purchase_line_id': order_line.id,
}
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
"""Prepare the dict of values to create the new invoice for a
purchase order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: purchase.order record to invoice
:param list(int) line_ids: list of invoice line IDs that must be
attached to the invoice
        :return: dict of values to create() the invoice
"""
journal_ids = self.pool['account.journal'].search(
cr, uid, [('type', '=', 'purchase'),
('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise osv.except_osv(
_('Error!'),
_('Define purchase journal for this company: "%s" (id:%d).') % \
(order.company_id.name, order.company_id.id))
return {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': order.partner_id.property_account_payable.id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line': [(6, 0, line_ids)],
'origin': order.name,
'fiscal_position': order.fiscal_position.id or False,
'payment_term': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
self.set_order_line_status(cr, uid, ids, 'draft', context=context)
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
self.create_workflow(cr, uid, [p_id])
return True
def wkf_po_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'done'}, context=context)
self.set_order_line_status(cr, uid, ids, 'done', context=context)
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
context = dict(context or {})
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
            # generate the invoice lines corresponding to the PO lines and link them to the created invoice (inv_id) and the PO lines
inv_lines = []
for po_line in order.order_line:
if po_line.state == 'cancel':
continue
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
# get invoice data and create invoice
inv_data = self._prepare_invoice(cr, uid, order, inv_lines, context=context)
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# compute the invoice
inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]})
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
for move in pick.move_lines:
if pick.state == 'done':
raise osv.except_osv(
_('Unable to cancel the purchase order %s.') % (purchase.name),
_('You have already received some goods for it. '))
self.pool.get('stock.picking').action_cancel(cr, uid, [x.id for x in purchase.picking_ids if x.state != 'cancel'], context=context)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel', 'draft'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('You must first cancel all invoices related to this purchase order.'))
self.pool.get('account.invoice') \
.signal_workflow(cr, uid, map(attrgetter('id'), purchase.invoice_ids), 'invoice_cancel')
self.signal_workflow(cr, uid, ids, 'purchase_cancel')
return True
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
''' prepare the stock move data from the PO line. This function returns a list of dictionary ready to be used in stock.move's create()'''
product_uom = self.pool.get('product.uom')
price_unit = order_line.price_unit
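        # Compute the cost carried on the stock move: strip any price-included taxes,
        # convert the price from the purchase UoM to the product's default UoM and,
        # if needed, convert it to the company currency (left unrounded on purpose).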
if order_line.taxes_id:
taxes = self.pool['account.tax'].compute_all(cr, uid, order_line.taxes_id, price_unit, 1.0,
order_line.product_id, order.partner_id)
price_unit = taxes['total']
if order_line.product_uom.id != order_line.product_id.uom_id.id:
price_unit *= order_line.product_uom.factor / order_line.product_id.uom_id.factor
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
res = []
if order.location_id.usage == 'customer':
name = order_line.product_id.with_context(dict(context or {}, lang=order.dest_address_id.lang)).name
else:
name = order_line.name or ''
move_template = {
'name': name,
'product_id': order_line.product_id.id,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order.date_order,
'date_expected': fields.date.date_to_datetime(self, cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id,
'move_dest_id': False,
'state': 'draft',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit,
'picking_type_id': order.picking_type_id.id,
'group_id': group_id,
'procurement_id': False,
'origin': order.name,
'route_ids': order.picking_type_id.warehouse_id and [(6, 0, [x.id for x in order.picking_type_id.warehouse_id.route_ids])] or [],
'warehouse_id':order.picking_type_id.warehouse_id.id,
'invoice_state': order.invoice_method == 'picking' and '2binvoiced' or 'none',
}
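        # One stock move is created per procurement attached to the PO line, each capped
        # by the remaining line quantity; whatever quantity is left after the loop gets
        # one extra move (see the comment below the loop).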
diff_quantity = order_line.product_qty
for procurement in order_line.procurement_ids:
procurement_qty = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, to_uom_id=order_line.product_uom.id)
tmp = move_template.copy()
tmp.update({
'product_uom_qty': min(procurement_qty, diff_quantity),
'product_uos_qty': min(procurement_qty, diff_quantity),
'move_dest_id': procurement.move_dest_id.id, #move destination is same as procurement destination
'group_id': procurement.group_id.id or group_id, #move group is same as group of procurements if it exists, otherwise take another group
'procurement_id': procurement.id,
'invoice_state': procurement.rule_id.invoice_state or (procurement.location_id and procurement.location_id.usage == 'customer' and procurement.invoice_state=='2binvoiced' and '2binvoiced') or (order.invoice_method == 'picking' and '2binvoiced') or 'none', #dropship case takes from sale
'propagate': procurement.rule_id.propagate,
})
diff_quantity -= min(procurement_qty, diff_quantity)
res.append(tmp)
#if the order line has a bigger quantity than the procurement it was for (manually changed or minimal quantity), then
#split the future stock move in two because the route followed may be different.
if float_compare(diff_quantity, 0.0, precision_rounding=order_line.product_uom.rounding) > 0:
move_template['product_uom_qty'] = diff_quantity
move_template['product_uos_qty'] = diff_quantity
res.append(move_template)
return res
def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates appropriate stock moves for given order lines, whose can optionally create a
picking if none is given or no suitable is found, then confirms the moves, makes them
available, and confirms the pickings.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise a standard
incoming picking will be created to wrap the stock moves (default behavior of the stock.move)
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: None
"""
stock_move = self.pool.get('stock.move')
todo_moves = []
new_group = self.pool.get("procurement.group").create(cr, uid, {'name': order.name, 'partner_id': order.partner_id.id}, context=context)
for order_line in order_lines:
if order_line.state == 'cancel':
continue
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
for vals in self._prepare_order_line_move(cr, uid, order, order_line, picking_id, new_group, context=context):
move = stock_move.create(cr, uid, vals, context=context)
todo_moves.append(move)
todo_moves = stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
def test_moves_done(self, cr, uid, ids, context=None):
'''PO is done at the delivery side if all the incoming shipments are done'''
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state != 'done':
return False
return True
def test_moves_except(self, cr, uid, ids, context=None):
        ''' PO is in exception at the delivery side if one of the pickings is cancelled
            and the other pickings are completed (done or cancelled)
'''
at_least_one_canceled = False
alldoneorcancel = True
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state == 'cancel':
at_least_one_canceled = True
if picking.state not in ['done', 'cancel']:
alldoneorcancel = False
return at_least_one_canceled and alldoneorcancel
def move_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.move_ids]
return res
def action_picking_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids):
picking_vals = {
'picking_type_id': order.picking_type_id.id,
'partner_id': order.partner_id.id,
'date': order.date_order,
'origin': order.name
}
picking_id = self.pool.get('stock.picking').create(cr, uid, picking_vals, context=context)
self._create_stock_moves(cr, uid, order, order.order_line, picking_id, context=context)
return picking_id
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
# Do check on related procurements:
proc_obj = self.pool.get("procurement.order")
po_lines = []
for po in self.browse(cr, uid, ids, context=context):
po_lines += [x.id for x in po.order_line if x.state != 'cancel']
if po_lines:
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', po_lines)], context=context)
if procs:
proc_obj.check(cr, uid, procs, context=context)
self.message_post(cr, uid, ids, body=_("Products received"), context=context)
return True
def do_merge(self, cr, uid, ids, context=None):
"""
        Merge purchase orders of similar type.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
        * Purchase Orders have the same stock location, the same pricelist and the same currency
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
        @return: dictionary mapping each newly created purchase order id to the list of merged (old) order ids
"""
        #TOFIX: merged order lines should be unlinked
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, browse_record_list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
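        # Orders producing the same key (same partner, stock location, pricelist and
        # currency) are grouped together and merged into a single new draft order.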
context = dict(context or {})
# Compute what the new orders should contain
new_orders = {}
order_lines_to_move = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id', 'currency_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
order_lines_to_move.setdefault(order_key, [])
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'picking_type_id': porder.picking_type_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'currency_id': porder.currency_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
order_lines_to_move[order_key] += [order_line.id for order_line in porder.order_line
if order_line.state != 'cancel']
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(6, 0, order_lines_to_move[order_key])]
# create the new order
context.update({'mail_create_nolog': True})
neworder_id = self.create(cr, uid, order_data)
self.message_post(cr, uid, [neworder_id], body=_("RFQ created"), context=context)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
self.signal_workflow(cr, uid, [old_id], 'purchase_cancel')
return orders_info
def _set_po_lines_invoiced(self, cr, uid, ids, context=None):
for po in self.browse(cr, uid, ids, context=context):
is_invoiced = []
if po.invoice_method == 'picking':
# We determine the invoiced state of the PO line based on the invoiced state
# of the associated moves. This should cover all possible cases:
# - all moves are done and invoiced
# - a PO line is split into multiple moves (e.g. if multiple pickings): some
# pickings are done, some are in progress, some are cancelled
for po_line in po.order_line:
if (po_line.move_ids and
all(move.state in ('done', 'cancel') for move in po_line.move_ids) and
not all(move.state == 'cancel' for move in po_line.move_ids) and
all(move.invoice_state == 'invoiced' for move in po_line.move_ids if move.state == 'done')
and po_line.invoice_lines and all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines)):
is_invoiced.append(po_line.id)
elif po_line.product_id.type == 'service':
is_invoiced.append(po_line.id)
else:
for po_line in po.order_line:
if (po_line.invoice_lines and
all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines)):
is_invoiced.append(po_line.id)
if is_invoiced:
self.pool['purchase.order.line'].write(cr, uid, is_invoiced, {'invoiced': True})
workflow.trg_write(uid, 'purchase.order', po.id, cr)
class purchase_order_line(osv.osv):
def _calc_line_base_price(self, cr, uid, line, context=None):
"""Return the base price of the line to be used for tax calculation.
This function can be extended by other modules to modify this base
price (adding a discount, for example).
"""
return line.price_unit
def _calc_line_quantity(self, cr, uid, line, context=None):
"""Return the base quantity of the line to be used for the subtotal.
This function can be extended by other modules to modify this base
        quantity (for example to handle 3x2 offers and the like).
"""
return line.product_qty
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
line_price = self._calc_line_base_price(cr, uid, line,
context=context)
line_qty = self._calc_line_quantity(cr, uid, line,
context=context)
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line_price,
line_qty, line.product_id,
line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
        except Exception:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')],
'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
\n* The \'Done\' status is set automatically when purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel',
'order_line_id', 'invoice_id', 'Invoice Lines',
readonly=True, copy=False),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'partner_id': fields.related('order_id', 'partner_id', string='Partner', readonly=True, type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id', 'date_order', string='Order Date', readonly=True, type="datetime"),
'procurement_ids': fields.one2many('procurement.order', 'purchase_line_id', string='Associated procurements'),
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def unlink(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.state in ['approved', 'done'] and line.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
procurement_obj = self.pool.get('procurement.order')
procurement_ids_to_except = procurement_obj.search(cr, uid, [('purchase_line_id', 'in', ids)], context=context)
if procurement_ids_to_except:
for po_id in procurement_ids_to_except:
procurement_obj.message_post(cr, uid, po_id, body=_('Purchase order line deleted.'), context=context)
procurement_obj.write(cr, uid, procurement_ids_to_except, {'state': 'exception'}, context=context)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, state=state, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order field, as a string in
DEFAULT_SERVER_DATETIME_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(days=supplier_delay)
def action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
# We will group by PO first, so we do the check only once for each PO
purchase_orders = list(set([x.order_id for x in self.browse(cr, uid, ids, context=context)]))
for purchase in purchase_orders:
if all([l.state == 'cancel' for l in purchase.order_line]):
self.pool.get('purchase.order').action_cancel(cr, uid, [purchase.id], context=context)
def _check_product_uom_group(self, cr, uid, context=None):
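        # Return True when the current user belongs to the multiple-UoM group
        # (product.group_uom); the UoM mismatch warning in onchange_product_id
        # is only shown to those users.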
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
        return any(user.id == uid for user in group_uom.users)
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
if not uom_id:
uom_id = self.default_get(cr, uid, ['product_uom'], context=context).get('product_uom', False)
res['value']['product_uom'] = uom_id
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
if not name or not uom_id:
# The 'or not uom_id' part of the above condition can be removed in master. See commit message of the rev. introducing this line.
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.datetime.now()
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
if float_compare(min_qty , qty, precision_digits=precision) == 1: # If the supplier quantity is greater than entered from user, set minimal.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
price = price_unit
if price_unit is False or price_unit is None:
# - determine price_unit and taxes_id
if pricelist_id:
date_order_str = datetime.strptime(date_order, DEFAULT_SERVER_DATETIME_FORMAT).strftime(DEFAULT_SERVER_DATE_FORMAT)
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order_str})[pricelist_id]
else:
price = product.standard_price
if uid == SUPERUSER_ID:
company_id = self.pool['res.users'].browse(cr, uid, [uid]).company_id.id
taxes = product.supplier_taxes_id.filtered(lambda r: r.company_id.id == company_id)
else:
taxes = product.supplier_taxes_id
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes, context=context)
price = self.pool['account.tax']._fix_tax_included_price(cr, uid, price, product.supplier_taxes_id, taxes_ids)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
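    # Aliases kept for backward compatibility with views and modules that still
    # call product_id_change / product_uom_change.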
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
return [('buy', _('Buy'))] + super(procurement_rule, self)._get_action(cr, uid, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line'),
'purchase_id': fields.related('purchase_line_id', 'order_id', type='many2one', relation='purchase.order', string='Purchase Order'),
}
def propagate_cancel(self, cr, uid, procurement, context=None):
if procurement.rule_id.action == 'buy' and procurement.purchase_line_id:
purchase_line_obj = self.pool.get('purchase.order.line')
if procurement.purchase_line_id.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Error!'),
                    _('Cannot cancel this procurement as the related purchase order has been confirmed already. Please cancel the purchase order first.'))
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, cancel=True, context=context)
if new_qty != procurement.purchase_line_id.product_qty:
purchase_line_obj.write(cr, uid, [procurement.purchase_line_id.id], {'product_qty': new_qty, 'price_unit': new_price}, context=context)
if float_compare(new_qty, 0.0, precision_rounding=procurement.product_uom.rounding) != 1:
purchase_line_obj.action_cancel(cr, uid, [procurement.purchase_line_id.id], context=context)
purchase_line_obj.unlink(cr, uid, [procurement.purchase_line_id.id], context=context)
return super(procurement_order, self).propagate_cancel(cr, uid, procurement, context=context)
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy':
#make a purchase order for the procurement
return self.make_po(cr, uid, [procurement.id], context=context)[procurement.id]
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.purchase_line_id:
if procurement.purchase_line_id.order_id.shipped:
return True
elif procurement.move_ids:
moves = self.pool.get('stock.move').browse(cr, uid, [x.id for x in procurement.move_ids], context=context)
return all(move.state == 'done' for move in moves)
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
def _check_supplier_info(self, cr, uid, ids, context=None):
''' Check the supplier info field of a product and write an error message on the procurement if needed.
Returns True if all needed information is there, False if some configuration mistake is detected.
'''
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
            partner = procurement.product_id.seller_id  # the main supplier of the procurement's product
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
                    raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
return True
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
        :param procurement: the procurement object generating the purchase order
        :param dict po_vals: field values for the new purchase order (the
                             ``order_line`` field will be overwritten with one
                             single line, as passed in ``line_vals``).
        :param dict line_vals: field values of the single purchase order line that
                               the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
def _get_product_supplier(self, cr, uid, procurement, context=None):
''' returns the main supplier of the procurement's product given as argument'''
supplierinfo = self.pool['product.supplierinfo']
company_supplier = supplierinfo.search(cr, uid,
[('product_tmpl_id', '=', procurement.product_id.product_tmpl_id.id), ('company_id', '=', procurement.company_id.id)], limit=1, context=context)
if company_supplier:
return supplierinfo.browse(cr, uid, company_supplier[0], context=context).name
return procurement.product_id.seller_id
def _get_po_line_values_from_proc(self, cr, uid, procurement, partner, company, schedule_date, context=None):
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
po_obj = self.pool.get('purchase.order')
seller_qty = procurement.product_id.seller_qty if procurement.location_id.usage != 'customer' else 0.0
pricelist_id = partner.property_product_pricelist_purchase.id
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty, seller_qty)
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner.id, dict(context, uom=uom_id))[pricelist_id]
        # pass the partner and its language in the context so the purchase order line name is rendered for that supplier
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner.id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
taxes_ids = taxes_ids.filtered(lambda x: x.company_id.id == procurement.company_id.id)
# It is necessary to have the appropriate fiscal position to get the right tax mapping
fiscal_position = False
fiscal_position_id = po_obj.onchange_partner_id(cr, uid, None, partner.id, context=context)['value']['fiscal_position']
if fiscal_position_id:
fiscal_position = acc_pos_obj.browse(cr, uid, fiscal_position_id, context=context)
taxes = acc_pos_obj.map_tax(cr, uid, fiscal_position, taxes_ids, context=context)
name = product.display_name
if product.description_purchase:
name += '\n' + product.description_purchase
return {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'taxes_id': [(6, 0, taxes)],
}
def _calc_new_qty_price(self, cr, uid, procurement, po_line=None, cancel=False, context=None):
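        # Compute the new quantity (and, when it changes, the unit price) of the PO line
        # after adding this procurement, or removing it when cancel=True, while honouring
        # the supplier's minimal quantity (not applied in the dropshipping case).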
if not po_line:
po_line = procurement.purchase_line_id
uom_obj = self.pool.get('product.uom')
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty,
procurement.product_id.uom_po_id.id)
if cancel:
qty = -qty
# Make sure we use the minimum quantity of the partner corresponding to the PO
# This does not apply in case of dropshipping
supplierinfo_min_qty = 0.0
if po_line.order_id.location_id.usage != 'customer':
if po_line.product_id.seller_id.id == po_line.order_id.partner_id.id:
supplierinfo_min_qty = po_line.product_id.seller_qty
else:
supplierinfo_obj = self.pool.get('product.supplierinfo')
supplierinfo_ids = supplierinfo_obj.search(cr, uid, [('name', '=', po_line.order_id.partner_id.id), ('product_tmpl_id', '=', po_line.product_id.product_tmpl_id.id)])
supplierinfo_min_qty = supplierinfo_obj.browse(cr, uid, supplierinfo_ids).min_qty
if supplierinfo_min_qty == 0.0:
qty += po_line.product_qty
else:
# Recompute quantity by adding existing running procurements.
for proc in po_line.procurement_ids:
qty += uom_obj._compute_qty(cr, uid, proc.product_uom.id, proc.product_qty,
proc.product_id.uom_po_id.id) if proc.state == 'running' else 0.0
qty = max(qty, supplierinfo_min_qty) if qty > 0.0 else 0.0
price = po_line.price_unit
if qty != po_line.product_qty:
pricelist_obj = self.pool.get('product.pricelist')
pricelist_id = po_line.order_id.partner_id.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, po_line.order_id.partner_id.id, {'uom': procurement.product_id.uom_po_id.id})[pricelist_id]
return qty, price
def update_origin_po(self, cr, uid, po, proc, context=None):
pass
def make_po(self, cr, uid, ids, context=None):
""" Resolve the purchase from procurement, which may result in a new PO creation, a new PO line creation or a quantity change on existing PO line.
Note that some operations (as the PO creation) are made as SUPERUSER because the current user may not have rights to do it (mto product launched by a sale for example)
@return: dictionary giving for each procurement its related resolving PO line.
"""
res = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
po_obj = self.pool.get('purchase.order')
po_line_obj = self.pool.get('purchase.order.line')
seq_obj = self.pool.get('ir.sequence')
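        # Procurements are collected in three buckets so the right chatter message can
        # be posted afterwards: pass_ids (a new draft PO was created), linked_po_ids
        # (a new line was added to an existing draft PO) and sum_po_line_ids (the
        # quantity of an existing PO line was increased).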
pass_ids = []
linked_po_ids = []
sum_po_line_ids = []
for procurement in self.browse(cr, uid, ids, context=context):
ctx_company = dict(context or {}, force_company=procurement.company_id.id)
partner = self._get_product_supplier(cr, uid, procurement, context=ctx_company)
if not partner:
self.message_post(cr, uid, [procurement.id], _('There is no supplier associated to product %s') % (procurement.product_id.name))
res[procurement.id] = False
else:
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
line_vals = self._get_po_line_values_from_proc(cr, uid, procurement, partner, company, schedule_date, context=ctx_company)
#look for any other draft PO for the same supplier, to attach the new line on instead of creating a new draft one
available_draft_po_ids = po_obj.search(cr, uid, [
('partner_id', '=', partner.id), ('state', '=', 'draft'), ('picking_type_id', '=', procurement.rule_id.picking_type_id.id),
('location_id', '=', procurement.location_id.id), ('company_id', '=', procurement.company_id.id), ('dest_address_id', '=', procurement.partner_dest_id.id)], context=context)
if available_draft_po_ids:
po_id = available_draft_po_ids[0]
po_rec = po_obj.browse(cr, uid, po_id, context=context)
po_to_update = {'origin': ', '.join(filter(None, set([po_rec.origin, procurement.origin])))}
                    #if the product has to be ordered earlier than those in the existing PO, we replace the purchase date on the order to avoid ordering it too late
if datetime.strptime(po_rec.date_order, DEFAULT_SERVER_DATETIME_FORMAT) > purchase_date:
po_to_update.update({'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
po_obj.write(cr, uid, [po_id], po_to_update, context=context)
#look for any other PO line in the selected PO with same product and UoM to sum quantities instead of creating a new po line
available_po_line_ids = po_line_obj.search(cr, uid, [('order_id', '=', po_id), ('product_id', '=', line_vals['product_id']), ('product_uom', '=', line_vals['product_uom'])], context=context)
if available_po_line_ids:
po_line = po_line_obj.browse(cr, uid, available_po_line_ids[0], context=context)
po_line_id = po_line.id
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, po_line=po_line, context=context)
if new_qty > po_line.product_qty:
po_line_obj.write(cr, SUPERUSER_ID, po_line.id, {'product_qty': new_qty, 'price_unit': new_price}, context=context)
self.update_origin_po(cr, uid, po_rec, procurement, context=context)
sum_po_line_ids.append(procurement.id)
else:
line_vals.update(order_id=po_id)
po_line_id = po_line_obj.create(cr, SUPERUSER_ID, line_vals, context=context)
linked_po_ids.append(procurement.id)
else:
name = seq_obj.get(cr, uid, 'purchase.order', context=context) or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner.id,
'location_id': procurement.location_id.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'pricelist_id': partner.property_product_pricelist_purchase.id,
'currency_id': partner.property_product_pricelist_purchase and partner.property_product_pricelist_purchase.currency_id.id or procurement.company_id.currency_id.id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': po_obj.onchange_partner_id(cr, uid, None, partner.id, context=context)['value']['fiscal_position'],
'payment_term_id': partner.property_supplier_payment_term.id or False,
'dest_address_id': procurement.partner_dest_id.id,
}
po_id = self.create_procurement_purchase_order(cr, SUPERUSER_ID, procurement, po_vals, line_vals, context=dict(context, company_id=po_vals['company_id']))
po_line_id = po_obj.browse(cr, uid, po_id, context=context).order_line[0].id
pass_ids.append(procurement.id)
res[procurement.id] = po_line_id
self.write(cr, uid, [procurement.id], {'purchase_line_id': po_line_id}, context=context)
if pass_ids:
self.message_post(cr, uid, pass_ids, body=_("Draft Purchase Order created"), context=context)
if linked_po_ids:
self.message_post(cr, uid, linked_po_ids, body=_("Purchase line created and linked to an existing Purchase Order"), context=context)
if sum_po_line_ids:
self.message_post(cr, uid, sum_po_line_ids, body=_("Quantity added in existing Purchase Order Line"), context=context)
return res
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
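        # Once the RFQ email has actually been sent, move the related draft purchase
        # order forward in its workflow (the 'send_rfq' signal marks it as sent).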
if mail_sent and mail.model == 'purchase.order':
obj = self.pool.get('purchase.order').browse(cr, uid, mail.res_id, context=context)
if obj.state == 'draft':
self.pool.get('purchase.order').signal_workflow(cr, uid, [mail.res_id], 'send_rfq')
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context, mail_sent=mail_sent)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
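    # New products are given the 'Buy' route by default (see _defaults below), so
    # procurements for them can be fulfilled through purchase orders.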
def _get_buy_route(self, cr, uid, context=None):
buy_route = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'purchase.route_warehouse0_buy')
if buy_route:
return [buy_route]
return []
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
for template in self.browse(cr, uid, ids, context=context):
res[template.id] = sum([p.purchase_count for p in template.product_variant_ids])
return res
_columns = {
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
_defaults = {
'purchase_ok': 1,
'route_ids': _get_buy_route,
}
def action_view_purchases(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
return result
class product_product(osv.Model):
_name = 'product.product'
_inherit = 'product.product'
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
Purchase = self.pool['purchase.order']
return {
product_id: Purchase.search_count(cr,uid, [('order_line.product_id', '=', product_id)], context=context)
for product_id in ids
}
def action_view_purchases(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = self.pool['product.template']._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]"
return result
_columns = {
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('purchase.order').signal_workflow(cr, uid, [context['default_res_id']], 'send_rfq')
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
""" Override account_invoice to add Chatter messages on the related purchase
orders, logging the invoice receipt or payment. """
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
        # the validating user may lack read access on purchase.order; fall back to the superuser in that case
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice received"), context=context)
purchase_order_obj._set_po_lines_invoiced(cr, user_id, [po_id], context=context)
return res
def confirm_paid(self, cr, uid, ids, context=None):
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
        # the paying user may lack read access on purchase.order; fall back to the superuser in that case
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice paid"), context=context)
return res
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| MarcosCommunity/odoo | addons/purchase/purchase.py | Python | agpl-3.0 | 93,906 | 0.0064 |
from prompt_toolkit import Application
from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.shortcuts import create_eventloop
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.keys import Keys
from prompt_toolkit.buffer import Buffer, AcceptAction
from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
from prompt_toolkit.layout.containers import VSplit, Window, HSplit
from prompt_toolkit.layout.controls import BufferControl, FillControl, TokenListControl
from prompt_toolkit.layout.dimension import LayoutDimension as D
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from pygments.token import Token
def get_titlebar_tokens(cli):
return [
(Token.Title, ' Hello world '),
(Token.Title, ' (Press [Ctrl-Q] to quit.)'),
]
def handle_action(cli, buffer):
' When enter is pressed in the Vi command line. '
text = buffer.text # Remember: leave_command_mode resets the buffer.
buffer.delete_before_cursor(len(text))
cli.focus(DEFAULT_BUFFER)
# First leave command mode. We want to make sure that the working
# pane is focussed again before executing the command handlers.
# self.leave_command_mode(append_to_history=True)
# Execute command.
buffers[DEFAULT_BUFFER].insert_text(text)
buffers = {
DEFAULT_BUFFER: Buffer(is_multiline=True),
'PROMPT': Buffer(
accept_action=AcceptAction(handler=handle_action),
enable_history_search=True,
complete_while_typing=True,
auto_suggest=AutoSuggestFromHistory()),
'RESULT': Buffer(is_multiline=True),
}
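# Three buffers: the editable left pane (DEFAULT_BUFFER), the command prompt at
# the bottom (PROMPT) and the right pane (RESULT), which mirrors the default
# buffer's text reversed (see default_buffer_changed below).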
def default_buffer_changed(cli):
"""
When the buffer on the left (DEFAULT_BUFFER) changes, update the buffer on
the right. We just reverse the text.
"""
buffers['RESULT'].text = buffers[DEFAULT_BUFFER].text[::-1]
buffers[DEFAULT_BUFFER].on_text_changed += default_buffer_changed
def get_bottom_toolbar_tokens(cli):
return [(Token.Toolbar, ' This is a toolbar. ')]
layout = VSplit([
Window(content=BufferControl(
buffer_name=DEFAULT_BUFFER, focus_on_click=True)),
Window(width=D.exact(1),
content=FillControl('|', token=Token.Line)),
Window(content=BufferControl(buffer_name='RESULT'))
])
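# Wrap the editor/result split in a vertical layout: a title bar, a separator
# line, the split itself, another separator and a two-line prompt at the bottom.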
layout = HSplit([
Window(height=D.exact(1),
content=TokenListControl(
get_titlebar_tokens, align_center=True
)),
Window(height=D.exact(1),
content=FillControl('-', token=Token.Line)),
layout,
Window(height=D.exact(1),
content=FillControl('-', token=Token.Line)),
Window(height=D.exact(2),
content=BufferControl(
buffer_name='PROMPT',
focus_on_click=True)),
])
loop = create_eventloop()
manager = KeyBindingManager()
registry = manager.registry
@registry.add_binding(Keys.ControlQ, eager=True)
def exit_(event):
event.cli.set_return_value(None)
application = Application(key_bindings_registry=registry, layout=layout,
buffers=buffers,
mouse_support=True,
use_alternate_screen=True,
editing_mode=EditingMode.VI
)
cli = CommandLineInterface(application=application, eventloop=loop)
cli.run()
print("Exiting")
| anhlt/twitter_cli | twitter_cli/main.py | Python | mit | 3,569 | 0.00028 |
"""Defines a sync module for Blink."""
import logging
from requests.structures import CaseInsensitiveDict
from blinkpy import api
from blinkpy.camera import BlinkCamera, BlinkCameraMini, BlinkDoorbell
from blinkpy.helpers.util import time_to_seconds
from blinkpy.helpers.constants import ONLINE
_LOGGER = logging.getLogger(__name__)
class BlinkSyncModule:
"""Class to initialize sync module."""
def __init__(self, blink, network_name, network_id, camera_list):
"""
Initialize Blink sync module.
:param blink: Blink class instantiation
"""
self.blink = blink
self.network_id = network_id
self.region_id = blink.auth.region_id
self.name = network_name
self.serial = None
self.status = "offline"
self.sync_id = None
self.host = None
self.summary = None
self.network_info = None
self.events = []
self.cameras = CaseInsensitiveDict({})
self.motion_interval = blink.motion_interval
self.motion = {}
self.last_record = {}
self.camera_list = camera_list
self.available = False
@property
def attributes(self):
"""Return sync attributes."""
attr = {
"name": self.name,
"id": self.sync_id,
"network_id": self.network_id,
"serial": self.serial,
"status": self.status,
"region_id": self.region_id,
}
return attr
@property
def urls(self):
"""Return device urls."""
return self.blink.urls
@property
def online(self):
"""Return boolean system online status."""
try:
return ONLINE[self.status]
except KeyError:
_LOGGER.error("Unknown sync module status %s", self.status)
self.available = False
return False
@property
def arm(self):
"""Return status of sync module: armed/disarmed."""
try:
return self.network_info["network"]["armed"]
except (KeyError, TypeError):
self.available = False
return None
@arm.setter
def arm(self, value):
"""Arm or disarm camera."""
if value:
return api.request_system_arm(self.blink, self.network_id)
return api.request_system_disarm(self.blink, self.network_id)
def start(self):
"""Initialize the system."""
response = self.sync_initialize()
if not response:
return False
try:
self.sync_id = self.summary["id"]
self.serial = self.summary["serial"]
self.status = self.summary["status"]
except KeyError:
_LOGGER.error("Could not extract some sync module info: %s", response)
is_ok = self.get_network_info()
self.check_new_videos()
if not is_ok or not self.update_cameras():
return False
self.available = True
return True
def sync_initialize(self):
"""Initialize a sync module."""
response = api.request_syncmodule(self.blink, self.network_id)
try:
self.summary = response["syncmodule"]
self.network_id = self.summary["network_id"]
except (TypeError, KeyError):
_LOGGER.error(
"Could not retrieve sync module information with response: %s", response
)
return False
return response
def update_cameras(self, camera_type=BlinkCamera):
"""Update cameras from server."""
try:
for camera_config in self.camera_list:
if "name" not in camera_config:
break
blink_camera_type = camera_config.get("type", "")
name = camera_config["name"]
self.motion[name] = False
owl_info = self.get_owl_info(name)
lotus_info = self.get_lotus_info(name)
if blink_camera_type == "mini":
camera_type = BlinkCameraMini
if blink_camera_type == "lotus":
camera_type = BlinkDoorbell
self.cameras[name] = camera_type(self)
camera_info = self.get_camera_info(
camera_config["id"], owl_info=owl_info, lotus_info=lotus_info
)
self.cameras[name].update(camera_info, force_cache=True, force=True)
except KeyError:
_LOGGER.error("Could not create camera instances for %s", self.name)
return False
return True
def get_owl_info(self, name):
"""Extract owl information."""
try:
for owl in self.blink.homescreen["owls"]:
if owl["name"] == name:
return owl
except (TypeError, KeyError):
pass
return None
def get_lotus_info(self, name):
"""Extract lotus information."""
try:
for doorbell in self.blink.homescreen["doorbells"]:
if doorbell["name"] == name:
return doorbell
except (TypeError, KeyError):
pass
return None
def get_events(self, **kwargs):
"""Retrieve events from server."""
force = kwargs.pop("force", False)
response = api.request_sync_events(self.blink, self.network_id, force=force)
try:
return response["event"]
except (TypeError, KeyError):
_LOGGER.error("Could not extract events: %s", response)
return False
def get_camera_info(self, camera_id, **kwargs):
"""Retrieve camera information."""
owl = kwargs.get("owl_info", None)
if owl is not None:
return owl
lotus = kwargs.get("lotus_info", None)
if lotus is not None:
return lotus
response = api.request_camera_info(self.blink, self.network_id, camera_id)
try:
return response["camera"][0]
except (TypeError, KeyError):
_LOGGER.error("Could not extract camera info: %s", response)
return {}
def get_network_info(self):
"""Retrieve network status."""
self.network_info = api.request_network_update(self.blink, self.network_id)
try:
if self.network_info["network"]["sync_module_error"]:
raise KeyError
except (TypeError, KeyError):
self.available = False
return False
return True
def refresh(self, force_cache=False):
"""Get all blink cameras and pulls their most recent status."""
if not self.get_network_info():
return
self.check_new_videos()
for camera_name in self.cameras.keys():
camera_id = self.cameras[camera_name].camera_id
camera_info = self.get_camera_info(
camera_id,
owl_info=self.get_owl_info(camera_name),
lotus_info=self.get_lotus_info(camera_name),
)
self.cameras[camera_name].update(camera_info, force_cache=force_cache)
self.available = True
def check_new_videos(self):
"""Check if new videos since last refresh."""
try:
interval = self.blink.last_refresh - self.motion_interval * 60
except TypeError:
# This is the first start, so refresh hasn't happened yet.
# No need to check for motion.
return False
resp = api.request_videos(self.blink, time=interval, page=1)
for camera in self.cameras.keys():
self.motion[camera] = False
try:
info = resp["media"]
except (KeyError, TypeError):
_LOGGER.warning("Could not check for motion. Response: %s", resp)
return False
for entry in info:
try:
name = entry["device_name"]
clip = entry["media"]
timestamp = entry["created_at"]
if self.check_new_video_time(timestamp):
self.motion[name] = True and self.arm
self.last_record[name] = {"clip": clip, "time": timestamp}
except KeyError:
_LOGGER.debug("No new videos since last refresh.")
return True
def check_new_video_time(self, timestamp):
"""Check if video has timestamp since last refresh."""
return time_to_seconds(timestamp) > self.blink.last_refresh
class BlinkOwl(BlinkSyncModule):
"""Representation of a sync-less device."""
def __init__(self, blink, name, network_id, response):
"""Initialize a sync-less object."""
cameras = [{"name": name, "id": response["id"]}]
super().__init__(blink, name, network_id, cameras)
self.sync_id = response["id"]
self.serial = response["serial"]
self.status = response["enabled"]
if not self.serial:
self.serial = f"{network_id}-{self.sync_id}"
def sync_initialize(self):
"""Initialize a sync-less module."""
self.summary = {
"id": self.sync_id,
"name": self.name,
"serial": self.serial,
"status": self.status,
"onboarded": True,
"account_id": self.blink.account_id,
"network_id": self.network_id,
}
return self.summary
def update_cameras(self, camera_type=BlinkCameraMini):
"""Update sync-less cameras."""
return super().update_cameras(camera_type=BlinkCameraMini)
def get_camera_info(self, camera_id, **kwargs):
"""Retrieve camera information."""
try:
for owl in self.blink.homescreen["owls"]:
if owl["name"] == self.name:
self.status = owl["enabled"]
return owl
except (TypeError, KeyError):
pass
return None
def get_network_info(self):
"""Get network info for sync-less module."""
return True
@property
def network_info(self):
"""Format owl response to resemble sync module."""
return {
"network": {
"id": self.network_id,
"name": self.name,
"armed": self.status,
"sync_module_error": False,
"account_id": self.blink.account_id,
}
}
@network_info.setter
def network_info(self, value):
"""Set network_info property."""
class BlinkLotus(BlinkSyncModule):
"""Representation of a sync-less device."""
def __init__(self, blink, name, network_id, response):
"""Initialize a sync-less object."""
cameras = [{"name": name, "id": response["id"]}]
super().__init__(blink, name, network_id, cameras)
self.sync_id = response["id"]
self.serial = response["serial"]
self.status = response["enabled"]
if not self.serial:
self.serial = f"{network_id}-{self.sync_id}"
def sync_initialize(self):
"""Initialize a sync-less module."""
self.summary = {
"id": self.sync_id,
"name": self.name,
"serial": self.serial,
"status": self.status,
"onboarded": True,
"account_id": self.blink.account_id,
"network_id": self.network_id,
}
return self.summary
def update_cameras(self, camera_type=BlinkDoorbell):
"""Update sync-less cameras."""
return super().update_cameras(camera_type=BlinkDoorbell)
def get_camera_info(self, camera_id, **kwargs):
"""Retrieve camera information."""
try:
for doorbell in self.blink.homescreen["doorbells"]:
if doorbell["name"] == self.name:
self.status = doorbell["enabled"]
return doorbell
except (TypeError, KeyError):
pass
return None
def get_network_info(self):
"""Get network info for sync-less module."""
return True
@property
def network_info(self):
"""Format lotus response to resemble sync module."""
return {
"network": {
"id": self.network_id,
"name": self.name,
"armed": self.status,
"sync_module_error": False,
"account_id": self.blink.account_id,
}
}
@network_info.setter
def network_info(self, value):
"""Set network_info property."""
| fronzbot/blinkpy | blinkpy/sync_module.py | Python | mit | 12,603 | 0.000714 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: Elivis.Zhang <elivis.zhang@aliyun.com>
# QQ Group:99798703
# Created on Aug 8, 2015
# -*- coding: utf-8 -*-
import MySQLdb
import settings
class db_operate:
    def mysql_command(self, conn, sql_cmd):
        """Run a SQL statement and return all result values as a flat list."""
        ret = []
        try:
            conn = MySQLdb.connect(host=conn["host"], user=conn["user"], passwd=conn["password"], db=conn["database"], port=conn["port"], charset="utf8")
            cursor = conn.cursor()
            cursor.execute(sql_cmd)
            for row in cursor.fetchall():
                for i in row:
                    ret.append(i)
        except MySQLdb.Error as e:
            ret.append(e)
        return ret
    def select_table(self, conn, sql_cmd, params):
        """Run a parameterized SELECT and return all result values as a flat list."""
        ret = []
        try:
            conn = MySQLdb.connect(host=conn["host"], user=conn["user"], passwd=conn["password"], db=conn["database"], port=conn["port"], charset="utf8")
            cursor = conn.cursor()
            cursor.execute(sql_cmd, params)
            for row in cursor.fetchall():
                for i in row:
                    ret.append(i)
        except MySQLdb.Error as e:
            ret.append(e)
        return ret
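# Minimal usage sketch (assumptions: a reachable MySQL server and an existing
# table to query; the dictionary keys below are exactly the ones read by the
# methods above, and MySQLdb expects %s-style placeholders):
if __name__ == '__main__':
    conn_info = {
        "host": "127.0.0.1",
        "user": "opsa",
        "password": "secret",
        "database": "opsa",
        "port": 3306,
    }
    rows = db_operate().select_table(conn_info, "SELECT name FROM users WHERE id = %s", (1,))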
| Elivis/opsa-master | opsa/mysql.py | Python | gpl-2.0 | 1,197 | 0.020084 |
# ####################################################################
# gofed - set of tools to automize packaging of golang devel codes
# Copyright (C) 2014 Jan Chaloupka, jchaloup@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
import re
import os
import urllib2
import optparse
from subprocess import Popen, PIPE
from modules.Utils import GREEN, RED, ENDC
from modules.Packages import packageInPkgdb
from modules.Utils import FormatedPrint
from modules.ImportPath import ImportPath
from modules.ImportPathsDecomposer import ImportPathsDecomposer
from modules.GoSymbolsExtractor import GoSymbolsExtractor
from modules.Config import Config
if __name__ == "__main__":
parser = optparse.OptionParser("%prog [-a] [-c] [-d [-v]] [directory]")
parser.add_option_group( optparse.OptionGroup(parser, "directory", "Directory to inspect. If empty, current directory is used.") )
parser.add_option(
"", "-a", "--all", dest="all", action = "store_true", default = False,
help = "Display all imports including golang native"
)
parser.add_option(
"", "-c", "--classes", dest="classes", action = "store_true", default = False,
help = "Decompose imports into classes"
)
parser.add_option(
"", "-d", "--pkgdb", dest="pkgdb", action = "store_true", default = False,
help = "Check if a class is in the PkgDB (only with -c option)"
)
parser.add_option(
"", "-v", "--verbose", dest="verbose", action = "store_true", default = False,
help = "Show all packages if -d option is on"
)
parser.add_option(
"", "-s", "--short", dest="short", action = "store_true", default = False,
help = "Display just classes without its imports"
)
parser.add_option(
"", "", "--spec", dest="spec", action = "store_true", default = False,
help = "Display import path for spec file"
)
parser.add_option(
"", "-r", "--requires", dest="requires", action = "store_true", default = False,
help = "Use Requires instead of BuildRequires. Used only with --spec option."
)
parser.add_option(
"", "", "--skip-errors", dest="skiperrors", action = "store_true", default = False,
help = "Skip all errors during Go symbol parsing"
)
parser.add_option(
"", "", "--importpath", dest="importpath", default = "",
help = "Don't display class belonging to IMPORTPATH prefix"
)
parser.add_option(
"", "", "--scan-all-dirs", dest="scanalldirs", action = "store_true", default = False,
help = "Scan all dirs, including Godeps directory"
)
parser.add_option(
"", "", "--skip-dirs", dest="skipdirs", default = "",
help = "Scan all dirs except specified via SKIPDIRS. Directories are comma separated list."
)
parser.add_option(
"", "", "--all-occurrences", dest="alloccurrences", action = "store_true", default = False,
help = "List imported paths in all packages including main. Default is skip main packages."
)
parser.add_option(
"", "", "--show-occurrence", dest="showoccurrence", action = "store_true", default = False,
help = "Show occurence of import paths."
)
options, args = parser.parse_args()
path = "."
if len(args):
path = args[0]
fmt_obj = FormatedPrint()
if not options.scanalldirs:
noGodeps = Config().getSkippedDirectories()
else:
noGodeps = []
if options.skipdirs:
for dir in options.skipdirs.split(','):
dir = dir.strip()
if dir == "":
continue
noGodeps.append(dir)
gse_obj = GoSymbolsExtractor(path, imports_only=True, skip_errors=options.skiperrors, noGodeps=noGodeps)
if not gse_obj.extract():
fmt_obj.printError(gse_obj.getError())
exit(1)
package_imports_occurence = gse_obj.getPackageImportsOccurences()
ip_used = gse_obj.getImportedPackages()
ipd = ImportPathsDecomposer(ip_used)
if not ipd.decompose():
fmt_obj.printError(ipd.getError())
exit(1)
warn = ipd.getWarning()
if warn != "":
fmt_obj.printWarning("Warning: %s" % warn)
classes = ipd.getClasses()
sorted_classes = sorted(classes.keys())
# get max length of all imports
max_len = 0
for element in sorted_classes:
if element == "Native":
continue
# class name starts with prefix => filter out
if options.importpath != "" and element.startswith(options.importpath):
continue
gimports = []
for gimport in classes[element]:
if options.importpath != "" and gimport.startswith(options.importpath):
continue
gimports.append(gimport)
for gimport in gimports:
import_len = len(gimport)
if import_len > max_len:
max_len = import_len
if options.spec and options.showoccurrence:
print "# THIS IS NOT A VALID SPEC FORMAT"
print "# COMMENTS HAS TO BE STARTED AT THE BEGGINING OF A LINE"
for element in sorted_classes:
if not options.all and element == "Native":
continue
if not options.alloccurrences:
one_class = []
for gimport in classes[element]:
# does it occur only in main package?
# remove it from classes[element]
skip = True
if gimport in package_imports_occurence:
for occurrence in package_imports_occurence[gimport]:
if not occurrence.endswith(":main"):
skip = False
break
if skip:
continue
one_class.append(gimport)
classes[element] = sorted(one_class)
# class name starts with prefix => filter out
if options.importpath != "" and element.startswith(options.importpath):
continue
# filter out all members of a class prefixed by prefix
gimports = []
for gimport in classes[element]:
if options.importpath != "" and gimport.startswith(options.importpath):
continue
gimports.append(gimport)
if gimports == []:
continue
if options.classes:
# Native class is just printed
if options.all and element == "Native":
# does not make sense to check Native class in PkgDB
if options.pkgdb:
continue
print "Class: %s" % element
if not options.short:
for gimport in gimports:
if options.showoccurrence:
print "\t%s (%s)" % (gimport, ", ".join(package_imports_occurence[gimport]))
else:
print "\t%s" % gimport
continue
# Translate non-native class into package name (if -d option)
if options.pkgdb:
ip_obj = ImportPath(element)
if not ip_obj.parse():
fmt_obj.printWarning("Unable to translate %s to package name" % element)
continue
pkg_name = ip_obj.getPackageName()
if pkg_name == "":
fmt_obj.printWarning(ip_obj.getError())
pkg_in_pkgdb = packageInPkgdb(pkg_name)
if pkg_in_pkgdb:
if options.verbose:
print (GREEN + "Class: %s (%s) PkgDB=%s" + ENDC) % (element, pkg_name, pkg_in_pkgdb)
else:
print (RED + "Class: %s (%s) PkgDB=%s" + ENDC ) % (element, pkg_name, pkg_in_pkgdb)
continue
# Print class
print "Class: %s" % element
if not options.short:
for gimport in sorted(gimports):
if options.showoccurrence:
print "\t%s (%s)" % (gimport, ", ".join(package_imports_occurence[gimport]))
else:
print "\t%s" % gimport
continue
# Spec file BR
if options.spec:
for gimport in sorted(classes[element]):
if options.requires:
if options.showoccurrence:
import_len = len(gimport)
print "Requires: golang(%s) %s# %s" % (gimport, (max_len - import_len)*" ", ", ".join(package_imports_occurence[gimport]))
else:
print "Requires: golang(%s)" % gimport
else:
if options.showoccurrence:
import_len = len(gimport)
print "BuildRequires: golang(%s) %s# %s" % (gimport, (max_len - import_len)*" ", ", ".join(package_imports_occurence[gimport]))
else:
print "BuildRequires: golang(%s)" % gimport
continue
# Just a list of all import paths
for gimport in sorted(classes[element]):
if options.showoccurrence:
import_len = len(gimport)
print "\t%s %s(%s)" % (gimport, (max_len - import_len)*" ", ", ".join(package_imports_occurence[gimport]))
else:
print "\t%s" % gimport
| piotr1212/gofed | ggi.py | Python | gpl-2.0 | 8,925 | 0.032381 |
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import unittest
from unittest import mock
except ImportError:
import mock
import unittest2 as unittest
from acos_client import client
import acos_client.errors as acos_errors
import json
import responses
HOSTNAME = 'fake_a10'
BASE_URL = 'https://{}:443/axapi/v3'.format(HOSTNAME)
AUTH_URL = '{}/auth'.format(BASE_URL)
VSERVER_NAME = 'test'
CREATE_URL = '{}/slb/virtual-server/{}/port/'.format(BASE_URL, VSERVER_NAME)
OBJECT_URL = '{}/slb/virtual-server/{}/port/80+http'.format(BASE_URL, VSERVER_NAME)
ALL_URL = '{}/slb/virtual-server/{}/port/'.format(BASE_URL, VSERVER_NAME)
class TestVirtualPort(unittest.TestCase):
def setUp(self):
self.client = client.Client(HOSTNAME, '30', 'fake_username', 'fake_password')
self.maxDiff = None
@responses.activate
def test_virtual_port_create_no_params(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {'response': {'status': 'OK'}}
responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
params = {
'port':
{
'extended-stats': 1,
'name': 'test1_VPORT',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1'
}
}
resp = self.client.slb.virtual_server.vport.create(
VSERVER_NAME, 'test1_VPORT', protocol=self.client.slb.virtual_server.vport.HTTP, port='80',
service_group_name='pool1'
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, CREATE_URL)
self.assertEqual(json.loads(responses.calls[1].request.body), params)
@responses.activate
def test_virtual_port_create_with_params(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {'response': {'status': 'OK'}}
responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'ipinip': 1,
'name': 'test1_VPORT',
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'ha-conn-mirror': 1,
'no-dest-nat': 1,
'conn-limit': 50000,
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template'
}
}
resp = self.client.slb.virtual_server.vport.create(
virtual_server_name=VSERVER_NAME,
name='test1_VPORT',
protocol=self.client.slb.virtual_server.vport.HTTP,
port='80',
service_group_name='pool1',
s_pers_name="test_s_pers_template",
c_pers_name="test_c_pers_template",
ha_conn_mirror=1,
no_dest_nat=1,
conn_limit=50000,
status=1,
autosnat=True,
ipinip=True,
source_nat_pool="test_nat_pool",
tcp_template="test_tcp_template",
udp_template="test_udp_template",
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, CREATE_URL)
self.assertEqual(json.loads(responses.calls[1].request.body), params)
@responses.activate
def test_virtual_port_create_already_exists(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {
"response": {"status": "fail", "err": {"code": 1406, "msg": "The virtual port already exists."}}
}
responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
with self.assertRaises(acos_errors.ACOSException):
self.client.slb.virtual_server.vport.create(
VSERVER_NAME, 'test1_VPORT', protocol=self.client.slb.virtual_server.vport.HTTP, port='80',
service_group_name='pool1'
)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, CREATE_URL)
@mock.patch('acos_client.v30.slb.virtual_port.VirtualPort.get')
@responses.activate
def test_virtual_port_update_no_params(self, mocked_get):
mocked_get.return_value = {"foo": "bar"}
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {"foo": "bar"}
responses.add(responses.POST, OBJECT_URL, json=json_response, status=200)
params = {
"port":
{
"name": "test1_VPORT",
"service-group": "pool1",
"protocol": "http",
"port-number": 80,
"template-persist-source-ip": None,
"template-persist-cookie": None,
"extended-stats": 1,
}
}
resp = self.client.slb.virtual_server.vport.update(
VSERVER_NAME, 'test1_VPORT', protocol=self.client.slb.virtual_server.vport.HTTP, port='80',
service_group_name='pool1'
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, OBJECT_URL)
self.assertEqual(json.loads(responses.calls[1].request.body), params)
@mock.patch('acos_client.v30.slb.virtual_port.VirtualPort.get')
@responses.activate
def test_virtual_port_create_with_templates(self, mocked_get):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {'response': {'status': 'OK'}}
responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
protocol = self.client.slb.virtual_server.vport.HTTP
if protocol.lower() == 'http':
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'ipinip': 1,
'name': 'test1_VPORT',
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template',
'template-virtual-port': 'template_vp',
'template-http': None,
'template-policy': 'template_pl',
}
}
else:
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'ipinip': 1,
'name': 'test1_VPORT',
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template',
'template-virtual-port': 'template_vp',
'template-tcp': 'template_tcp',
'template-policy': 'template_pl',
}
}
resp = self.client.slb.virtual_server.vport.create(
virtual_server_name=VSERVER_NAME,
name='test1_VPORT',
protocol=self.client.slb.virtual_server.vport.HTTP,
port='80',
service_group_name='pool1',
s_pers_name="test_s_pers_template",
c_pers_name="test_c_pers_template",
status=1,
autosnat=True,
ipinip=True,
source_nat_pool="test_nat_pool",
tcp_template="test_tcp_template",
udp_template="test_udp_template",
virtual_port_templates={
'template-virtual-port': 'template_vp',
'template-tcp': 'template_tcp',
'template-policy': 'template_pl',
},
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, CREATE_URL)
self.assertEqual(json.loads(responses.calls[1].request.body), params)
@mock.patch('acos_client.v30.slb.virtual_port.VirtualPort.get')
@responses.activate
def test_virtual_port_create_with_partial_templates(self, mocked_get):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {'response': {'status': 'OK'}}
responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
protocol = self.client.slb.virtual_server.vport.HTTP
if protocol.lower() == 'http':
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'ipinip': 1,
'name': 'test1_VPORT',
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template',
'template-virtual-port': 'template_vp',
'template-http': None,
'template-policy': None,
}
}
else:
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'ipinip': 1,
'name': 'test1_VPORT',
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template',
'template-virtual-port': 'template_vp',
'template-tcp': None,
'template-policy': None,
}
}
resp = self.client.slb.virtual_server.vport.create(
virtual_server_name=VSERVER_NAME,
name='test1_VPORT',
protocol=self.client.slb.virtual_server.vport.HTTP,
port='80',
service_group_name='pool1',
s_pers_name="test_s_pers_template",
c_pers_name="test_c_pers_template",
status=1,
autosnat=True,
ipinip=True,
source_nat_pool="test_nat_pool",
tcp_template="test_tcp_template",
udp_template="test_udp_template",
virtual_port_templates={
'template-virtual-port': 'template_vp'
},
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, CREATE_URL)
self.assertEqual(json.loads(responses.calls[1].request.body), params)
@mock.patch('acos_client.v30.slb.virtual_port.VirtualPort.get')
@responses.activate
def test_virtual_port_update_with_params(self, mocked_get):
mocked_get.return_value = {"foo": "bar"}
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {"foo": "bar"}
responses.add(responses.POST, OBJECT_URL, json=json_response, status=200)
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'name': 'test1_VPORT',
'ipinip': 1,
'no-dest-nat': 1,
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'ha-conn-mirror': 1,
'conn-limit': 50000,
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template',
}
}
resp = self.client.slb.virtual_server.vport.update(
virtual_server_name=VSERVER_NAME,
name='test1_VPORT',
protocol=self.client.slb.virtual_server.vport.HTTP,
port='80',
service_group_name='pool1',
s_pers_name="test_s_pers_template",
c_pers_name="test_c_pers_template",
status=1,
autosnat=True,
ipinip=True,
ha_conn_mirror=1,
no_dest_nat=1,
conn_limit=50000,
source_nat_pool="test_nat_pool",
tcp_template="test_tcp_template",
udp_template="test_udp_template",
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, OBJECT_URL)
self.assertEqual(json.loads(responses.calls[1].request.body), params)
@mock.patch('acos_client.v30.slb.virtual_port.VirtualPort.get')
@responses.activate
def test_virtual_port_update_with_templates(self, mocked_get):
mocked_get.return_value = {"foo": "bar"}
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {"foo": "bar"}
responses.add(responses.POST, OBJECT_URL, json=json_response, status=200)
protocol = self.client.slb.virtual_server.vport.HTTP
if protocol.lower() == 'http':
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'name': 'test1_VPORT',
'ipinip': 1,
'no-dest-nat': 0,
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'ha-conn-mirror': 1,
'conn-limit': 50000,
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template',
'template-virtual-port': 'template_vp',
'template-http': None,
'template-policy': None,
}
}
else:
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'name': 'test1_VPORT',
'ipinip': 1,
'no-dest-nat': 0,
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'ha-conn-mirror': 1,
'conn-limit': 50000,
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template',
'template-virtual-port': 'template_vp',
'template-tcp': None,
'template-policy': None,
}
}
resp = self.client.slb.virtual_server.vport.update(
virtual_server_name=VSERVER_NAME,
name='test1_VPORT',
protocol=self.client.slb.virtual_server.vport.HTTP,
port='80',
service_group_name='pool1',
s_pers_name="test_s_pers_template",
c_pers_name="test_c_pers_template",
status=1,
autosnat=True,
ipinip=True,
ha_conn_mirror=1,
no_dest_nat=0,
conn_limit=50000,
source_nat_pool="test_nat_pool",
tcp_template="test_tcp_template",
udp_template="test_udp_template",
virtual_port_templates={
'template-virtual-port': 'template_vp'
},
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, OBJECT_URL)
self.assertEqual(json.loads(responses.calls[1].request.body), params)
@responses.activate
def test_virtual_port_delete(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {
'response': {'status': 'OK'}
}
responses.add(responses.DELETE, OBJECT_URL, json=json_response, status=200)
resp = self.client.slb.virtual_server.vport.delete(
VSERVER_NAME, 'test1_VPORT', self.client.slb.virtual_server.vport.HTTP, '80'
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.DELETE)
self.assertEqual(responses.calls[1].request.url, OBJECT_URL)
@responses.activate
def test_virtual_port_delete_not_found(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {
"response": {"status": "fail", "err": {"code": 1043, "msg": "Can not find the virtual server port"}}
}
responses.add(responses.DELETE, OBJECT_URL, json=json_response, status=200)
with self.assertRaises(acos_errors.ACOSException):
self.client.slb.virtual_server.vport.delete(
VSERVER_NAME, 'test1_VPORT', self.client.slb.virtual_server.vport.HTTP, '80'
)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.DELETE)
self.assertEqual(responses.calls[1].request.url, OBJECT_URL)
@responses.activate
def test_virtual_port_search(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {"foo": "bar"}
responses.add(responses.GET, OBJECT_URL, json=json_response, status=200)
resp = self.client.slb.virtual_server.vport.get(
VSERVER_NAME, 'test1_VPORT', protocol=self.client.slb.virtual_server.vport.HTTP, port='80'
)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.GET)
self.assertEqual(responses.calls[1].request.url, OBJECT_URL)
@responses.activate
def test_virtual_port_search_not_found(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {
"response": {"status": "fail", "err": {"code": 1043, "msg": "Can not find the virtual server port"}}
}
responses.add(responses.GET, OBJECT_URL, json=json_response, status=200)
with self.assertRaises(acos_errors.ACOSException):
self.client.slb.virtual_server.vport.get(
VSERVER_NAME, 'test1_VPORT', protocol=self.client.slb.virtual_server.vport.HTTP, port='80'
)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.GET)
self.assertEqual(responses.calls[1].request.url, OBJECT_URL)
@responses.activate
def test_virtual_port_all(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {"foo": "bar"}
responses.add(responses.GET, ALL_URL, json=json_response, status=200)
resp = self.client.slb.virtual_server.vport.all(VSERVER_NAME)
self.assertEqual(resp, json_response)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.GET)
self.assertEqual(responses.calls[1].request.url, ALL_URL)
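# The suite above depends only on unittest, mock and the ``responses`` library,
# so it should run under the standard runner, e.g. (hypothetical invocation):
#   python -m unittest acos_client.tests.unit.v30.test_slb_virtual_port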
| mdurrant-b3/acos-client | acos_client/tests/unit/v30/test_slb_virtual_port.py | Python | apache-2.0 | 22,343 | 0.001074 |
"""
Provides classes that represent complete taxonomies, built using components from
the taxacomponents module.
"""
from taxacomponents import Citation, RankTable, Taxon
from taxonvisitor import TaxonVisitor
from taxonvisitors_concrete import PrintTaxonVisitor, CSVTaxonVisitor
from nameresolve import CoLNamesResolver
class TaxonomyError(Exception):
"""
A basic exception class for reporting errors encountered while working with taxonomies.
"""
def __init__(self, msg):
msg = 'Taxonomy error:\n ' + msg
Exception.__init__(self, msg)
class TaxonomyBase:
# Define the "nil" UUID constant as returned by the uuid-osp Postgres module
# function uuid_nil().
#NIL_UUID = '00000000-0000-0000-0000-000000000000'
NIL_UUID = 0
def __init__(self, taxonomy_id, name='', ismaster=False, citation=None, roottaxon=None):
self.taxonomy_id = taxonomy_id
self.name = name
self.ismaster = ismaster
self.citation = citation
self.roottaxon = roottaxon
def loadFromDB(self, pgcur, taxanum=-1, maxdepth=-1):
"""
Attempts to load the taxonomy from a taxonomy database, including the full tree
of taxa. If taxanum > 0, then only taxanum taxa will be loaded. If maxdepth > -1,
the taxa tree will only be traversed to a depth of maxdepth.
"""
query = """SELECT name, citation_id, ismaster, root_tc_id
FROM taxonomies
WHERE taxonomy_id=?"""
pgcur.execute(query, (self.taxonomy_id,))
res = pgcur.fetchone()
if res == None:
raise TaxonomyError('Taxonomy ID ' + str(self.taxonomy_id) + ' was not found in the database.')
self.name = res[0]
self.ismaster = res[2]
roottc_id = res[3]
# Create the Citation object.
self.citation = Citation()
self.citation.loadFromDB(pgcur, res[1])
# Get the rank ID and taxonomy ID of the root taxon concept.
query = """SELECT tc.rank_id, tc.taxonomy_id
FROM taxon_concepts tc, ranks r
WHERE tc.tc_id=? AND tc.rank_id=r.rank_id"""
pgcur.execute(query, (roottc_id,))
res = pgcur.fetchone()
rankid = res[0]
root_taxonomy_id = res[1]
# Initialize the rank lookup table.
rankt = RankTable()
rankt.loadFromDB(pgcur)
# Load the taxa tree.
self.roottaxon = Taxon(self.taxonomy_id, rankid, rankt, roottaxo_id = root_taxonomy_id, isroot=True)
self.roottaxon.loadFromDB(pgcur, roottc_id, taxanum, maxdepth)
def persist(self):
"""
Persist the Taxonomy to the database. This method should be implemented by
concrete subclasses.
"""
pass
def __str__(self):
tstr = 'name: ' + self.name + '\nID: ' + str(self.taxonomy_id) + '\nmaster: '
if self.ismaster:
tstr += 'yes'
else:
tstr += 'no'
return tstr
def printTaxonomyInfo(self):
"""
Prints the metadata that describes this taxonomy.
"""
print '** Taxonomy information **'
print str(self)
print str(self.citation)
def printCSVTaxaTree(self, numtaxa=-1, maxdepth=-1):
"""
        Prints the tree of taxa for this taxonomy in "flat" format as CSV output. If
numtaxa > 0, only the first numtaxa taxa will be printed. If maxdepth > -1,
the taxa tree will only be traversed to a depth of maxdepth.
"""
if numtaxa > 0:
print '(Only printing first', numtaxa, 'taxa.)'
if maxdepth > -1:
print '(Only traversing taxa tree to a depth of ' + str(maxdepth) + '.)'
csvvisitor = CSVTaxonVisitor(numtaxa, maxdepth)
csvvisitor.visit(self.roottaxon)
def printTaxaTree(self, numtaxa=-1, maxdepth=-1):
"""
Prints the tree of taxa for this taxonomy. If numtaxa > 0, only the first numtaxa
taxa will be printed. If maxdepth > -1, the taxa tree will only be traversed to a
depth of maxdepth.
"""
print '** Taxa tree **'
if numtaxa > 0:
print '(Only printing first', numtaxa, 'taxa.)'
if maxdepth > -1:
print '(Only traversing taxa tree to a depth of ' + str(maxdepth) + '.)'
ptvisitor = PrintTaxonVisitor(numtaxa, maxdepth)
ptvisitor.visit(self.roottaxon)
def printAll(self, numtaxa=-1, maxdepth=-1):
"""
Prints a text representation of this taxonomy, including the tree of taxa.
If numtaxa > 0, only the first numtaxa taxa will be printed. If maxdepth > -1,
the taxa tree will only be traversed to a depth of maxdepth.
"""
self.printTaxonomyInfo()
print
self.printTaxaTree(numtaxa, maxdepth)
class Taxonomy(TaxonomyBase):
"""
A class that represents a single taxonomy in the MOL taxonomy database. Provides methods
to load a taxonomy from the database and persist a taxonomy to the database. Can also link
a taxonomy to the backbone taxonomy.
"""
def __init__(self, taxonomy_id, name='', ismaster=False, citation=None, roottaxon=None):
TaxonomyBase.__init__(self, taxonomy_id, name, ismaster, citation, roottaxon)
# A reference for the backbone taxonomy, which encompasses all other taxonomies.
# This reference is used if this taxonomy is linked to the backbone taxonomy.
self.bb_taxonomy = None
def linkToBackbone(self, pgcur, adjustdepth=True):
"""
Tries to connect this taxonomy to the backbone taxonomy, creating new nodes
in the backbone taxonomy, if needed, to link the two together. If adjustdepth
is True, the depth property of all nodes in the taxonomy are set to match the
correct depth relative to the root of the backbone taxonomy. Returns True if
the linking operation succeeded, False otherwise.
"""
bb_taxonomy = BackboneTaxonomy(pgcur)
if bb_taxonomy.linkTaxonomy(self):
self.bb_taxonomy = bb_taxonomy
if adjustdepth:
self.bb_taxonomy.setNodeDepths()
return True
else:
self.bb_taxonomy = None
return False
def getBackboneTaxonomy(self):
"""
Returns a reference to the backbone taxonomy object that links this taxonomy
to the MOL backbone taxonomy.
"""
return self.bb_taxonomy
def persist(self, pgcur, printprogress=False):
"""
Writes the taxonomy information to the database, if it does not already
exist. This includes calling the persist() methods on the Citation and
Taxon tree associated with this Taxonomy object.
"""
# First, check if this taxonomy already exists in the database.
query = """SELECT taxonomy_id
FROM taxonomies
WHERE taxonomy_id=? AND ismaster=?"""
pgcur.execute(query, (self.taxonomy_id, self.ismaster))
res = pgcur.fetchone()
if res == None:
# Write the citation information to the database, if needed.
citation_id = self.citation.persist(pgcur)
# Create the initial database entry for the taxonomy metadata so that the
# foreign key constraint for the child taxon concepts can be satisfied.
query = """INSERT INTO taxonomies
(taxonomy_id, name, citation_id, ismaster, root_tc_id)
VALUES (?, ?, ?, ?, ?)"""
pgcur.execute(query, (self.taxonomy_id, self.name, citation_id, self.ismaster, None))
# Make sure all taxon concepts, including those from the backbone taxonomy,
# are persisted to the database. Use the "nil" UUID as the parent_id for
# the root of the taxonomy if there is not an existing root entry.
if self.bb_taxonomy != None:
self.bb_taxonomy.roottaxon.persist(pgcur, self.NIL_UUID, printprogress,
self.roottaxon.depth)
else:
self.roottaxon.persist(pgcur, self.NIL_UUID, printprogress, self.roottaxon.depth)
# Get the ID of the root taxon.
root_tcid = self.roottaxon.existsInDB(pgcur)
# Update the taxonomy metadata entry with the root taxon concept ID.
query = """UPDATE taxonomies
SET root_tc_id=?
WHERE taxonomy_id=?"""
pgcur.execute(query, (root_tcid, self.taxonomy_id))
pgcur.connection.commit()
elif printprogress:
print ('The metadata for taxonomy "' + self.name + '" (ID ' + str(self.taxonomy_id) +
') already exist in the database; no changes were made.')
def printAll(self, numtaxa=-1, maxdepth=-1):
"""
Prints a text representation of this taxonomy, including the tree of taxa.
If numtaxa > 0, only the first numtaxa taxa will be printed. If maxdepth > -1,
the taxa tree will only be traversed to a depth of maxdepth. Unlike the method
in the base class, this method accounts for the possibility of this taxonomy
being linked to the backbone taxonomy.
"""
self.printTaxonomyInfo()
print
if self.bb_taxonomy != None:
self.bb_taxonomy.printTaxaTree(numtaxa, maxdepth)
else:
self.printTaxaTree(numtaxa, maxdepth)
class DepthAdjustVisitor(TaxonVisitor):
"""
Sets the "depth" values for all Taxon objects in a taxa tree, using an initial
starting depth value.
"""
def __init__(self, startdepth):
"""
Assigns startdepth as the "depth" value for the top-level Taxon object. All
other "depth" values are calculated relative to startdepth.
"""
TaxonVisitor.__init__(self)
self.startdepth = startdepth
def processTaxon(self, taxon, depth):
taxon.depth = self.startdepth + depth
class BackboneTaxonomy(TaxonomyBase):
"""
A special case of Taxonomy that represents the MOL backbone taxonomy. Provides
methods to link other taxonomies to the backbone taxonomy. Does not provide a
persist() method because the backbone taxonomy metadata are set when the database
tables are created.
"""
def __init__(self, pgcur):
"""
Initialize the backbone Taxonomy object and automatically load it from the
database, but load only the root node by default.
"""
self.pgcur = pgcur
# The ID of the backbone taxonomy is always 1.
TaxonomyBase.__init__(self, 1)
self.loadFromDB(pgcur)
def loadFromDB(self, pgcur, taxanum=-1, maxdepth=0):
"""
Exactly the same as loadFromDB() from the superclass, except loads only the root
taxonomy node (i.e., Eukaryota) by default.
"""
TaxonomyBase.loadFromDB(self, pgcur, taxanum, maxdepth)
def linkTaxonomy(self, taxonomy):
"""
Given a Taxonomy object, this method searches for the root taxon
concept in the database, verifies whether it is already connected to
the MOL backbone taxonomy, and if not, attempts to create the Taxon
objects needed to link it to the backbone taxonomy. To do this, the
method loads all ancestors of the root of the provided taxonomy, and
checks if the top-most ancestor is the root of the backbone taxonomy.
        If it is not, then Catalog of Life is used to try to infer the missing
taxon nodes that connect the target taxonomy to the backbone taxonomy.
        If the linking is successful, the method returns True; otherwise, False
is returned.
"""
# Load any parent links to the target taxonomy from the database.
topnode = self.getLinksFromDB(taxonomy)
# See if we made it back to the root of the backbone taxonomy.
if topnode.equals(self.roottaxon):
# We did, so simply link the child of the returned node to our root taxon.
self.roottaxon.addChild(topnode.children[0])
success = True
else:
# Otherwise, try to use Catalog of Life to fill in any missing links.
success = self._buildCoLLinks(topnode)
return success
def _buildCoLLinks(self, taxon):
"""
Uses Catalog of Life to fill in missing taxa needed to link the target taxon to the
MOL backbone taxonomy. If linking was successful, the target taxon will be connected
to the backbone root taxon by one or more linking taxa. Returns True on success;
False otherwise.
"""
# Use the Catalog of Life names resolver to try to get higher taxonomy information
# for the taxon.
resolver = CoLNamesResolver()
searchres = resolver.searchCoLForTaxon(taxon, taxon.name.namestr, True)
if searchres == None:
return False
res, sname, srank, authorinfo = searchres
# Process each parent taxon in the CoL classification, creating a chain of Taxon
# objects to capture the higher taxonomy. Because the name resolver search method
# verifies that the kingdom is correct, we already know that we are connecting the
# taxonomy to the correct kingdom.
taxaxml = res.find('./classification')
# It is important that we use the rank system from the taxonomy (not the backbone)
# to ensure that rank name lookups retrieve the correct ID.
tranksys = taxon.ranksys
ranktable = taxon.rankt
curnode = self.roottaxon
for taxonxml in taxaxml:
namestr = taxonxml.find('name').text
rankstr = taxonxml.find('rank').text
child = curnode.createChild(ranktable.getID(rankstr, tranksys), namestr)
#print child
curnode = child
# Link the root of the target taxonomy to the backbone taxonomy.
curnode.addChild(taxon)
return True
def getLinksFromDB(self, taxonomy):
"""
Starting from the root node of the provided taxonomy, follows parent
links upward, building a chain of taxon objects until the top-most
parent is reached. Returns the top-most node that could be reached by
following the links upward.
"""
# See if the root taxon_concept already has a parent.
curnode = taxonomy.roottaxon
parent_id = taxonomy.roottaxon.getParentIDFromDB(self.pgcur)
# Follow parent links upwards until we reach the root or any other node
# that has no parent or does not yet exist in the database.
while parent_id != None and parent_id != self.NIL_UUID:
# Create the parent node and load it from the database.
parent = Taxon(curnode.taxonomy_id, curnode.rank_id, curnode.rankt)
parent.loadFromDB(self.pgcur, parent_id, maxdepth=0)
parent.addChild(curnode)
curnode = parent
parent_id = curnode.getParentIDFromDB(self.pgcur)
return curnode
def setNodeDepths(self):
"""
After linking a new taxonomy to the backbone taxonomy, the values of the depth
properties on the Taxon objects in the target taxonomy are likely to be incorrect.
This method will visit all nodes and set the correct value of the depth property
for each node.
"""
depthvisitor = DepthAdjustVisitor(0)
depthvisitor.visit(self.roottaxon)
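# Rough usage sketch (assumes ``cur`` is a cursor over the taxonomy database
# schema this module expects; signatures as defined above):
#
#   tax = Taxonomy(taxonomy_id=2)
#   tax.loadFromDB(cur)
#   if tax.linkToBackbone(cur):
#       tax.printAll(numtaxa=20, maxdepth=3)
#   tax.persist(cur, printprogress=True)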
| stuckyb/sqlite_taxonomy | utilities/taxolib/taxonomy.py | Python | gpl-3.0 | 15,593 | 0.005066 |
from gwpy.plot import Plot
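# 'noise', 'signal' and 'data' are assumed to be TimeSeries objects built in
# the earlier parts of this example (this file is part 5 of the series).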
plot = Plot(noise, signal, data, separate=True, sharex=True, sharey=True)
plot.gca().set_epoch(0)
plot.show() | gwpy/gwpy.github.io | docs/latest/examples/timeseries/inject-5.py | Python | gpl-3.0 | 136 | 0.007353 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author Pradeep Jairamani; github.com/pradeepjairamani
import socket
import socks
import time
import json
import threading
import string
import requests
import random
import os
from core.alert import *
from core.targets import target_type
from core.targets import target_to_host
from core.load_modules import load_file_path
from lib.socks_resolver.engine import getaddrinfo
from core._time import now
from core.log import __log_into_file
from core._die import __die_failure
from lib.scan.wp_timthumbs import wp_timthumbs
from lib.payload.wordlists import useragents
from core.compatible import version
def extra_requirements_dict():
return {
"wp_timthumb_scan_http_method": ["GET"],
"wp_timthumb_scan_random_agent": ["True"],
}
def check(target, user_agent, timeout_sec, log_in_file, language, time_sleep, thread_tmp_filename, retries,
http_method, socks_proxy, scan_id, scan_cmd):
status_codes = [200, 401, 403]
directory_listing_msgs = ["<title>Index of /", "<a href=\"\\?C=N;O=D\">Name</a>", "Directory Listing for",
"Parent Directory</a>", "Last modified</a>", "<TITLE>Folder Listing.",
"- Browsing directory "]
time.sleep(time_sleep)
try:
if socks_proxy is not None:
socks_version = socks.SOCKS5 if socks_proxy.startswith(
'socks5://') else socks.SOCKS4
socks_proxy = socks_proxy.rsplit('://')[1]
if '@' in socks_proxy:
socks_username = socks_proxy.rsplit(':')[0]
socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
int(socks_proxy.rsplit(':')[-1]), username=socks_username,
password=socks_password)
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
else:
socks.set_default_proxy(socks_version, str(
socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
n = 0
while 1:
try:
if http_method == "GET":
r = requests.get(
target, timeout=timeout_sec, headers=user_agent)
elif http_method == "HEAD":
r = requests.head(
target, timeout=timeout_sec, headers=user_agent)
content = r.content
break
except:
n += 1
if n == retries:
warn(messages(language, "http_connection_timeout").format(target))
return 1
if version() == 3:
content = content.decode('utf8')
if r.status_code in status_codes:
info(messages(language, "found").format(
target, r.status_code, r.reason))
__log_into_file(thread_tmp_filename, 'w', '0', language)
data = json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '',
'PORT': "", 'TYPE': 'wp_timthumb_scan',
'DESCRIPTION': messages(language, "found").format(target, r.status_code, r.reason),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
__log_into_file(log_in_file, 'a', data, language)
if r.status_code == 200:
for dlmsg in directory_listing_msgs:
if dlmsg in content:
info(messages(language, "directoy_listing").format(target))
data = json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '',
'PORT': "", 'TYPE': 'wp_timthumb_scan',
'DESCRIPTION': messages(language, "directoy_listing").format(target), 'TIME': now(),
'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
__log_into_file(log_in_file, 'a', data, language)
break
return True
except:
return False
def test(target, retries, timeout_sec, user_agent, http_method, socks_proxy, verbose_level, trying, total_req, total,
num, language):
if verbose_level > 3:
info(messages(language, "trying_message").format(trying, total_req, num, total, target_to_host(target), "default_port",
'wp_timthumb_scan'))
if socks_proxy is not None:
socks_version = socks.SOCKS5 if socks_proxy.startswith(
'socks5://') else socks.SOCKS4
socks_proxy = socks_proxy.rsplit('://')[1]
if '@' in socks_proxy:
socks_username = socks_proxy.rsplit(':')[0]
socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
int(socks_proxy.rsplit(':')[-1]), username=socks_username,
password=socks_password)
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
else:
socks.set_default_proxy(socks_version, str(
socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
n = 0
while 1:
try:
if http_method == "GET":
r = requests.get(target, timeout=timeout_sec,
headers=user_agent)
elif http_method == "HEAD":
r = requests.head(target, timeout=timeout_sec,
headers=user_agent)
return 0
except:
n += 1
if n == retries:
return 1
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,
verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd): # Main function
    if target_type(target) in ('SINGLE_IPv4', 'DOMAIN', 'HTTP', 'SINGLE_IPv6'):
# rand useragent
user_agent_list = useragents.useragents()
http_methods = ["GET", "HEAD"]
user_agent = {'User-agent': random.choice(user_agent_list)}
# requirements check
new_extra_requirements = extra_requirements_dict()
if methods_args is not None:
for extra_requirement in extra_requirements_dict():
if extra_requirement in methods_args:
new_extra_requirements[
extra_requirement] = methods_args[extra_requirement]
extra_requirements = new_extra_requirements
if extra_requirements["wp_timthumb_scan_http_method"][0] not in http_methods:
warn(messages(language, "wp_timthumb_scan_get"))
extra_requirements["wp_timthumb_scan_http_method"] = ["GET"]
random_agent_flag = True
if extra_requirements["wp_timthumb_scan_random_agent"][0] == "False":
random_agent_flag = False
threads = []
total_req = len(wp_timthumbs.timthumb())
thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(20))
__log_into_file(thread_tmp_filename, 'w', '1', language)
trying = 0
if target_type(target) != "HTTP":
target = 'https://' + target
if test(str(target), retries, timeout_sec, user_agent, extra_requirements["wp_timthumb_scan_http_method"][0],
socks_proxy, verbose_level, trying, total_req, total, num, language) == 0:
keyboard_interrupt_flag = False
scan_list = wp_timthumbs.timthumb()
for idir in scan_list:
if random_agent_flag:
user_agent = {'User-agent': random.choice(user_agent_list)}
t = threading.Thread(target=check,
args=(
target + '/' + idir, user_agent, timeout_sec, log_in_file, language,
time_sleep, thread_tmp_filename, retries,
extra_requirements[
"wp_timthumb_scan_http_method"][0],
socks_proxy, scan_id, scan_cmd))
threads.append(t)
t.start()
trying += 1
if verbose_level > 3:
info(messages(language, "trying_message").format(trying, total_req, num, total, target + "/" + idir,
"default_port", 'wp_timthumb_scan'))
while 1:
try:
if threading.activeCount() >= thread_number:
time.sleep(0.01)
else:
break
except KeyboardInterrupt:
keyboard_interrupt_flag = True
break
if keyboard_interrupt_flag:
break
else:
warn(messages(language, "open_error").format(target))
# wait for threads
kill_switch = 0
kill_time = int(
timeout_sec / 0.1) if int(timeout_sec / 0.1) != 0 else 1
while 1:
time.sleep(0.1)
kill_switch += 1
try:
if threading.activeCount() == 1 or kill_switch == kill_time:
break
except KeyboardInterrupt:
break
thread_write = int(open(thread_tmp_filename).read().rsplit()[0])
if thread_write == 1:
info(messages(language, "directory_file_404").format(
target, "default_port"))
if verbose_level != 0:
data = json.dumps(
{'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'wp_timthumb_scan',
'DESCRIPTION': messages(language, "no_open_ports"), 'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id,
'SCAN_CMD': scan_cmd})
__log_into_file(log_in_file, 'a', data, language)
os.remove(thread_tmp_filename)
else:
warn(messages(language, "input_target_error").format(
'wp_timthumb_scan', target))
| Nettacker/Nettacker | lib/scan/wp_timthumbs/engine.py | Python | gpl-3.0 | 10,981 | 0.003461 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
import_csv.py
~~~~~~~~~~~~~~~~~~~~
Import csv file into database.
The CSV file should be in this format:
Datetime;price;quantity;mileage
2013/10/03 07:00:00;34.01;25.90;149340
:copyright: (c) 2014 by Patrick Rabu.
:license: GPL-3, see LICENSE for more details.
"""
import sys
import time
import datetime
import locale
import csv
import sqlite3
csvfile = sys.argv[1]
db = sqlite3.connect(sys.argv[2])
car_id = sys.argv[3]
cursor = db.cursor()
with open(csvfile, 'rb') as f:
locale.setlocale(locale.LC_ALL, 'fra_fra')
reader = csv.reader(f, delimiter=';', quoting=csv.QUOTE_NONE)
reader.next() # Skip the first row
for row in reader:
dt = datetime.datetime.strptime(row[0], "%Y/%m/%d %H:%M:%S")
price = locale.atof(row[1])
quantity = locale.atof(row[2])
mileage = locale.atoi(row[3])
cursor.execute('''insert into refills(datetime, quantity, price, mileage, car_id)
values (?, ?, ?, ?, ?)''', (dt, quantity, price, mileage, car_id))
db.commit()
db.close()
| patrabu/carbu | import_csv.py | Python | gpl-3.0 | 1,167 | 0.007712 |
'Automated Valuation Model'
import pdb
import numpy as np
import pandas as pd
from pprint import pprint
import sklearn
import sklearn.ensemble
import sklearn.linear_model
import sklearn.preprocessing
from columns_contain import columns_contain
import AVM_elastic_net
import AVM_gradient_boosting_regressor
import AVM_random_forest_regressor
from Features import Features
cc = columns_contain
def avm_scoring(estimator, df):
'return error from using fitted estimator with test data in the dataframe'
# TODO: make a static method of class AVM
assert isinstance(estimator, AVM)
X, y = estimator.extract_and_transform(df)
assert len(y) > 0
y_hat = estimator.predict(df)
errors = y_hat - y
median_abs_error = np.median(np.abs(errors))
return -median_abs_error # because GridSearchCV chooses the model with the highest score
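# Illustrative note (not part of the original module): if the absolute errors on a
# test set are [10, 20, 30], the median is 20 and avm_scoring returns -20, so a model
# with a smaller typical error receives the larger (less negative) score, which is
# the "higher is better" convention grid search expects.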
class AVM(sklearn.base.BaseEstimator):
'one estimator for several underlying models'
def __init__(self,
model_name=None, # parameters for all models
forecast_time_period=None,
n_months_back=None,
random_state=None,
verbose=0,
features_group=None,
implementation_module=None,
alpha=None, # for ElasticNet
l1_ratio=None,
units_X=None,
units_y=None,
n_estimators=None, # for RandomForestRegressor
max_depth=None,
max_features=None,
learning_rate=None, # for GradientBoostingRegressor
loss=None,
):
# NOTE: just capture the parameters (to conform to the sklearn protocol)
self.model_name = model_name
self.forecast_time_period = forecast_time_period
self.n_months_back = n_months_back
self.random_state = random_state
self.verbose = verbose
self.features_group = features_group
self.implementation_module = implementation_module
self.alpha = alpha
self.l1_ratio = l1_ratio
self.units_X = units_X
self.units_y = units_y
self.n_estimators = n_estimators
self.max_depth = max_depth
self.max_features = max_features
self.learning_rate = learning_rate
self.loss = loss
def fit(self, samples):
'convert samples to X,Y and fit them'
self.implementation_module = {
'ElasticNet': AVM_elastic_net,
'GradientBoostingRegressor': AVM_gradient_boosting_regressor,
'RandomForestRegressor': AVM_random_forest_regressor,
}[self.model_name]
X_train, y_train = self.extract_and_transform(samples)
fitted = self.implementation_module.fit(self, X_train, y_train)
return fitted.model # scikit learn's fitted model
def get_attributes(self):
'return both sets of attributes, with None if not used by that model'
pdb.set_trace()
attribute_names = (
'coef_', 'sparse_coef_', 'intercept_', 'n_iter_', # for linear
'estimators_', 'feature_importances_', 'oob_score_', 'oob_prediction_', # for random forest
)
return {name: getattr(self.model, name, None) for name in attribute_names}
def extract_and_transform(self, samples, transform_y=True):
'return X and y'
result = self.implementation_module.extract_and_transform(self, samples, transform_y)
return result
def predict(self, samples):
X_test, y_test = self.extract_and_transform(samples, transform_y=False)
assert y_test is None
return self.implementation_module.predict(self, X_test)
def setattr(self, parameter, value):
setattr(self, parameter, value)
return self
if False:
pd()
pprint()
Features()
| rlowrance/re-avm | AVM.py | Python | bsd-3-clause | 3,934 | 0.001525 |
#!/usr/bin/env python
import ldapdomaindump
ldapdomaindump.main()
| dirkjanm/ldapdomaindump | ldapdomaindump/__main__.py | Python | mit | 66 | 0 |
#!/usr/bin/python3
import os
import os.path
import cgi, cgitb
import re
import pickle
#own packages
import dbcPattern
def dbc_main(): # NEW except for the call to processInput
form = cgi.FieldStorage() # standard cgi script lines to here!
# use format of next two lines with YOUR names and default data
filedata = form['upload']
if filedata.file:
contents, msg_list = processInput(filedata.file) # process input into a page
print(contents)
return msg_list
return -1
def processInput(file):
    '''Process input parameters and return the final page as a string.'''
    sig_num = 0
    sig_list = []
if file: #field really is an upload
#msg_list=[{mesg1}{mesg2}{mesg3}{...}]
#Messages has numbered dicts signals in them
msg_list = dbcPattern.dbcDataReader(file)
for message in msg_list:
for j in range(message['sig_count']):
sig_num=sig_num+1
sig_list.append(message[j]['sig_name'])
return createHTML(sig_num, sig_list),msg_list
def createHTML(sig_num, sig_list):
signale=""
i=0
file=open("Part1.txt")
html_string = file.read()
file.close()
for sig_name in sorted(sig_list, key=str.lower):
signale+="{ sig_sel: '%s'}," %(sig_name)
# print(sig_name)
html_string+=signale[:-1]
# print(html_string)
file2=open("Part2.txt")
html_string+=file2.read()
file2.close()
file=open("htmltext.html",'w')
file.write(html_string)
file.close()
return html_string
# Must be copied into the main program later
try: # NEW
cgitb.enable()
print("Content-Type: text/html;charset:UTF-8") # say generating html
print("\n\n")
msg_list=dbc_main()
filename=os.path.join('/home/pi/datalogger/loggerconfigs/','testdump.txt')
with open(filename, 'wb') as file:
pickle.dump(msg_list, file)
except:
cgi.print_exception() # catch and print errors
| mauerflitza/Probieren2 | Webpage/cgi-bin/upload.py | Python | mit | 1,801 | 0.048333 |
# Colors
DARKBGCOLOR = tuple([93, 93, 93])
MEDBGCOLOR = tuple([73, 73, 73])
MAYABGCOLOR = tuple([68, 68, 68])
# DPI
DEFAULT_DPI = 96
# Pixel Size is not handled by dpi, use utils.dpiScale()
MARGINS = (2, 2, 2, 2) # default margins left, top, right, bottom
SPACING = 2
SSML = 4 # the regular spacing of each widget, spacing is between each sub widget
SREG = 6 # the regular spacing of each widget, spacing is between each sub widget
SLRG = 10 # larger spacing of each widget, spacing is between each sub widget
SVLRG = 15 # very large spacing of each widget, spacing is between each sub widget
TOPPAD = 10 # padding between the top widget and the top of frame. ie top of a toolset
BOTPAD = 5 # padding between the bottom widget and the bottom of frame. ie bot of a toolset
REGPAD = 10 # padding between widgets
SMLPAD = 5
LRGPAD = 15
WINSIDEPAD = 10 # the overall window each side
WINTOPPAD = 10 # the overall window padding at the top of frame
WINBOTPAD = 10 # the overall window padding at the bottom of frame
# Button Width Sizes
BTN_W_ICN_SML = 10
BTN_W_ICN_REG = 20
BTN_W_ICN_LRG = 40
BTN_W_REG_SML = 90
BTN_W_REG_LRG = 180
# Button Styles
BTN_DEFAULT = 0 # Default zoo extended button with optional text or an icon.
BTN_TRANSPARENT_BG = 1 # Default zoo extended button w transparent bg.
BTN_ICON_SHADOW = 2 # Main zoo IconPushButton button (icon in a colored box) with shadow underline
BTN_DEFAULT_QT = 3 # Default style uses vanilla QPushButton and not zoo's extended button
BTN_ROUNDED = 4 # Rounded button stylesheeted bg color and stylesheeted icon colour
# Colors
COLOR_ERROR = "00ff06" # fluorescent green
COLOR_ADMIN_GREEN = "17a600"
COLOR_ADMIN_GREEN_RGB = (23, 166, 0)
| dsparrow27/zoocore | zoo/libs/pyqt/uiconstants.py | Python | gpl-3.0 | 1,709 | 0.005266 |
###############################
# old_password_generator.py #
###############################
import string, random, sys
SELECT = string.ascii_letters + string.punctuation + string.digits
SAMPLE = random.SystemRandom().sample
def main():
while True:
size = get_size()
password = generate_pw(size)
print_pause(password)
def get_size():
while True:
try:
size = int(input('Size: '))
except ValueError:
print('Please enter a number.')
except EOFError:
sys.exit()
else:
if 1 <= size <= 80:
return size
print('Valid number range is 1 - 80.')
def generate_pw(size):
password = ''.join(SAMPLE(SELECT, size))
while not approved(password):
password = ''.join(SAMPLE(SELECT, size))
return password
def approved(password):
group = select(password[0])
for character in password[1:]:
trial = select(character)
if trial is group:
return False
group = trial
return True
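# Example (illustrative only): approved("aB3!") is True because consecutive characters
# alternate between the lowercase, uppercase, digit and punctuation groups, while
# approved("ab3!") is False because 'a' and 'b' both come from the lowercase group.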
def select(character):
for group in (string.ascii_uppercase,
string.ascii_lowercase,
string.punctuation,
string.digits):
if character in group:
return group
raise ValueError('Character was not found in any group!')
def print_pause(*values, sep=' ', end='\n', file=sys.stdout):
print(*values, sep=sep, end=end, file=file)
try:
input()
except EOFError:
pass
if __name__ == '__main__':
main()
###############################
# new_password_generator.py #
###############################
from random import SystemRandom
from string import ascii_lowercase, ascii_uppercase, digits, punctuation
CHOICE = SystemRandom().choice
GROUPS = ascii_lowercase, ascii_uppercase, digits, punctuation
def main():
while True:
print('Code:', make_password(get_size()))
def get_size():
while True:
try:
size = int(input('Size: '))
except ValueError:
print('Please enter a number.')
except EOFError:
raise SystemExit()
else:
if 10 <= size <= 80:
return size
print('Valid number range is 10 - 80.')
def make_password(size):
while True:
password = ''
pool = using = tuple(map(set, GROUPS))
while True:
selection = CHOICE(using)
character = CHOICE(tuple(selection))
password += character
if len(password) == size:
return password
selection.remove(character)
if not selection:
break
using = tuple(group for group in pool if group is not selection)
if __name__ == '__main__':
main()
| ActiveState/code | recipes/Python/578171_Just_Another_Password_Generator/recipe-578171.py | Python | mit | 2,824 | 0.004958 |
import optparse
import os
import shutil
import sys
import unittest
from itertools import izip
from . import util
from . import stats
#=============================================================================
# common utility functions for testing
def clean_dir(path):
if os.path.exists(path):
shutil.rmtree(path)
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def make_clean_dir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def fequal(f1, f2, rel=.0001, eabs=1e-12):
"""assert whether two floats are approximately equal"""
if f1 == f2:
return
if f2 == 0:
err = f1
elif f1 == 0:
err = f2
else:
err = abs(f1 - f2) / abs(f2)
x = (err < rel)
if abs(f1 - f2) < eabs:
return
assert x, "%e != %e [rel=%f, abs=%f]" % (f1, f2, err, abs(f1 - f2))
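# Examples (illustrative only): fequal(1.0, 1.00005) passes because the relative error
# is about 5e-5 < rel; fequal(0.0, 1e-13) passes via the absolute tolerance eabs; and
# fequal(1.0, 1.1) raises an AssertionError.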
def fequals(f1, f2, rel=.0001, eabs=1e-12):
for i, j in izip(f1, f2):
fequal(i, j, rel=rel, eabs=eabs)
def integrate(func, a, b, step):
return sum(func(i) * step for i in util.frange(a, b, step))
def eq_sample_pdf(samples, pdf,
ndivs=20, start=-util.INF, end=util.INF, pval=.05,
step=None):
"""Asserts a sample matches a probability density distribution"""
if step is None:
step = (max(samples) - min(samples)) / float(ndivs)
cdf = lambda x, params: integrate(pdf, x, x+step, step/10.0)
chi2, p = stats.chi_square_fit(cdf, [], samples,
ndivs=ndivs, start=start, end=end)
assert p >= pval, p
def eq_sample_pmf(samples, pmf, pval=.05):
"""Asserts a sample matches a probability mass distribution"""
import scipy.stats
hist = util.hist_dict(samples)
total = sum(hist.itervalues())
observed = []
expected = []
for sample, count in hist.iteritems():
if count >= 5:
observed.append(count)
expected.append(pmf(sample) * total)
chi2, p = scipy.stats.chisquare(
scipy.array(observed), scipy.array(expected))
assert p >= pval, p
_do_pause = True
def pause(text="press enter to continue: "):
"""Pause until the user presses enter"""
if _do_pause:
sys.stderr.write(text)
raw_input()
def set_pausing(enabled=True):
global _do_pause
_do_pause = enabled
#=============================================================================
# common unittest functions
def list_tests(stack=0):
# get environment
var = __import__("__main__").__dict__
for name, obj in var.iteritems():
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
for attr in dir(obj):
if attr.startswith("test"):
print "%s.%s" % (name, attr),
doc = getattr(obj, attr).__doc__
if doc:
print "--", doc.split("\n")[0]
else:
print
def test_main():
o = optparse.OptionParser()
o.add_option("-v", "--verbose", action="store_true",
help="Verbose output")
o.add_option("-q", "--quiet", action="store_true",
help="Minimal output")
o.add_option("-l", "--list_tests", action="store_true")
o.add_option("-p", "--pause", action="store_true")
conf, args = o.parse_args()
if conf.list_tests:
list_tests(1)
return
if conf.pause:
set_pausing(True)
else:
set_pausing(False)
# process unittest arguments
argv = [sys.argv[0]]
if conf.verbose:
argv.append("-v")
if conf.quiet:
argv.append("-q")
argv.extend(args)
# run unittest
unittest.main(argv=argv)
| wutron/compbio | rasmus/testing.py | Python | mit | 3,789 | 0.000792 |
import json
import os
from AppKit import NSApplication, NSStatusBar, NSMenu, NSMenuItem, NSVariableStatusItemLength, NSImage
from PyObjCTools import AppHelper
from project_cron.models import Schedule
from threading import Timer
from project_cron.utils import logutil
class App(NSApplication):
def finishLaunching(self):
# Make statusbar item
statusbar = NSStatusBar.systemStatusBar()
self.statusitem = statusbar.statusItemWithLength_(NSVariableStatusItemLength)
self.icon = NSImage.alloc().initByReferencingFile_('icon.png')
self.icon.setScalesWhenResized_(True)
self.icon.setSize_((20, 20))
self.statusitem.setImage_(self.icon)
self._schedules = []
self._menu_items = []
self._initialize_schedules()
self._initialize_menu()
self._timer = Timer(60, self.timer_callback)
self._timer.start()
def _initialize_schedules(self):
USER_ROOT = os.path.expanduser('~')
DOCUMENTS = os.path.join(USER_ROOT, 'Documents')
SCHEDULES = os.path.join(DOCUMENTS, 'schedules.json')
schedules = json.load(open(SCHEDULES, encoding='utf8'))
for raw_info in schedules:
self._schedules.append(Schedule(raw_info))
def _initialize_menu(self):
self.menubarMenu = NSMenu.alloc().init()
for schedule in self._schedules:
menu_item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(schedule.name, 'execute:', '')
self._menu_items.append(menu_item)
self.menubarMenu.addItem_(menu_item)
menu_item = NSMenuItem.separatorItem()
self.menubarMenu.addItem_(menu_item)
self.quit = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'terminate:', '')
self.menubarMenu.addItem_(self.quit)
self.statusitem.setMenu_(self.menubarMenu)
self.statusitem.setToolTip_('Crow')
def timer_callback(self):
self._timer = None
for schedule in self._schedules:
try:
schedule.execute()
except:
import traceback
logutil.error(schedule.name, traceback.format_exc())
interval = 60
self._timer = Timer(interval, self.timer_callback)
self._timer.start()
def execute_(self, notification):
for schedule in self._schedules:
if schedule.name == notification.title():
try:
schedule.execute_actions()
except:
import traceback
logutil.error(schedule.name, traceback.format_exc())
schedule._reset()
if __name__ == "__main__":
app = App.sharedApplication()
AppHelper.runEventLoop()
| ecleya/project_cron | main.py | Python | mit | 2,772 | 0.002165 |
import sys
current_word = None
current_count = 0
word = None
for line in sys.stdin:
line = line.strip()
    word, count = line.split('\t', 1)
    try:
        count = int(count)
    except ValueError:
        continue
    if current_word == word:
        current_count += count
    else:
        if current_word:
            print '%s\t%s' % (current_word, current_count)
        current_count = count
        current_word = word
if current_word == word:
print '%s\t%s' % (current_word,current_count) | MihawkHu/CS433_ParallerProgramming_Project | Project4/wordcount/python_wordcount/reducer.py | Python | mit | 574 | 0.057491 |
import uuid
import arrow
from collections import namedtuple
HEADERS = ('start', 'stop', 'project', 'id', 'tags', 'updated_at')
class Frame(namedtuple('Frame', HEADERS)):
def __new__(cls, start, stop, project, id, tags=None, updated_at=None,):
try:
if not isinstance(start, arrow.Arrow):
start = arrow.get(start)
if not isinstance(stop, arrow.Arrow):
stop = arrow.get(stop)
except RuntimeError as e:
from .watson import WatsonError
raise WatsonError("Error converting date: {}".format(e))
start = start.to('local')
stop = stop.to('local')
if updated_at is None:
updated_at = arrow.utcnow()
elif not isinstance(updated_at, arrow.Arrow):
updated_at = arrow.get(updated_at)
if tags is None:
tags = []
return super(Frame, cls).__new__(
cls, start, stop, project, id, tags, updated_at
)
def dump(self):
start = self.start.to('utc').timestamp
stop = self.stop.to('utc').timestamp
updated_at = self.updated_at.timestamp
return (start, stop, self.project, self.id, self.tags, updated_at)
@property
def day(self):
return self.start.floor('day')
def __lt__(self, other):
return self.start < other.start
    def __le__(self, other):
        return self.start <= other.start
def __gt__(self, other):
return self.start > other.start
    def __ge__(self, other):
        return self.start >= other.start
class Span(object):
def __init__(self, start, stop, timeframe='day'):
self.timeframe = timeframe
self.start = start.floor(self.timeframe)
self.stop = stop.ceil(self.timeframe)
def __contains__(self, frame):
return frame.start >= self.start and frame.stop <= self.stop
class Frames(object):
def __init__(self, frames=None):
if not frames:
frames = []
rows = [Frame(*frame) for frame in frames]
self._rows = rows
self.changed = False
def __len__(self):
return len(self._rows)
def __getitem__(self, key):
if key in HEADERS:
return tuple(self._get_col(key))
elif isinstance(key, int):
return self._rows[key]
else:
return self._rows[self._get_index_by_id(key)]
def __setitem__(self, key, value):
self.changed = True
if isinstance(value, Frame):
frame = value
else:
frame = self.new_frame(*value)
if isinstance(key, int):
self._rows[key] = frame
else:
frame = frame._replace(id=key)
try:
self._rows[self._get_index_by_id(key)] = frame
except KeyError:
self._rows.append(frame)
def __delitem__(self, key):
self.changed = True
if isinstance(key, int):
del self._rows[key]
else:
del self._rows[self._get_index_by_id(key)]
def _get_index_by_id(self, id):
try:
return next(
i for i, v in enumerate(self['id']) if v.startswith(id)
)
except StopIteration:
raise KeyError("Frame with id {} not found.".format(id))
def _get_col(self, col):
index = HEADERS.index(col)
for row in self._rows:
yield row[index]
def add(self, *args, **kwargs):
self.changed = True
frame = self.new_frame(*args, **kwargs)
self._rows.append(frame)
return frame
def new_frame(self, project, start, stop, tags=None, id=None,
updated_at=None):
if not id:
id = uuid.uuid4().hex
return Frame(start, stop, project, id, tags=tags,
updated_at=updated_at)
def dump(self):
return tuple(frame.dump() for frame in self._rows)
def filter(self, projects=None, tags=None, span=None):
return (
frame for frame in self._rows
if (projects is None or frame.project in projects) and
(tags is None or any(tag in frame.tags for tag in tags)) and
(span is None or frame in span)
)
def span(self, start, stop):
return Span(start, stop)
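# Minimal usage sketch (illustrative only, not part of the original module):
#
#   frames = Frames()
#   frame = frames.add('my-project', '2014-04-01 09:00:00', '2014-04-01 10:00:00',
#                      tags=['docs'])
#   frames[frame.id].project                      # -> 'my-project' (lookup by id prefix)
#   list(frames.filter(projects=['my-project']))
#
# add() generates a uuid4 hex id when none is given and sets frames.changed to True
# so callers know the collection still needs to be persisted.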
| yloiseau/Watson | watson/frames.py | Python | mit | 4,355 | 0 |
# Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import sure
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import drop_table, sync_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery
from tests.integration.cqlengine.base import BaseCassEngTestCase
from mock import patch
class TestMultiKeyModel(Model):
partition = columns.Integer(primary_key=True)
cluster = columns.Integer(primary_key=True)
count = columns.Integer(required=False)
text = columns.Text(required=False)
class BatchQueryTests(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(BatchQueryTests, cls).setUpClass()
drop_table(TestMultiKeyModel)
sync_table(TestMultiKeyModel)
@classmethod
def tearDownClass(cls):
super(BatchQueryTests, cls).tearDownClass()
drop_table(TestMultiKeyModel)
def setUp(self):
super(BatchQueryTests, self).setUp()
self.pkey = 1
for obj in TestMultiKeyModel.filter(partition=self.pkey):
obj.delete()
def test_insert_success_case(self):
b = BatchQuery()
TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4')
with self.assertRaises(TestMultiKeyModel.DoesNotExist):
TestMultiKeyModel.get(partition=self.pkey, cluster=2)
b.execute()
TestMultiKeyModel.get(partition=self.pkey, cluster=2)
def test_update_success_case(self):
inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4')
b = BatchQuery()
inst.count = 4
inst.batch(b).save()
inst2 = TestMultiKeyModel.get(partition=self.pkey, cluster=2)
self.assertEqual(inst2.count, 3)
b.execute()
inst3 = TestMultiKeyModel.get(partition=self.pkey, cluster=2)
self.assertEqual(inst3.count, 4)
def test_delete_success_case(self):
inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4')
b = BatchQuery()
inst.batch(b).delete()
TestMultiKeyModel.get(partition=self.pkey, cluster=2)
b.execute()
with self.assertRaises(TestMultiKeyModel.DoesNotExist):
TestMultiKeyModel.get(partition=self.pkey, cluster=2)
def test_context_manager(self):
with BatchQuery() as b:
for i in range(5):
TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=i, count=3, text='4')
for i in range(5):
with self.assertRaises(TestMultiKeyModel.DoesNotExist):
TestMultiKeyModel.get(partition=self.pkey, cluster=i)
for i in range(5):
TestMultiKeyModel.get(partition=self.pkey, cluster=i)
def test_bulk_delete_success_case(self):
for i in range(1):
for j in range(5):
TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{0}:{1}'.format(i,j))
with BatchQuery() as b:
TestMultiKeyModel.objects.batch(b).filter(partition=0).delete()
self.assertEqual(TestMultiKeyModel.filter(partition=0).count(), 5)
self.assertEqual(TestMultiKeyModel.filter(partition=0).count(), 0)
#cleanup
for m in TestMultiKeyModel.all():
m.delete()
def test_empty_batch(self):
b = BatchQuery()
b.execute()
with BatchQuery() as b:
pass
class BatchQueryCallbacksTests(BaseCassEngTestCase):
def test_API_managing_callbacks(self):
# Callbacks can be added at init and after
def my_callback(*args, **kwargs):
pass
# adding on init:
batch = BatchQuery()
batch.add_callback(my_callback)
batch.add_callback(my_callback, 2, named_arg='value')
batch.add_callback(my_callback, 1, 3)
self.assertEqual(batch._callbacks, [
(my_callback, (), {}),
(my_callback, (2,), {'named_arg':'value'}),
(my_callback, (1, 3), {})
])
def test_callbacks_properly_execute_callables_and_tuples(self):
call_history = []
def my_callback(*args, **kwargs):
call_history.append(args)
# adding on init:
batch = BatchQuery()
batch.add_callback(my_callback)
batch.add_callback(my_callback, 'more', 'args')
batch.execute()
self.assertEqual(len(call_history), 2)
self.assertEqual([(), ('more', 'args')], call_history)
def test_callbacks_tied_to_execute(self):
"""Batch callbacks should NOT fire if batch is not executed in context manager mode"""
call_history = []
def my_callback(*args, **kwargs):
call_history.append(args)
with BatchQuery() as batch:
batch.add_callback(my_callback)
self.assertEqual(len(call_history), 1)
class SomeError(Exception):
pass
with self.assertRaises(SomeError):
with BatchQuery() as batch:
batch.add_callback(my_callback)
# this error bubbling up through context manager
# should prevent callback runs (along with b.execute())
raise SomeError
# still same call history. Nothing added
self.assertEqual(len(call_history), 1)
# but if execute ran, even with an error bubbling through
# the callbacks also would have fired
with self.assertRaises(SomeError):
with BatchQuery(execute_on_exception=True) as batch:
batch.add_callback(my_callback)
raise SomeError
# updated call history
self.assertEqual(len(call_history), 2)
def test_callbacks_work_multiple_times(self):
"""
Tests that multiple executions of execute on a batch statement
logs a warning, and that we don't encounter an attribute error.
@since 3.1
@jira_ticket PYTHON-445
@expected_result warning message is logged
@test_category object_mapper
"""
call_history = []
def my_callback(*args, **kwargs):
call_history.append(args)
with warnings.catch_warnings(record=True) as w:
with BatchQuery() as batch:
batch.add_callback(my_callback)
batch.execute()
batch.execute()
self.assertEqual(len(w), 2) # package filter setup to warn always
self.assertRegexpMatches(str(w[0].message), r"^Batch.*multiple.*")
def test_disable_multiple_callback_warning(self):
"""
Tests that multiple executions of a batch statement
don't log a warning when warn_multiple_exec flag is set, and
that we don't encounter an attribute error.
@since 3.1
@jira_ticket PYTHON-445
@expected_result warning message is logged
@test_category object_mapper
"""
call_history = []
def my_callback(*args, **kwargs):
call_history.append(args)
with patch('cassandra.cqlengine.query.BatchQuery.warn_multiple_exec', False):
with warnings.catch_warnings(record=True) as w:
with BatchQuery() as batch:
batch.add_callback(my_callback)
batch.execute()
batch.execute()
self.assertFalse(w)
| coldeasy/python-driver | tests/integration/cqlengine/test_batch_query.py | Python | apache-2.0 | 8,003 | 0.002249 |
A = [[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0]]
def Distancias(n, origem):
d = [-1] * n
d[origem] = 0
f = []
f.append(origem)
while len(f) > 0:
x = f[0]
del f[0]
for y in range(n):
if A[x][y] == 1 and d[y] == -1:
d[y] = d[x] + 1
print (y)
f.append(y)
return d
print (Distancias(6, 3))
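# Expected behaviour for this matrix and origin 3 (illustrative): the BFS discovers
# vertices 2, 4, 0 and 1 in that order (printed inside the loop) and the final list
# is [2, 3, 1, 0, 1, -1]; vertex 5 is unreachable, so its distance stays -1.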
| andersonsilvade/python_C | Python32/ED/Distâncias em uma Rede.py | Python | mit | 444 | 0.027027 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers common to multiple models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import contextlib
import functools
from functools import partial
import math
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import inplace_ops
@function.Defun(
python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
"""Identity operation whose gradient is converted to a `Tensor`.
Currently, the gradient to `tf.concat` is particularly expensive to
compute if dy is an `IndexedSlices` (a lack of GPU implementation
forces the gradient operation onto CPU). This situation occurs when
the output of the `tf.concat` is eventually passed to `tf.gather`.
It is sometimes faster to convert the gradient to a `Tensor`, so as
to get the cheaper gradient for `tf.concat`. To do this, replace
`tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.
Args:
x: A `Tensor`.
Returns:
The input `Tensor`.
"""
return x
def is_xla_compiled():
"""Whether we are building graph that will be compiled by XLA.
This checks whether the code is executing within an XLA context.
If True, model authors should ensure the graph they build is compilable by
XLA. Specifically, they should ensure that all ops have XLA implementations
and that all shapes are statically known.
Returns:
bool, whether the current graph will be compiled for XLA.
"""
ctxt = tf.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
return control_flow_util.GetContainingXLAContext(ctxt) is not None
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
"""Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
Instead of specifying noise_shape, this function takes broadcast_dims -
a list of dimension numbers in which noise_shape should be 1. The random
keep/drop tensor has dimensionality 1 along these dimensions.
Args:
x: a floating point tensor.
keep_prob: A scalar Tensor with the same type as x.
The probability that each element is kept.
broadcast_dims: an optional list of integers
the dimensions along which to broadcast the keep/drop flags.
**kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".
Returns:
Tensor of the same shape as x.
"""
assert "noise_shape" not in kwargs
if broadcast_dims:
shape = tf.shape(x)
ndims = len(x.get_shape())
# Allow dimensions like "-1" as well.
broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]
kwargs["noise_shape"] = [
1 if i in broadcast_dims else shape[i] for i in range(ndims)
]
return tf.nn.dropout(x, keep_prob, **kwargs)
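# Illustrative example: for x of shape [batch, length, depth] and broadcast_dims=[1],
# the computed noise_shape is [batch, 1, depth], so one keep/drop decision is shared
# across every position of a sequence while still varying per example and per channel.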
def comma_separated_string_to_integer_list(s):
return [int(i) for i in s.split(",") if i]
def saturating_sigmoid(x):
"""Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
with tf.name_scope("saturating_sigmoid", values=[x]):
y = tf.sigmoid(x)
return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))
def hard_sigmoid(x, saturation_limit=0.9):
saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
x_shifted = 0.5 * x + 0.5
return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost
def hard_tanh(x, saturation_limit=0.9):
saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost
def inverse_exp_decay(max_step, min_value=0.01, step=None):
"""Inverse-decay exponentially from 0.01 to 1.0 reached at max_step."""
inv_base = tf.exp(tf.log(min_value) / float(max_step))
if step is None:
step = tf.train.get_global_step()
if step is None:
return 1.0
step = tf.to_float(step)
return inv_base**tf.maximum(float(max_step) - step, 0.0)
def inverse_lin_decay(max_step, min_value=0.01, step=None):
"""Inverse-decay linearly from 0.01 to 1.0 reached at max_step."""
if step is None:
step = tf.train.get_global_step()
if step is None:
return 1.0
step = tf.to_float(step)
progress = tf.minimum(step / float(max_step), 1.0)
return progress * (1.0 - min_value) + min_value
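# Worked example (illustrative): with max_step=1000 and min_value=0.01 the returned
# scale is 0.01 at step 0, about 0.505 at step 500, and saturates at 1.0 from step
# 1000 onwards.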
def shakeshake2_py(x, y, equal=False, individual=False):
"""The shake-shake sum of 2 tensors, python version."""
if equal:
alpha = 0.5
elif individual:
    alpha = tf.random_uniform(tf.shape(x)[:1])
else:
alpha = tf.random_uniform([])
return alpha * x + (1.0 - alpha) * y
@function.Defun()
def shakeshake2_grad(x1, x2, dy):
"""Overriding gradient for shake-shake of 2 tensors."""
y = shakeshake2_py(x1, x2)
dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
return dx
@function.Defun()
def shakeshake2_indiv_grad(x1, x2, dy):
"""Overriding gradient for shake-shake of 2 tensors."""
y = shakeshake2_py(x1, x2, individual=True)
dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
return dx
@function.Defun()
def shakeshake2_equal_grad(x1, x2, dy):
"""Overriding gradient for shake-shake of 2 tensors."""
y = shakeshake2_py(x1, x2, equal=True)
dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
return dx
@function.Defun(grad_func=shakeshake2_grad)
def shakeshake2(x1, x2):
"""The shake-shake function with a different alpha for forward/backward."""
return shakeshake2_py(x1, x2)
@function.Defun(grad_func=shakeshake2_indiv_grad)
def shakeshake2_indiv(x1, x2):
return shakeshake2_py(x1, x2, individual=True)
@function.Defun(grad_func=shakeshake2_equal_grad)
def shakeshake2_eqgrad(x1, x2):
"""The shake-shake function with a different alpha for forward/backward."""
return shakeshake2_py(x1, x2)
def shakeshake(xs, equal_grad=False):
"""Multi-argument shake-shake, currently approximated by sums of 2."""
if len(xs) == 1:
return xs[0]
div = (len(xs) + 1) // 2
arg1 = shakeshake(xs[:div], equal_grad=equal_grad)
arg2 = shakeshake(xs[div:], equal_grad=equal_grad)
if equal_grad:
return shakeshake2_eqgrad(arg1, arg2)
return shakeshake2(arg1, arg2)
def convert_rgb_to_real(x):
"""Conversion of pixel values to real numbers."""
with tf.name_scope("rgb_to_real", values=[x]):
x = tf.to_float(x)
x /= 255.0
return x
def convert_rgb_to_symmetric_real(x):
"""Conversion of pixel values to real numbers."""
with tf.name_scope("rgb_to_real", values=[x]):
x = tf.to_float(x)
# Convert each pixel intensity in [0, 1, 2, ..., 255] into a real number in
# the range [-1, 1].
x = (x / 127.5) - 1
return x
def convert_real_to_rgb(x):
"""Conversion of real numbers to pixel values."""
with tf.name_scope("real_to_rgb", values=[x]):
x *= 255.0
return x
def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1):
"""Make x n-d with squeeze and expand_dims."""
if len(x.shape) > n:
while len(x.shape) != n:
x = tf.squeeze(x, [squeeze_dim])
else:
while len(x.shape) != n:
x = tf.expand_dims(x, expand_dim)
return x
def standardize_images(x):
"""Image standardization on batches and videos."""
with tf.name_scope("standardize_images", [x]):
x_shape = shape_list(x)
x = tf.to_float(tf.reshape(x, [-1] + x_shape[-3:]))
x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
x_variance = tf.reduce_mean(
tf.square(x - x_mean), axis=[1, 2], keepdims=True)
num_pixels = tf.to_float(x_shape[-2] * x_shape[-3])
x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
return tf.reshape(x, x_shape)
def flatten4d3d(x):
"""Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
xshape = shape_list(x)
result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
return result
# TODO(noam): remove this function after TPUs do gather faster.
def gather(params, indices, dtype=tf.float32):
"""Version of tf.gather that works faster on tpu."""
if not is_xla_compiled():
return tf.gather(params, indices)
vocab_size = params.get_shape().as_list()[0]
indices_flat = tf.reshape(indices, [-1])
out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
out = reshape_like(out, tf.expand_dims(indices, -1))
return out
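# Sketch of the one-hot trick above (illustrative): for params of shape
# [vocab_size, depth] and indices of shape [batch, length], the indices are flattened,
# expanded into a [batch * length, vocab_size] one-hot matrix, and a single matmul
# selects the embedding rows; the result is reshaped back to [batch, length, depth].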
# TODO(noam): remove this function after TPUs do cumsum faster.
def cumsum(x, axis=0, exclusive=False):
"""TPU hack for tf.cumsum.
This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
the axis dimension is very large.
Args:
x: a Tensor
axis: an integer
exclusive: a boolean
Returns:
Tensor of the same shape as x.
"""
if not is_xla_compiled():
return tf.cumsum(x, axis=axis, exclusive=exclusive)
x_shape = shape_list(x)
rank = len(x_shape)
length = x_shape[axis]
my_range = tf.range(length)
comparator = tf.less if exclusive else tf.less_equal
mask = tf.cast(
comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
x.dtype)
ret = tf.tensordot(x, mask, axes=[[axis], [0]])
if axis != rank - 1:
ret = tf.transpose(
ret,
list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
return ret
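# Worked example of the mask trick (illustrative): for x = [1., 2., 3.] along axis 0,
# the comparison mask is the upper-triangular matrix
#   [[1, 1, 1],
#    [0, 1, 1],
#    [0, 0, 1]]
# and tf.tensordot(x, mask) yields [1., 3., 6.]; with exclusive=True the diagonal is
# zeroed and the result is [0., 1., 3.].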
def dropout_no_scaling(x, keep_prob):
"""Like tf.nn.dropout, but does not scale up. Works on integers also.
Args:
x: a Tensor
keep_prob: a floating point number
Returns:
Tensor of the same shape as x.
"""
if keep_prob == 1.0:
return x
mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)
return x * cast_like(mask, x)
def embedding(x,
vocab_size,
dense_size,
name=None,
reuse=None,
multiplier=1.0,
symbol_dropout_rate=0.0,
embedding_var=None,
dtype=tf.float32):
"""Embed x of type int64 into dense vectors, reducing to max 4 dimensions."""
with tf.variable_scope(
name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype):
if embedding_var is None:
embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
# On the backwards pass, we want to convert the gradient from
# an indexed-slices to a regular tensor before sending it back to the
# parameter server. This avoids excess computation on the parameter server.
if not tf.contrib.eager.in_eager_mode():
embedding_var = convert_gradient_to_tensor(embedding_var)
x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate)
emb_x = gather(embedding_var, x, dtype)
if multiplier != 1.0:
emb_x *= multiplier
static_shape = emb_x.shape.as_list()
if len(static_shape) < 5:
return emb_x
assert len(static_shape) == 5
# If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1.
return tf.squeeze(emb_x, 3)
def shift_right(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]
return shifted_targets
def shift_right_3d(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]
return shifted_targets
def shift_right_2d(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0]])[:, :-1]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1]
return shifted_targets
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
"""Use a strided convolution to downsample x by 2, `nbr_steps` times.
We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
As detailed in http://distill.pub/2016/deconv-checkerboard/.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: number of halving downsample rounds to apply
output_filters: an int specifying the filter count for the convolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
`[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
output_filters]`
"""
with tf.variable_scope(
name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
if nbr_steps == 0:
out = conv(x, output_filters, (1, 1))
return out, [out]
hidden_layers = [x]
for i in range(nbr_steps):
hidden_layers.append(
conv(
hidden_layers[-1],
output_filters, (2, 2),
strides=2,
activation=tf.nn.relu,
name="conv" + str(i)))
return hidden_layers[-1], hidden_layers
def deconv_stride2_multistep(x,
nbr_steps,
output_filters,
name=None,
reuse=None):
"""Use a deconvolution to upsample x by 2**`nbr_steps`.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: an int specifying the number of doubling upsample rounds to
apply.
output_filters: an int specifying the filter count for the deconvolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or
`[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),
output_filters]`
"""
with tf.variable_scope(
name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse):
def deconv1d(cur, i):
cur_shape = shape_list(cur)
thicker = conv(
cur,
output_filters * 2, (1, 1),
padding="SAME",
activation=tf.nn.relu,
name="deconv1d" + str(i))
return tf.reshape(thicker,
[cur_shape[0], cur_shape[1] * 2, 1, output_filters])
def deconv2d(cur, i):
thicker = conv(
cur,
output_filters * 4, (1, 1),
padding="SAME",
activation=tf.nn.relu,
name="deconv2d" + str(i))
return tf.depth_to_space(thicker, 2)
cur = x
for i in range(nbr_steps):
if cur.get_shape()[2] == 1:
cur = deconv1d(cur, i)
else:
cur_dim = shape_list(cur)[2]
if isinstance(cur_dim, int):
if cur_dim == 1:
cur = deconv1d(cur, i)
else:
cur = deconv2d(cur, i)
else:
cur = tf.cond(
tf.equal(cur_dim, 1),
lambda idx=i: deconv1d(cur, idx),
lambda idx=i: deconv2d(cur, idx))
return cur
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
"""Conditional conv_fn making kernel 1d or 2d depending on inputs shape."""
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4. "
"Shape: " + str(static_shape))
# Add support for left padding.
if kwargs.get("padding") == "LEFT":
dilation_rate = (1, 1)
if "dilation_rate" in kwargs:
dilation_rate = kwargs["dilation_rate"]
assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
cond_padding = tf.cond(
tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding)
# Set middle two dimensions to None to prevent convolution from complaining
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
kwargs["padding"] = "VALID"
def conv2d_kernel(kernel_size_arg, name_suffix):
"""Call conv2d but add suffix to name."""
name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix)
original_name = kwargs.pop("name", None)
original_force2d = kwargs.pop("force2d", None)
result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
if original_name is not None:
kwargs["name"] = original_name # Restore for other calls.
if original_force2d is not None:
kwargs["force2d"] = original_force2d
return result
return conv2d_kernel(kernel_size, "single")
def conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs):
return conv_internal(
tf.layers.conv2d,
inputs,
filters,
kernel_size,
dilation_rate=dilation_rate,
**kwargs)
def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
return tf.squeeze(
conv(
tf.expand_dims(inputs, 2),
filters, (kernel_size, 1),
dilation_rate=(dilation_rate, 1),
**kwargs), 2)
def separable_conv(inputs, filters, kernel_size, **kwargs):
return conv_internal(tf.layers.separable_conv2d, inputs, filters, kernel_size,
**kwargs)
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution. If separability == 0 it's a separable_conv."""
def conv_fn(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution, splits into separability-many blocks."""
separability = None
if "separability" in kwargs:
separability = kwargs.pop("separability")
if separability:
parts = []
abs_sep = separability if separability > 0 else -1 * separability
for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
with tf.variable_scope("part_%d" % split_idx):
if separability > 0:
parts.append(
tf.layers.conv2d(split, filters // separability, kernel_size,
**kwargs))
else:
parts.append(
tf.layers.separable_conv2d(split, filters // abs_sep,
kernel_size, **kwargs))
if separability > 1:
result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1))
elif abs_sep == 1: # If we have just one block, return it.
assert len(parts) == 1
result = parts[0]
else:
result = tf.concat(parts, axis=3)
else:
result = tf.layers.separable_conv2d(inputs, filters, kernel_size,
**kwargs)
if separability is not None:
kwargs["separability"] = separability
return result
return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"):
"""Version of conv1d that works on TPU (as of 11/2017).
Args:
inputs: a Tensor with shape [batch, length, input_depth].
filters: an integer.
kernel_size: an integer.
padding: a string - "SAME" or "LEFT".
name: a string.
Returns:
a Tensor with shape [batch, length, filters].
"""
if kernel_size == 1:
return dense(inputs, filters, name=name, use_bias=True)
if padding == "SAME":
assert kernel_size % 2 == 1
first_offset = -((kernel_size - 1) // 2)
else:
assert padding == "LEFT"
first_offset = -(kernel_size - 1)
last_offset = first_offset + kernel_size - 1
results = []
padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])
for i in range(kernel_size):
shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs
shifted.set_shape(inputs.get_shape())
results.append(
dense(shifted, filters, use_bias=(i == 0), name=name + "_%d" % i))
ret = tf.add_n(results)
ret *= kernel_size**-0.5
return ret
def layer_norm_vars(filters):
"""Create Variables for layer norm."""
scale = tf.get_variable(
"layer_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"layer_norm_bias", [filters], initializer=tf.zeros_initializer())
return scale, bias
def layer_norm_compute(x, epsilon, scale, bias):
"""Layer norm raw computation."""
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return norm_x * scale + bias
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
"""Layer normalize the tensor x, averaging over the last dimension."""
if filters is None:
filters = shape_list(x)[-1]
with tf.variable_scope(
name, default_name="layer_norm", values=[x], reuse=reuse):
scale, bias = layer_norm_vars(filters)
return layer_norm_compute(x, epsilon, scale, bias)
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
"""Group normalization as in https://arxiv.org/abs/1803.08494."""
x_shape = shape_list(x)
if filters is None:
filters = x_shape[-1]
assert len(x_shape) == 4
assert filters % num_groups == 0
# Prepare variables.
scale = tf.get_variable(
"group_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"group_norm_bias", [filters], initializer=tf.zeros_initializer())
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
# Reshape and compute group norm.
x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
# Calculate mean and variance on heights, width, channels (not groups).
mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return tf.reshape(norm_x, x_shape) * scale + bias
def noam_norm(x, epsilon=1.0, name=None):
"""One version of layer normalization."""
with tf.name_scope(name, default_name="noam_norm", values=[x]):
shape = x.get_shape()
ndims = len(shape)
return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) * tf.sqrt(
tf.to_float(shape[-1])))
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
"""Layer normalization with l2 norm."""
if filters is None:
filters = shape_list(x)[-1]
with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
scale = tf.get_variable(
"l2_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"l2_norm_bias", [filters], initializer=tf.zeros_initializer())
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
l2norm = tf.reduce_sum(tf.square(x - mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
return norm_x * scale + bias
def apply_spectral_norm(x):
"""Normalizes x using the spectral norm.
The implementation follows Algorithm 1 of
https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
reshaped such that the number of channels (last-dimension) is the same.
Args:
x: Tensor with the last dimension equal to the number of filters.
Returns:
x: Tensor with the same shape as x normalized by the spectral norm.
assign_op: Op to be run after every step to update the vector "u".
"""
weights_shape = shape_list(x)
other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
# Reshape into a 2-D matrix with outer size num_filters.
weights_2d = tf.reshape(x, (other, num_filters))
# v = Wu / ||W u||
with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
u = tf.get_variable(
"u", [num_filters, 1],
initializer=tf.truncated_normal_initializer(),
trainable=False)
v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
# u_new = vW / ||v W||
u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
# s = v*W*u
spectral_norm = tf.squeeze(
tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
# set u equal to u_new in the next iteration.
assign_op = tf.assign(u, tf.transpose(u_new))
return tf.divide(x, spectral_norm), assign_op
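# Usage sketch (illustrative only; variable names are placeholders):
#
#   w_sn, u_update = apply_spectral_norm(w)           # w_sn has the same shape as w
#   y = tf.nn.conv2d(x, w_sn, strides=[1, 1, 1, 1], padding="SAME")
#   train_op = tf.group(train_step, u_update)         # refresh u after every step
#
# Only a single power-iteration step is taken per call, so the assign_op must be run
# regularly for the spectral-norm estimate to stay accurate.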
def apply_norm(x, norm_type, depth, epsilon):
"""Apply Normalization."""
if norm_type == "layer":
return layer_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "group":
return group_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "batch":
return tf.layers.batch_normalization(x, epsilon=epsilon)
if norm_type == "noam":
return noam_norm(x, epsilon)
if norm_type == "l2":
return l2_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "none":
return x
  raise ValueError("Parameter norm_type must be one of: 'layer', 'group', 'batch', "
                   "'noam', 'l2', 'none'.")
def zero_add(previous_value, x, name=None, reuse=None):
"""Resnet connection with zero initialization.
Another type of resnet connection which returns previous_value + gamma * x.
gamma is a trainable scalar and initialized with zero. It is useful when a
module is plugged into a trained model and we want to make sure it matches the
original model's performance.
Args:
previous_value: A tensor.
x: A tensor.
name: name of variable scope; defaults to zero_add.
reuse: reuse scope.
Returns:
previous_value + gamma * x.
"""
with tf.variable_scope(name, default_name="zero_add", reuse=reuse):
gamma = tf.get_variable("gamma", (), initializer=tf.zeros_initializer())
return previous_value + gamma * x
def layer_prepostprocess(previous_value,
x,
sequence,
dropout_rate,
norm_type,
depth,
epsilon,
default_name,
name=None,
dropout_broadcast_dims=None):
"""Apply a sequence of functions to the input or output of a layer.
The sequence is specified as a string which may contain the following
characters:
a: add previous_value
n: apply normalization
d: apply dropout
z: zero add
For example, if sequence=="dna", then the output is
previous_value + normalize(dropout(x))
Args:
previous_value: A Tensor, to be added as a residual connection ('a')
x: A Tensor to be transformed.
sequence: a string.
dropout_rate: a float
norm_type: a string (see apply_norm())
depth: an integer (size of last dimension of x).
epsilon: a float (parameter for normalization)
default_name: a string
name: a string
dropout_broadcast_dims: an optional list of integers less than 3
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
a Tensor
"""
with tf.variable_scope(name, default_name=default_name):
if sequence == "none":
return x
for c in sequence:
if c == "a":
x += previous_value
elif c == "z":
x = zero_add(previous_value, x)
elif c == "n":
x = apply_norm(x, norm_type, depth, epsilon)
else:
assert c == "d", ("Unknown sequence step %s" % c)
x = dropout_with_broadcast_dims(
x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
return x
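# Worked example (illustrative): with sequence="dna" the characters are applied left
# to right, so the call computes
#   y = dropout(x)
#   y = apply_norm(y, ...)       # whichever norm_type was requested
#   y = y + previous_value       # residual connection
# which matches the previous_value + normalize(dropout(x)) form in the docstring.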
def layer_preprocess(layer_input, hparams):
"""Apply layer preprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_preprocess_sequence
layer_prepostprocess_dropout
norm_type
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
hparams: a hyperparameters object.
Returns:
a Tensor
"""
assert "a" not in hparams.layer_preprocess_sequence, (
"No residual connections allowed in hparams.layer_preprocess_sequence")
assert "z" not in hparams.layer_preprocess_sequence, (
"No residual connections allowed in hparams.layer_preprocess_sequence")
return layer_prepostprocess(
None,
layer_input,
sequence=hparams.layer_preprocess_sequence,
dropout_rate=hparams.layer_prepostprocess_dropout,
norm_type=hparams.norm_type,
depth=None,
epsilon=hparams.norm_epsilon,
dropout_broadcast_dims=comma_separated_string_to_integer_list(
getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
default_name="layer_prepostprocess")
def layer_postprocess(layer_input, layer_output, hparams):
"""Apply layer postprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_postprocess_sequence
layer_prepostprocess_dropout
norm_type
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
layer_output: a Tensor
hparams: a hyperparameters object.
Returns:
a Tensor
"""
return layer_prepostprocess(
layer_input,
layer_output,
sequence=hparams.layer_postprocess_sequence,
dropout_rate=hparams.layer_prepostprocess_dropout,
norm_type=hparams.norm_type,
depth=None,
epsilon=hparams.norm_epsilon,
dropout_broadcast_dims=comma_separated_string_to_integer_list(
getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
default_name="layer_postprocess")
def conv_block_internal(conv_fn,
inputs,
filters,
dilation_rates_and_kernel_sizes,
first_relu=True,
use_elu=False,
separabilities=None,
**kwargs):
"""A block of convolutions.
Args:
conv_fn: convolution function, e.g. conv or separable_conv.
inputs: a Tensor
filters: an Integer
dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
first_relu: whether to do a relu at start (defaults to True)
use_elu: whether to use ELUs instead of ReLUs (defaults to False)
separabilities: list of separability factors (per-layer).
**kwargs: additional arguments (e.g., pooling)
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
mask = kwargs.pop("mask") if "mask" in kwargs else None
# Usage for normalize_fn kwarg:
# if not specified, use layer norm
# if given normalize_fn=None, don't use any normalization
# if given normalize_fn=norm, use the specified norm function
use_layer_norm = "normalizer_fn" not in kwargs
norm = kwargs.pop("normalizer_fn", None)
use_normalizer_fn = use_layer_norm or norm
if use_layer_norm:
norm = lambda x, name: layer_norm(x, filters, name=name)
with tf.variable_scope(name, "conv_block", [inputs]):
cur, counter = inputs, -1
for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
counter += 1
if first_relu or counter > 0:
cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
if mask is not None:
cur *= mask
if separabilities:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
separability=separabilities[counter],
**kwargs)
else:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
**kwargs)
if use_normalizer_fn:
cur = norm(cur, name="conv_block_norm_%d" % counter)
return cur
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
"""A block of standard 2d convolutions."""
return conv_block_internal(conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
"""A block of standard 1d convolutions."""
return conv_block_internal(conv1d, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
**kwargs):
"""A block of separable convolutions."""
return conv_block_internal(separable_conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
**kwargs):
"""A block of separable convolutions."""
return conv_block_internal(subseparable_conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
"""Pooling (supports "LEFT")."""
with tf.name_scope("pool", values=[inputs]):
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4.")
# Add support for left padding.
if padding == "LEFT":
assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
if len(static_shape) == 3:
width_padding = 2 * (window_size[1] // 2)
padding_ = [[0, 0], [width_padding, 0], [0, 0]]
else:
height_padding = 2 * (window_size[0] // 2)
cond_padding = tf.cond(
tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (window_size[1] // 2)))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding_)
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
padding = "VALID"
return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides)
def conv_block_downsample(x,
kernel,
strides,
padding,
separability=0,
name=None,
reuse=None):
"""Implements a downwards-striding conv block, like Xception exit flow."""
with tf.variable_scope(
name, default_name="conv_block_downsample", values=[x], reuse=reuse):
hidden_size = int(x.get_shape()[-1])
res = conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
strides=strides,
name="res_conv")
x = subseparable_conv_block(
x,
hidden_size, [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv0")
x = subseparable_conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv1")
x = pool(x, kernel, "MAX", padding, strides=strides)
x += res
x = subseparable_conv_block(
x,
2 * hidden_size, [((1, 1), kernel)],
first_relu=False,
padding=padding,
separability=separability,
name="conv2")
x = subseparable_conv_block(
x,
int(2.5 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv3")
return x
def get_timing_signal(length,
min_timescale=1,
max_timescale=1e4,
num_timescales=16):
"""Create Tensor of sinusoids of different frequencies.
Args:
length: Length of the Tensor to create, i.e. Number of steps.
min_timescale: a float
max_timescale: a float
num_timescales: an int
Returns:
Tensor of shape (length, 2*num_timescales)
"""
positions = tf.to_float(tf.range(length))
log_timescale_increment = (
math.log(max_timescale / min_timescale) / (num_timescales - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
"""Adds a bunch of sinusoids of different frequencies to a Tensor.
This allows attention to learn to use absolute and relative positions.
The timing signal should be added to some precursor of both the source
and the target of the attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the depth dimension, padded with zeros to be the same depth as the input,
and added into input.
Args:
x: a Tensor with shape [?, length, ?, depth]
min_timescale: a float
max_timescale: a float
num_timescales: an int <= depth/2
Returns:
a Tensor the same shape as x.
"""
length = shape_list(x)[1]
depth = shape_list(x)[3]
signal = get_timing_signal(length, min_timescale, max_timescale,
num_timescales)
padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])
return x + tf.reshape(padded_signal, [1, length, 1, depth])
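# Illustrative sketch for the two timing-signal helpers above: the signal is
# computed once per position and broadcast-added over batch and the inner
# axis. With depth 32 and the default of 16 timescales, no zero-padding is
# needed. The helper name and shapes are assumptions for illustration only.
def _example_add_timing_signal():
  x = tf.zeros([2, 7, 1, 32])  # [batch, length, 1, depth]
  return add_timing_signal(x)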
def mask_from_embedding(emb):
"""Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
"""
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))
def length_from_embedding(emb):
"""Compute the length of each sequence in the batch.
Args:
emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].
Returns:
a Tensor with shape [batch].
"""
return tf.cast(tf.reduce_sum(mask_from_embedding(emb), [1, 2, 3]), tf.int32)
def mask_leq(target_length, source_length):
"""A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.
Args:
target_length: an integer
source_length: an integer
Returns:
a Tensor with shape [1, target_length, source_length]
"""
return ones_matrix_band_part(
target_length,
source_length,
-1,
0,
out_shape=[1, target_length, source_length])
def relu_density_logit(x, reduce_dims):
"""logit(density(x)).
Useful for histograms.
Args:
x: a Tensor, typically the output of tf.relu
reduce_dims: a list of dimensions
Returns:
a Tensor
"""
frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))
return scaled
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
"""If necessary, zero out inputs to a conv for padding positions.
Args:
inputs: a Tensor with shape [batch, length, ...]
kernel_size: an integer or pair of integers
nonpadding_mask: a Tensor with shape [batch, length]
Returns:
Tensor of the same shape as inputs.
"""
if (kernel_size != 1 and kernel_size != (1, 1) and
nonpadding_mask is not None):
while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims:
nonpadding_mask = tf.expand_dims(nonpadding_mask, -1)
return inputs * nonpadding_mask
return inputs
def dense_relu_dense(inputs,
filter_size,
output_size,
output_activation=None,
dropout=0.0,
dropout_broadcast_dims=None,
name=None):
"""Hidden layer with RELU activation followed by linear projection."""
layer_name = "%s_{}" % name if name else "{}"
h = dense(
inputs,
filter_size,
use_bias=True,
activation=tf.nn.relu,
name=layer_name.format("conv1"))
if dropout != 0.0:
h = dropout_with_broadcast_dims(
h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims)
o = dense(
h,
output_size,
activation=output_activation,
use_bias=True,
name=layer_name.format("conv2"))
return o
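# Illustrative sketch of dense_relu_dense used as a feed-forward block. The
# sizes and the `_example_dense_relu_dense` name are arbitrary assumptions.
def _example_dense_relu_dense():
  x = tf.zeros([2, 5, 64])  # [batch, length, hidden_size]
  return dense_relu_dense(
      x, filter_size=256, output_size=64, dropout=0.1, name="ffn_example")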
def dense_dropconnect(inputs,
output_size,
dropconnect_dropout=0.0,
name="dense_dropconnect",
**kwargs):
"""Dense layer with dropconnect."""
if dropconnect_dropout != 0.0:
tf.logging.info("Applying dropconnect as the kernel regularization.")
kwargs["kernel_regularizer"] = partial(
tf.nn.dropout, keep_prob=1.0 - dropconnect_dropout)
return dense(inputs, output_size, use_bias=True, name=name, **kwargs)
def conv_relu_conv(inputs,
filter_size,
output_size,
first_kernel_size=3,
second_kernel_size=3,
padding="SAME",
nonpadding_mask=None,
dropout=0.0,
name=None,
cache=None,
decode_loop_step=None):
"""Hidden layer with RELU activation followed by linear projection.
Args:
inputs: A tensor.
filter_size: An integer.
output_size: An integer.
first_kernel_size: An integer.
second_kernel_size: An integer.
padding: A string.
nonpadding_mask: A tensor.
dropout: A float.
name: A string.
cache: A dict, containing Tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop.
Only used for inference on TPU. If it is not None, the function
will do inplace update for the cache instead of concatenating the
current result to the cache.
Returns:
A Tensor.
"""
with tf.variable_scope(name, "conv_relu_conv", [inputs]):
inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
if cache:
if decode_loop_step is None:
inputs = cache["f"] = tf.concat([cache["f"], inputs], axis=1)
else:
# Inplace update is required for inference on TPU.
# Inplace_ops only supports inplace_update on the first dimension.
# The performance of current implementation is better than updating
# the tensor by adding the result of matmul(one_hot,
# update_in_current_step)
tmp_f = tf.transpose(cache["f"], perm=[1, 0, 2])
tmp_f = inplace_ops.alias_inplace_update(
tmp_f,
decode_loop_step * tf.shape(inputs)[1],
tf.transpose(inputs, perm=[1, 0, 2]))
inputs = cache["f"] = tf.transpose(tmp_f, perm=[1, 0, 2])
inputs = cache["f"] = inputs[:, -first_kernel_size:, :]
h = tpu_conv1d(
inputs, filter_size, first_kernel_size, padding=padding, name="conv1")
if cache:
h = h[:, -1:, :]
h = tf.nn.relu(h)
if dropout != 0.0:
h = tf.nn.dropout(h, 1.0 - dropout)
h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
return tpu_conv1d(
h, output_size, second_kernel_size, padding=padding, name="conv2")
def sepconv_relu_sepconv(inputs,
filter_size,
output_size,
first_kernel_size=(1, 1),
second_kernel_size=(1, 1),
padding="LEFT",
nonpadding_mask=None,
dropout=0.0,
name=None):
"""Hidden layer with RELU activation followed by linear projection."""
with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]):
inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
if inputs.get_shape().ndims == 3:
is_3d = True
inputs = tf.expand_dims(inputs, 2)
else:
is_3d = False
h = separable_conv(
inputs,
filter_size,
first_kernel_size,
activation=tf.nn.relu,
padding=padding,
name="conv1")
if dropout != 0.0:
h = tf.nn.dropout(h, 1.0 - dropout)
h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
ret = separable_conv(
h, output_size, second_kernel_size, padding=padding, name="conv2")
if is_3d:
ret = tf.squeeze(ret, 2)
return ret
# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv
def conv_hidden_relu(inputs,
hidden_size,
output_size,
kernel_size=(1, 1),
second_kernel_size=(1, 1),
dropout=0.0,
**kwargs):
"""Hidden layer with RELU activation followed by linear projection."""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
if inputs.get_shape().ndims == 3:
is_3d = True
inputs = tf.expand_dims(inputs, 2)
else:
is_3d = False
conv_f1 = conv if kernel_size == (1, 1) else separable_conv
h = conv_f1(
inputs,
hidden_size,
kernel_size,
activation=tf.nn.relu,
name="conv1",
**kwargs)
if dropout != 0.0:
h = tf.nn.dropout(h, 1.0 - dropout)
conv_f2 = conv if second_kernel_size == (1, 1) else separable_conv
ret = conv_f2(h, output_size, second_kernel_size, name="conv2", **kwargs)
if is_3d:
ret = tf.squeeze(ret, 2)
return ret
def conv_gru(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional GRU in 1 dimension."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start, padding):
return conv(
args,
filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate,
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="conv_gru", values=[x], reuse=reuse):
reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding))
gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding))
return gate * x + (1 - gate) * candidate
def gru_feedfwd(a_t, h_prev, filters, name=None):
"""position-wise Feed-fwd GRU gates following the MPNN.
Args:
a_t: Tensor of shape [batch, length, depth] of current input
h_prev: Tensor of shape [batch, length, depth] of prev input
filters: an integer specifying number of dimensions of the filters
name: A string
Returns:
h_t: [batch, length, filters] hidden state
"""
with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]):
# we use right matrix multiplication to handle batches
# W_z and W_r have shape 2d, d. U_z U_r have shape d,d
z_t = (
tf.sigmoid(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_z") +
tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_z")))
r_t = (
tf.sigmoid(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_r") +
tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_r")))
h_tilde = (
tf.tanh(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W") +
tpu_conv1d(r_t * h_prev, filters, 1, padding="SAME", name="U")))
h_t = (1. - z_t) * h_prev + z_t * h_tilde
return h_t
def conv_lstm(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional LSTM in 1 dimension."""
with tf.variable_scope(
name, default_name="conv_lstm", values=[x], reuse=reuse):
gates = conv(
x,
4 * filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate)
g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
return tf.sigmoid(g[2]) * tf.tanh(new_cell)
def diagonal_conv_gru(x,
kernel_size,
filters,
dropout=0.0,
name=None,
reuse=None):
"""Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start):
return conv(
args,
filters,
kernel_size,
padding="SAME",
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="diagonal_conv_gru", values=[x], reuse=reuse):
reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5))
gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0))
if dropout > 0.0:
candidate = tf.nn.dropout(candidate, 1.0 - dropout)
# Diagonal shift.
shift_filters = filters // 3
base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +
[[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)
shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
x_shifted = tf.nn.depthwise_conv2d(
x, shift_filter, [1, 1, 1, 1], padding="SAME")
# Return the gated result and cost.
total_cost_avg = 0.5 * (reset_cost + gate_cost)
return gate * x_shifted + (1 - gate) * candidate, total_cost_avg
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
"""Pad tensors x and y on axis 1 so that they have the same length."""
if axis not in [1, 2]:
raise ValueError("Only axis=1 and axis=2 supported for now.")
with tf.name_scope("pad_to_same_length", values=[x, y]):
x_length = shape_list(x)[axis]
y_length = shape_list(y)[axis]
if (isinstance(x_length, int) and isinstance(y_length, int) and
x_length == y_length and final_length_divisible_by == 1):
return x, y
max_length = tf.maximum(x_length, y_length)
if final_length_divisible_by > 1:
# Find the nearest larger-or-equal integer divisible by given number.
max_length += final_length_divisible_by - 1
max_length //= final_length_divisible_by
max_length *= final_length_divisible_by
length_diff1 = max_length - x_length
length_diff2 = max_length - y_length
def padding_list(length_diff, arg):
if axis == 1:
return [[[0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
return [[[0, 0], [0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]
paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
res_x = tf.pad(x, paddings1)
res_y = tf.pad(y, paddings2)
# Static shapes are the same except for axis=1.
x_shape = x.shape.as_list()
x_shape[axis] = None
res_x.set_shape(x_shape)
y_shape = y.shape.as_list()
y_shape[axis] = None
res_y.set_shape(y_shape)
return res_x, res_y
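# Illustrative sketch: padding two tensors of different lengths along axis 1
# to a shared length that is also a multiple of 4 (here, length 8). The shapes
# are assumptions chosen only for illustration.
def _example_pad_to_same_length():
  x = tf.zeros([2, 5, 8])
  y = tf.zeros([2, 7, 8])
  return pad_to_same_length(x, y, final_length_divisible_by=4)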
def pad_with_zeros(logits, labels):
"""Pad labels on the length dimension to match logits length."""
with tf.name_scope("pad_with_zeros", values=[logits, labels]):
logits, labels = pad_to_same_length(logits, labels)
if len(labels.shape) == 3: # 2-d labels.
logits, labels = pad_to_same_length(logits, labels, axis=2)
return logits, labels
def weights_nonzero(labels):
"""Assign weight 1.0 to all labels except for padding (id=0)."""
return tf.to_float(tf.not_equal(labels, 0))
def weights_prepend_inputs_to_targets(labels):
"""Assign weight 1.0 to only the "targets" portion of the labels.
Weight 1.0 is assigned to all nonzero labels past the first zero.
See prepend_mode in common_hparams.py
Args:
labels: A Tensor of int32s.
Returns:
A Tensor of floats.
"""
past_first_zero = tf.cumsum(tf.to_float(tf.equal(labels, 0)), axis=1)
nonzero = tf.to_float(labels)
return tf.to_float(tf.not_equal(past_first_zero * nonzero, 0))
def weights_multi_problem(labels, taskid=-1):
"""Assign weight 1.0 to only the "targets" portion of the labels.
Weight 1.0 is assigned to all labels past the taskid.
Args:
labels: A Tensor of int32s.
taskid: an int32 representing the task id for a problem.
Returns:
A Tensor of floats.
Raises:
ValueError: The Task ID must be valid.
"""
past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
# Additionally zero out the task id location
past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
non_taskid = tf.to_float(labels)
return tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
def weights_multi_problem_all(labels, taskid=-1):
"""Assign weight 1.0 to only examples from the given task."""
weights = tf.to_float(tf.not_equal(labels, 0))
if taskid < 0:
raise ValueError("Task ID must be non-negative.")
past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
# Additionally zero out the task id location
past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
non_taskid = tf.to_float(labels)
example_mask = tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
example_mask = tf.reduce_sum(example_mask, axis=1)
example_mask = tf.to_float(
tf.greater(example_mask, tf.zeros_like(example_mask)))
return weights * tf.expand_dims(example_mask, axis=-1)
def weights_multi_problem_input(labels, taskid=-1):
"""Assign weight 1.0 to only the inputs for the given task."""
weights_all_tokens = weights_multi_problem_all(labels, taskid)
weights_target = weights_multi_problem(labels, taskid)
return weights_all_tokens - weights_target
def weights_all(labels):
"""Assign weight 1.0 to all labels."""
return tf.ones_like(labels, dtype=tf.float32)
def weights_concatenated(labels):
"""Assign weight 1.0 to the "target" part of the concatenated labels.
The labels look like:
source English I love you . ID1 target French Je t'aime . ID1 source
English the cat ID1 target French le chat ID1 source English ...
We want to assign weight 1.0 to all words in the target text (including the
ID1 end symbol), but not to the source text or the boilerplate. In the
above example, the target words that get positive weight are:
Je t'aime . ID1 le chat ID1
Args:
labels: a Tensor
Returns:
a Tensor
"""
eos_mask = tf.to_int32(tf.equal(labels, 1))
sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
in_target = tf.equal(tf.mod(sentence_num, 2), 1)
# first two tokens of each sentence are boilerplate.
sentence_num_plus_one = sentence_num + 1
shifted = tf.pad(sentence_num_plus_one,
[[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
return ret
def padded_cross_entropy(logits,
labels,
label_smoothing,
weights_fn=weights_nonzero,
reduce_sum=True,
cutoff=0.0,
gaussian=False):
"""Compute cross-entropy assuming 0s are padding.
Computes a loss numerator (the sum of losses), and loss denominator
(the number of non-padding tokens).
Args:
logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
optionally a FactoredTensor.
labels: an integer `Tensor` with shape `[batch, timesteps]`.
label_smoothing: a floating point `Scalar`.
weights_fn: A function from labels to weights.
reduce_sum: a Boolean, whether to sum at the end or not.
cutoff: a float, at which point to have no loss.
gaussian: If true, use a Gaussian distribution for label smoothing
Returns:
loss_numerator: a `Scalar`. Sum of losses.
    loss_denominator: a `Scalar`. The number of non-padding target tokens.
Raises:
ValueError: in case of unsupported argument types.
"""
if isinstance(logits, FactoredTensor):
if gaussian:
raise ValueError("Factored padded cross entropy with Gaussian smoothing "
"is not implemented yet.")
return padded_cross_entropy_factored(
logits,
labels,
label_smoothing,
weights_fn=weights_fn,
reduce_sum=reduce_sum)
confidence = 1.0 - label_smoothing
logits_shape = shape_list(logits)
vocab_size = logits_shape[-1]
with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
if len(logits_shape) == 2:
# Deal with the case where we did not insert extra dimensions due to
# TPU issues. No pad-to-same-length happens in this case.
# TODO(noam): remove this logic once TPU can handle extra dimensions.
labels = tf.reshape(labels, [-1])
else:
logits, labels = pad_with_zeros(logits, labels)
logits = tf.reshape(
logits,
shape_list(labels) + [vocab_size],
name="padded_cross_entropy_size_check")
logits = tf.cast(logits, tf.float32)
xent = smoothing_cross_entropy(
logits, labels, vocab_size, confidence, gaussian=gaussian)
weights = weights_fn(labels)
if cutoff > 0.0:
xent = tf.nn.relu(xent - cutoff)
if not reduce_sum:
return xent * weights, weights
return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
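# Illustrative sketch of padded_cross_entropy on toy data: label id 0 is
# treated as padding and gets zero weight through weights_nonzero, so only the
# five real tokens contribute to the denominator. Shapes and values are
# assumptions for illustration only.
def _example_padded_cross_entropy():
  logits = tf.zeros([2, 4, 10])  # [batch, timesteps, vocab_size]
  labels = tf.constant([[3, 5, 0, 0], [1, 2, 4, 0]], dtype=tf.int32)
  loss_num, loss_den = padded_cross_entropy(
      logits, labels, label_smoothing=0.1)
  return loss_num / loss_den  # average loss per non-padding token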
def _weights_one_third(labels):
"""Returns Tensor of shape [batch, height, width]. Each element is 1/3."""
return tf.ones(tf.shape(labels)[:-1]) / 3.
def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):
"""Discretized mixture of logistics loss.
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
labels: A [batch, height, width, channels] tensor of 8-bit pixel
intensities. The computation assumes channels is 3.
weights_fn: A function of labels, returning a Tensor of shape
[batch, height, width] which weights each loss term. Default is to scale
each loss term by 1/3 so that they capture the average across channels.
reduce_sum: A boolean, to return scalar loss instead of per position.
Returns:
Tuple of loss tensors for numerator and denominator, each a scalar if
reduce_sum else of shape [batch, height, width]. The sum of their divisions
is the number of nats for each pixel in labels.
"""
real_labels = convert_rgb_to_symmetric_real(labels)
dml_loss_value = discretized_mix_logistic_loss(pred=pred, labels=real_labels)
weights = weights_fn(labels)
loss_num = weights * dml_loss_value
loss_den = weights_nonzero(weights)
if reduce_sum:
loss_num = tf.reduce_sum(loss_num)
loss_den = tf.reduce_sum(loss_den)
return loss_num, loss_den
def split_to_discretized_mix_logistic_params(inputs):
"""Splits input tensor into parameters of discretized mixture logistic.
Args:
inputs: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
Returns:
Tuple of unconstrained mixture probabilities, locations, scales, and
coefficient parameters of the distribution. The mixture probability has
shape [batch, height, width, num_mixtures]. Other parameters have shape
[batch, height, width, num_mixtures, 3].
"""
batch, height, width, output_dim = shape_list(inputs)
num_mixtures = output_dim // 10
logits, locs, log_scales, coeffs = tf.split(
inputs,
num_or_size_splits=[
num_mixtures, num_mixtures * 3, num_mixtures * 3, num_mixtures * 3
],
axis=-1)
split_shape = [batch, height, width, num_mixtures, 3]
locs = tf.reshape(locs, split_shape)
log_scales = tf.reshape(log_scales, split_shape)
log_scales = tf.maximum(log_scales, -7.)
coeffs = tf.reshape(coeffs, split_shape)
coeffs = tf.tanh(coeffs)
return logits, locs, log_scales, coeffs
def discretized_mix_logistic_loss(pred, labels):
"""Computes negative log probability for the discretized mixture of logistics.
The distribution of a whole pixel is a mixture of 3-dimensional discretized
logistic distributions. The 3-D discretized logistic factorizes as 3 1-D
discretized logistic distributions, one for each channel. It defines
```none
P(X = x)
= sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k])
= sum_{k=1}^K probs[k] * [
prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ]
```
The means tensor is a linear combination of location parameters and previous
channels. The discretized logistic distribution assigns probability mass to an
  event P(X=x) via logistic CDFs: P(X <= x + 0.5) - P(X <= x - 0.5) for 1 < x <
  254; P(X <= 0.5) for x = 0; and 1 - P(X <= 254.5) for x = 255. Instead of
8-bit inputs, this implementation assumes the events are rescaled to [-1, 1].
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
labels: A [batch, height, width, channels] tensor of true pixel intensities
rescaled to [-1, 1]. The computation assumes channels is 3.
Returns:
A [batch, height, width] tensor of the negative log conditional probability
of each pixel given all previous pixels.
"""
logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
pred)
# Tile labels to broadcast compute across the mixture dimension.
batch, height, width, num_mixtures = shape_list(logits)
labels = tf.tile(
tf.reshape(labels, [batch, height, width, 1, 3]),
[1, 1, 1, num_mixtures, 1])
# p(x) = sigmoid((x - means_i + 1/255.)/scale_i) -
# sigmoid((x - means_i - 1/255.)/scale_i)
# for each channel i. The means are linearly parameterized.
means_0 = locs[..., 0]
means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0]
means_2 = (
locs[..., 2] + coeffs[..., 1] * labels[..., 0] +
coeffs[..., 2] * labels[..., 1])
means = tf.stack([means_0, means_1, means_2], axis=-1)
centered_labels = labels - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_labels + 1. / 255.)
min_in = inv_stdv * (centered_labels - 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
cdf_min = tf.nn.sigmoid(min_in)
# Compute log probability for edge case of 0 (before scaling), 255 (before
# scaling), and all other cases respectively.
log_prob_0 = plus_in - tf.nn.softplus(plus_in)
log_prob_255 = -tf.nn.softplus(min_in)
prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12)
log_prob_event = tf.log(prob_event)
# Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps);
# (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may
# cause `tf.log(0.)`; (d) p(x) < 1e-5.
mid_in = inv_stdv * centered_labels
log_prob_event_approx = (
mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5))
log_probs = tf.where(
labels < -0.999, log_prob_0,
tf.where(
labels > 0.999, log_prob_255,
tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx)))
# Sum over channels and compute log-probability of each mixture.
log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1)
output = -tf.reduce_logsumexp(log_probs, axis=-1)
return output
def sample_from_discretized_mix_logistic(pred, seed=None):
"""Sampling from a discretized mixture of logistics.
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
seed: Random seed.
Returns:
A tensor of shape [batch, height, width, 3] with real intensities scaled
between -1 and 1.
"""
logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
pred)
# Sample mixture indicator given logits using the gumbel max trick.
num_mixtures = shape_list(logits)[-1]
gumbel_noise = -tf.log(-tf.log(
tf.random_uniform(
tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
sel = tf.one_hot(
tf.argmax(logits + gumbel_noise, -1),
depth=num_mixtures,
dtype=tf.float32)
# Select mixture component's parameters.
sel = tf.expand_dims(sel, -1)
locs = tf.reduce_sum(locs * sel, 3)
log_scales = tf.reduce_sum(log_scales * sel, 3)
coeffs = tf.reduce_sum(coeffs * sel, 3)
# Sample from 3-D logistic & clip to interval. Note we don't round to the
# nearest 8-bit value when sampling.
uniform_noise = tf.random_uniform(
tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
logistic_noise = tf.log(uniform_noise) - tf.log(1. - uniform_noise)
x = locs + tf.exp(log_scales) * logistic_noise
x0 = x[..., 0]
x1 = x[..., 1] + coeffs[..., 0] * x0
x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
x = tf.stack([x0, x1, x2], axis=-1)
x = tf.clip_by_value(x, -1., 1.)
return x
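# Illustrative sketch tying the discretized mixture-of-logistics helpers
# together on toy data: 5 mixtures give a prediction depth of 5 * 10 = 50 and
# labels are RGB values rescaled to [-1, 1]. All shapes are assumptions.
def _example_discretized_mix_logistic():
  pred = tf.random_normal([2, 4, 4, 50])  # num_mixtures = 5
  labels = tf.random_uniform([2, 4, 4, 3], minval=-1., maxval=1.)
  nll = discretized_mix_logistic_loss(pred=pred, labels=labels)  # [2, 4, 4]
  sample = sample_from_discretized_mix_logistic(pred)  # [2, 4, 4, 3]
  return nll, sample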
def smoothing_cross_entropy(logits,
labels,
vocab_size,
confidence,
gaussian=False):
"""Cross entropy with label smoothing to limit over-confidence.
Args:
logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size].
labels: Tensor of shape [batch_size, ?, ?, ?].
vocab_size: Tensor representing the size of the vocabulary.
confidence: Used to determine on and off values for label smoothing.
If `gaussian` is true, `confidence` is the variance to the Gaussian
distribution.
gaussian: Uses a Gaussian distribution for label smoothing
Returns:
Tensor of shape [batch_size, ?, ?, ?].
"""
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
# Low confidence is given to all non-true labels, uniformly.
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
# Normalizing constant is the best cross-entropy value with soft targets.
    # We subtract it just for readability; it makes no difference to learning.
normalizing = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
if gaussian and confidence > 0.0:
labels = tf.cast(labels, tf.float32)
normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence)
# Locations to evaluate the probability distributions.
soft_targets = normal_dist.prob(
tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None])
# Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match
# logits: [batch_size, ?, ?, ?, vocab_size]
soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0])
else:
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
return xentropy - normalizing
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
"""Pool elements across the last dimension.
Useful to convert a list of vectors into a single vector so as
to get a representation of a set.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
    pooling_type: the pooling type to use, "MAX" or "AVR" (average).
mask: A tensor of shape [batch_size, sequence_length] containing a
mask for the inputs with 1's for existing elements, and 0's elsewhere.
Returns:
A tensor of shape [batch_size, input_dims] containing the sequences of
transformed vectors.
"""
with tf.name_scope("global_pool", values=[inputs]):
if mask is not None:
mask = tf.expand_dims(mask, axis=2)
inputs = tf.multiply(inputs, mask)
if pooling_type == "MAX":
# A tf.pool can be used here, but reduce is cleaner
output = tf.reduce_max(inputs, axis=1)
elif pooling_type == "AVR":
if mask is not None:
        # Some elements are padding, so a plain reduce_mean would be wrong.
output = tf.reduce_sum(inputs, axis=1)
num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
output = tf.div(output, tf.maximum(num_elems, 1))
else:
output = tf.reduce_mean(inputs, axis=1)
return output
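# Illustrative sketch of masked average pooling with global_pool_1d: the mask
# keeps padded positions out of both the sum and the element count. The batch
# of three sequences below is an assumption for illustration.
def _example_global_pool_1d():
  inputs = tf.random_normal([3, 5, 8])  # [batch, length, depth]
  mask = tf.constant([[1., 1., 1., 0., 0.],
                      [1., 1., 1., 1., 1.],
                      [1., 0., 0., 0., 0.]])
  return global_pool_1d(inputs, pooling_type="AVR", mask=mask)  # [3, 8]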
def running_global_pool_1d(inputs, pooling_type="MAX"):
"""Same global pool, but only for the elements up to the current element.
Useful for outputs where the state of future elements is not known.
Takes no mask as all elements up to the current element are assumed to exist.
Currently only supports maximum. Equivalent to using a lower triangle bias.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: Pooling type to use. Currently only supports 'MAX'.
Returns:
A tensor of shape [batch_size, sequence_length, input_dims] containing the
running 'totals'.
"""
del pooling_type
with tf.name_scope("running_global_pool", values=[inputs]):
scan_fct = tf.maximum
# Permute inputs so seq_length is first.
elems = tf.transpose(inputs, [1, 0, 2])
# Perform scan.
cumulatives = tf.scan(scan_fct, elems, swap_memory=True)
# Permute output to get back to original order.
output = tf.transpose(cumulatives, [1, 0, 2])
return output
def gated_linear_unit_layer(x, name=None):
"""Gated linear unit layer.
Paper: Language Modeling with Gated Convolutional Networks.
Link: https://arxiv.org/abs/1612.08083
x = Wx * sigmoid(W'x).
Args:
x: A tensor
name: A string
Returns:
A tensor of the same shape as x.
"""
with tf.variable_scope(name, default_name="glu_layer", values=[x]):
depth = shape_list(x)[-1]
x = tf.layers.dense(x, depth * 2, activation=None)
x, gating_x = tf.split(x, 2, axis=-1)
return x * tf.nn.sigmoid(gating_x)
def sru_with_scan(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
"""SRU cell as in https://arxiv.org/abs/1709.02755.
This implementation uses tf.scan and can incur overhead, see the full SRU
function doc for details and an implementation that is sometimes faster.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
    num_layers: How many SRU layers; default is 2, as one layer underperforms.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
"""
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
# We assume x is [batch, ..., channels] and treat all ... as time.
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
x = tf.transpose(x, [1, 0, 2]) # Scan assumes time on axis 0.
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
# SRU state manipulation function.
def next_state(cur_state, args_tup):
cur_x_times_one_minus_f, cur_f = args_tup
return cur_f * cur_state + cur_x_times_one_minus_f
# Calculate SRU on each layer.
for i in range(num_layers):
# The parallel part of the SRU.
x_orig = x
x, f, r = tf.split(
tf.layers.dense(x, 3 * x_shape[-1], name="kernel_%d" % i), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.
# Calculate states.
c_states = tf.scan(
next_state, (x_times_one_minus_f, f),
initializer=initial_state,
parallel_iterations=2,
name="scan_%d" % i)
# Final output.
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h # Next layer.
# Transpose back to batch-major.
x = tf.transpose(x, [1, 0, 2])
return tf.reshape(x, x_shape)
class CumsumprodCell(object):
"""Cumulative sum and product object for use with functional_rnn API."""
def __init__(self, initializer):
self._initializer = initializer
@property
def output_size(self):
return int(shape_list(self._initializer)[-1])
def zero_state(self, batch_size, dtype):
dtype = dtype or tf.float32
return tf.zeros([batch_size, self.output_size], dtype=dtype)
def __call__(self, inputs_t, state_t):
cur_x_times_one_minus_f, cur_f = tf.split(inputs_t, 2, axis=-1)
state_next = cur_f * state_t + cur_x_times_one_minus_f
outputs_t = state_next
return outputs_t, state_next
def sru(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
"""SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
    num_layers: How many SRU layers; default is 2, as one layer underperforms.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
"""
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
if is_xla_compiled(): # On TPU the XLA does a good job with while.
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
try:
from tensorflow.contrib.recurrent.python.ops import functional_rnn # pylint: disable=g-import-not-at-top
except ImportError:
tf.logging.info("functional_rnn not found, using sru_with_scan instead")
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
# We assume x is [batch, ..., channels] and treat all ... as time.
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
cell = CumsumprodCell(initial_state)
# Calculate SRU on each layer.
for i in range(num_layers):
# The parallel part of the SRU.
x_orig = x
x, f, r = tf.split(
tf.layers.dense(x, 3 * x_shape[-1], name="kernel_%d" % i), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.
# Calculate states.
concat = tf.concat([x_times_one_minus_f, f], axis=-1)
c_states, _ = functional_rnn.functional_rnn(
cell, concat, time_major=False)
# Final output.
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h # Next layer.
return tf.reshape(x, x_shape)
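# Illustrative sketch of running the SRU over a [batch, length, channels]
# tensor; without tensorflow.contrib.recurrent this quietly falls back to
# sru_with_scan, as handled above. The shapes and name are assumptions.
def _example_sru():
  x = tf.random_normal([4, 10, 32])
  return sru(x, num_layers=2, activation=tf.nn.tanh, name="sru_example")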
def linear_set_layer(layer_size,
inputs,
context=None,
activation_fn=tf.nn.relu,
dropout=0.0,
name=None):
"""Basic layer type for doing funky things with sets.
Applies a linear transformation to each element in the input set.
If a context is supplied, it is concatenated with the inputs.
e.g. One can use global_pool_1d to get a representation of the set which
can then be used as the context for the next layer.
TODO: Add bias add (or control the biases used).
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
context: A tensor of shape [batch_size, context_dims] containing a global
statistic about the set.
activation_fn: The activation function to use.
dropout: Dropout probability.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, output_dims] containing the
sequences of transformed vectors.
"""
with tf.variable_scope(
name, default_name="linear_set_layer", values=[inputs]):
# Apply 1D convolution to apply linear filter to each element
# along the 2nd dimension.
outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")
# Apply the context if it exists.
if context is not None:
# Unfortunately tf doesn't support broadcasting via concat, but we can
# simply add the transformed context to get the same effect.
if len(context.get_shape().as_list()) == 2:
context = tf.expand_dims(context, axis=1)
cont_tfm = conv1d(
context, layer_size, 1, activation=None, name="cont_conv")
outputs += cont_tfm
if activation_fn is not None:
outputs = activation_fn(outputs)
if dropout != 0.0:
outputs = tf.nn.dropout(outputs, 1.0 - dropout)
return outputs
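# Illustrative sketch of the pattern suggested in the docstring above: pool
# the set once with global_pool_1d and feed the result back in as context.
# Shapes and the layer size are assumptions for illustration only.
def _example_linear_set_layer():
  inputs = tf.random_normal([3, 6, 16])  # [batch, set_size, dims]
  context = global_pool_1d(inputs, pooling_type="MAX")  # [batch, dims]
  return linear_set_layer(
      32, inputs, context=context, name="set_layer_example")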
def ravanbakhsh_set_layer(layer_size,
inputs,
mask=None,
sequential=False,
activation_fn=tf.nn.tanh,
dropout=0.0,
name=None):
"""Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 .
More parameter-efficient version of a linear-set-layer with context.
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, vector]
containing the sequences of input vectors.
mask: A tensor of shape [batch_size, sequence_length] containing a
mask for the inputs with 1's for existing elements, and 0's elsewhere.
sequential: If true, will use a running global pool so each element will
only depend on those before it. Set true if this layer is being used in
an output sequence.
activation_fn: The activation function to use.
dropout: dropout.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, vector] containing the
sequences of transformed vectors.
"""
del dropout
with tf.variable_scope(name, "ravanbakhsh_set_layer", [inputs]):
if sequential:
return linear_set_layer(
layer_size,
inputs - running_global_pool_1d(inputs),
activation_fn=activation_fn,
name=name)
return linear_set_layer(
layer_size,
inputs - tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1),
activation_fn=activation_fn,
name=name)
def fn_device_dependency_dict():
"""State container for fn_device_dependency."""
if not hasattr(tf.get_default_graph(), "dependency_dict"):
setattr(tf.get_default_graph(), "dependency_dict", defaultdict(list))
return tf.get_default_graph().dependency_dict
@contextlib.contextmanager
def fn_device_dependency(name, device=""):
"""Add control deps for name and device."""
key = name + "_" + device
outs = []
def body():
with tf.control_dependencies(fn_device_dependency_dict()[key]):
yield outs
assert outs
deps = outs
if isinstance(outs[0], (list, tuple)):
assert len(outs) == 1
deps = outs[0]
fn_device_dependency_dict()[key] = deps
if device:
with tf.device(device):
return body()
else:
return body()
def underlying_variable_ref(t):
"""Find the underlying variable ref.
Traverses through Identity, ReadVariableOp, and Enter ops.
Stops when op type has Variable or VarHandle in name.
Args:
t: a Tensor
Returns:
a Tensor that is a variable ref, or None on error.
"""
while t.op.type in ["Identity", "ReadVariableOp", "Enter"]:
t = t.op.inputs[0]
op_type = t.op.type
if "Variable" in op_type or "VarHandle" in op_type:
return t
else:
return None
def underlying_variable(t):
"""Find the underlying tf.Variable object.
Args:
t: a Tensor
Returns:
tf.Variable.
"""
t = underlying_variable_ref(t)
assert t is not None
# make sure that the graph has a variable index and that it is up-to-date
if not hasattr(tf.get_default_graph(), "var_index"):
tf.get_default_graph().var_index = {}
var_index = tf.get_default_graph().var_index
for v in tf.global_variables()[len(var_index):]:
var_index[v.name] = v
return var_index[t.name]
def approximate_split(x, num_splits, axis=0):
"""Split approximately equally into num_splits parts.
Args:
x: a Tensor
num_splits: an integer
axis: an integer.
Returns:
a list of num_splits Tensors.
"""
size = shape_list(x)[axis]
size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)]
return tf.split(x, size_splits, axis=axis)
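# Illustrative sketch: splitting ten elements into four nearly equal parts
# yields sizes [2, 2, 3, 3], which sum back to the original size.
def _example_approximate_split():
  x = tf.range(10)
  return approximate_split(x, num_splits=4, axis=0)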
class FactoredTensor(object):
"""A concise factored representation of Tensor as two tensors.
This class represents the tensor tf.matmul(a, b, transpose_b=True)
by storing the values of Tensors a and b.
The reason for this is that the product may be too big to fully realize at
once, so it can be realized a part at a time.
"a" may have extra leading dimensions, in which case they are flattened out
before computing the matrix product, then re-expanded afterwards.
"""
def __init__(self, a, b):
self._a = a
self._b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def to_tensor(self):
"""Convert to Tensor."""
a_shape = shape_list(self.a)
b_shape = shape_list(self.b)
inner_dim = b_shape[1]
result_dim = b_shape[0]
flat_a = tf.reshape(self.a, [-1, inner_dim])
product = tf.matmul(flat_a, self.b, transpose_b=True)
product_shape = a_shape[:-1] + [result_dim]
product = tf.reshape(product, product_shape)
product.set_shape(self.a.get_shape().as_list()[:-1] +
[self.b.get_shape()[0]])
return product
def _convert_factored_tensor_to_tensor(value, *args, **kwargs):
# call ops.convert_to_tensor to handle optional arguments appropriately
return ops.internal_convert_to_tensor(value.to_tensor(), *args, **kwargs)
tf.register_tensor_conversion_function(FactoredTensor,
_convert_factored_tensor_to_tensor)
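# Illustrative sketch of FactoredTensor standing in for logits = a . b^T: the
# full [batch, vocab_size] product is only materialized when to_tensor() is
# called (or when the object is converted to a Tensor). Shapes are assumptions.
def _example_factored_tensor():
  a = tf.random_normal([6, 16])  # [batch, inner_dim]
  b = tf.random_normal([1000, 16])  # [vocab_size, inner_dim]
  factored = FactoredTensor(a, b)
  return factored.to_tensor()  # [6, 1000]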
def smoothing_cross_entropy_factored_grad(op, dy):
"""Gradient function for smoothing_cross_entropy_factored."""
a = op.inputs[0]
b = op.inputs[1]
labels = op.inputs[2]
confidence = op.inputs[3]
num_splits = 16
vocab_size = shape_list(b)[0]
labels = approximate_split(labels, num_splits)
a = approximate_split(a, num_splits)
dy = approximate_split(dy, num_splits)
b_grad = None
a_grad_parts = []
deps = []
for part in range(num_splits):
with tf.control_dependencies(deps):
logits = tf.matmul(a[part], b, transpose_b=True)
output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,
confidence)
a_grad_part, b_grad_part = tf.gradients(
ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])
a_grad_parts.append(a_grad_part)
if part > 0:
b_grad += b_grad_part
else:
b_grad = b_grad_part
deps = [b_grad, a_grad_part]
a_grad = tf.concat(a_grad_parts, 0)
return a_grad, b_grad, None, None
@function.Defun(
noinline=True,
python_grad_func=smoothing_cross_entropy_factored_grad,
compiled=True,
separate_compiled_gradients=True)
def smoothing_cross_entropy_factored(a, b, labels, confidence):
"""Memory-efficient computation of smoothing cross-entropy.
Avoids realizing the entire logits matrix at once.
Args:
a: a Tensor with shape [batch, inner_dim]
b: a Tensor with shape [vocab_size, inner_dim]
labels: an integer Tensor with shape [batch]
confidence: a float
Returns:
A Tensor with shape [batch]
"""
num_splits = 16
vocab_size = shape_list(b)[0]
labels = approximate_split(labels, num_splits)
a = approximate_split(a, num_splits)
parts = []
for part in range(num_splits):
with tf.control_dependencies(parts[-1:]):
logits = tf.matmul(a[part], b, transpose_b=True)
parts.append(
smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))
return tf.concat(parts, 0)
def padded_cross_entropy_factored(factored_logits,
labels,
label_smoothing,
weights_fn=weights_nonzero,
reduce_sum=True):
"""Memory-efficient computation of smoothing cross-entropy.
Avoids realizing the entire logits matrix at once.
Args:
factored_logits: a `FactoredTensor` representing a Tensor
with shape `[batch, timesteps, vocab_size]`.
labels: an integer `Tensor` with shape `[batch, timesteps]`.
label_smoothing: a floating point `Scalar`.
weights_fn: A function from labels to weights.
reduce_sum: a Boolean, whether to sum at the end or not.
Returns:
loss_numerator: a `Scalar`. Sum of losses.
    loss_denominator: a `Scalar`. The number of non-padding target tokens.
"""
a = factored_logits.a
b = factored_logits.b
confidence = 1.0 - label_smoothing
with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]):
labels_flat = tf.reshape(labels, [-1])
a_flat = tf.reshape(a, [-1, shape_list(b)[1]])
xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,
tf.convert_to_tensor(confidence))
xent = tf.reshape(xent, shape_list(labels))
weights = weights_fn(labels)
if not reduce_sum:
return xent * weights, weights
return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
def fn_with_custom_grad(grad_fn, use_global_vars=False):
"""Decorator to create a subgraph with a custom gradient function.
The subgraph created by the decorated function is NOT put in a Defun and so
does not suffer from the limitations of the Defun (all subgraph ops on the
same device, no summaries).
Args:
grad_fn: function with signature
(inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),
all of which are lists of Tensors.
use_global_vars: if True, variables will be the global variables created.
If False, will be the trainable variables.
Returns:
Decorator for function such that the gradient is defined by grad_fn.
"""
def dec(fn):
@functools.wraps(fn)
def wrapped(*args):
return _fn_with_custom_grad(
fn, args, grad_fn, use_global_vars=use_global_vars)
return wrapped
return dec
def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):
"""Create a subgraph with a custom gradient.
Args:
fn: function that takes inputs as arguments and produces 1 or more Tensors.
inputs: list<Tensor>, will be passed as fn(*inputs).
grad_fn: function with signature
(inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),
all of which are lists of Tensors.
use_global_vars: if True, variables will be the global variables created.
If False, will be the trainable variables.
Returns:
fn(*inputs)
"""
vs = tf.get_variable_scope()
get_vars_fn = (
vs.global_variables if use_global_vars else vs.trainable_variables)
len_before_vars = len(get_vars_fn())
inputs = list(inputs)
outputs = fn(*inputs)
train_vars = get_vars_fn()[len_before_vars:]
if grad_fn is None:
return outputs
if not isinstance(outputs, (tuple, list)):
outputs = [outputs]
outputs = list(outputs)
defun_inputs = [inputs, train_vars, outputs]
def custom_grad_fn(op, *dys):
"""Custom grad fn applying grad_fn for identity Defun."""
fn_inputs, fn_vars, fn_outputs = tf.contrib.framework.nest.pack_sequence_as(
defun_inputs, list(op.inputs))
dys = list(dys)
assert len(fn_outputs) == len(outputs)
assert len(fn_outputs) == len(dys)
grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)
grad_outputs = [None] * len(fn_outputs)
return tuple(grad_inputs + grad_vars + grad_outputs)
# The Defun takes as input the original inputs, the trainable variables
# created in fn, and the outputs. In the forward it passes through the
# outputs. In the backwards, it produces gradients for the original inputs
# and the trainable variables.
in_types = [t.dtype for t in inputs]
out_types = [t.dtype for t in outputs]
var_types = [t.dtype for t in train_vars]
@function.Defun(
*(in_types + var_types + out_types),
func_name="identity_custom_grad%d" % ops.uid(),
python_grad_func=custom_grad_fn,
shape_func=lambda _: [t.get_shape() for t in outputs])
def identity(*args):
_, _, outs = tf.contrib.framework.nest.pack_sequence_as(defun_inputs, args)
return tuple([tf.identity(t) for t in outs])
flat_inputs = tf.contrib.framework.nest.flatten(defun_inputs)
id_out = identity(*flat_inputs)
return id_out
_function_cache = {}
def conv_hidden_relu_memory_efficient(x,
filter_size,
epsilon=1e-6,
forget=True,
test_vars=None,
name=None):
"""LayerNorm, Conv, ReLU, Conv.
All convolutions have kernel size 1.
returns conv(relu(conv(layer_norm(x))))
Args:
x: input Tensor with shape [batch, length, io_size]
filter_size: an integer - size of the hidden layer.
epsilon: a float (for layer norm)
forget: a boolean - forget forwards activations and recompute on backprop
test_vars: optional tuple of variables for testing purposes
name: an optional string
Returns:
a Tensor with shape [batch, length, io_size]
"""
io_size = x.get_shape().as_list()[-1]
def forward_internal(x, f1, f2, scale, bias):
"""Forward function."""
    # split batch-wise to avoid exhausting memory in case the batch is large
# and the hidden layer is large.
num_splits = 4
x_flat = tf.reshape(x, [-1, 1, shape_list(x)[2]])
xs = approximate_split(x_flat, num_splits)
ys = []
for i in range(num_splits):
with tf.control_dependencies(ys[-1:]):
n = layer_norm_compute(xs[i], epsilon, scale, bias)
y = tf.nn.conv1d(n, f1, 1, "SAME")
y = tf.nn.relu(y)
y = tf.nn.conv1d(y, f2, 1, "SAME")
ys.append(y)
y = tf.concat(ys, 0)
y = tf.reshape(y, shape_list(x))
return y
key = ("conv_hidden_relu_memory_efficient %s" % epsilon)
if not forget:
forward_fn = forward_internal
elif key in _function_cache:
forward_fn = _function_cache[key]
else:
@function.Defun(compiled=True)
def grad_fn(x, f1, f2, scale, bias, dy):
"""Gradient for efficiency."""
with tf.control_dependencies([dy]):
num_splits = 4
x_shape = shape_list(x)
flat_shape = [-1, 1, x_shape[2]]
x = tf.reshape(x, flat_shape)
dy = tf.reshape(dy, flat_shape)
xs = approximate_split(x, num_splits)
dys = approximate_split(dy, num_splits)
dxs = []
df1 = 0
df2 = 0
dscale = 0
dbias = 0
deps = []
for i in range(num_splits):
with tf.control_dependencies(deps):
n = layer_norm_compute(xs[i], epsilon, scale, bias)
y = tf.nn.conv1d(n, f1, 1, "SAME")
y = tf.nn.relu(y)
y = tf.nn.conv1d(y, f2, 1, "SAME")
dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients(
ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]])
df1 += pdf1
df2 += pdf2
dscale += pdscale
dbias += pdbias
dxs.append(dxi)
deps = [dxi, df1, df2, dscale, dbias]
with tf.control_dependencies(deps):
dx = tf.concat(dxs, 0)
dx = tf.reshape(dx, x_shape)
return dx, df1, df2, dscale, dbias
@function.Defun(
grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
def forward_fn(x, f1, f2, scale, bias):
return forward_internal(x, f1, f2, scale, bias)
with tf.variable_scope(name, default_name="ffn2", values=[x]):
# TODO(noam): it would be nice to save memory by casting x to float16
# here, but this causes problems with the gradients. Figure out if there
# is a way to leave the gradients as float32.
if test_vars is not None:
f1, f2, scale, bias = list(test_vars)
else:
f1 = tf.get_variable("f1", [1, io_size, filter_size])
f2 = tf.get_variable("f2", [1, filter_size, io_size])
scale, bias = layer_norm_vars(io_size)
if forget:
y = forward_fn(x, f1, f2, scale, bias)
else:
y = forward_internal(x, f1, f2, scale, bias)
y.set_shape(x.get_shape())
return y
def shape_list(x):
"""Return list of dims, statically where possible."""
x = tf.convert_to_tensor(x)
# If unknown rank, return dynamic shape
if x.get_shape().dims is None:
return tf.shape(x)
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i in range(len(static)):
dim = static[i]
if dim is None:
dim = shape[i]
ret.append(dim)
return ret
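# Illustrative graph-mode sketch: shape_list mixes static ints with dynamic
# Tensors, so its result can be fed straight back into tf.reshape even when
# the batch dimension is unknown. The placeholder shape is an assumption.
def _example_shape_list():
  x = tf.placeholder(tf.float32, [None, 10, 8])
  batch, length, depth = shape_list(x)  # Tensor, 10, 8
  return tf.reshape(x, [batch, length * depth])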
def list_product(els):
prod = els[0]
for el in els[1:]:
prod *= el
return prod
def sample_with_temperature(logits, temperature):
"""Either argmax or random sampling.
Args:
logits: a Tensor.
temperature: a float 0.0=argmax 1.0=random
Returns:
a Tensor with one fewer dimension than logits.
"""
if temperature == 0.0:
# TF argmax doesn't handle >5 dimensions, so we reshape here.
logits_shape = shape_list(logits)
argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1)
return tf.reshape(argmax, logits_shape[:-1])
else:
assert temperature > 0.0
reshaped_logits = (
tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices,
shape_list(logits)[:logits.get_shape().ndims - 1])
return choices
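# Illustrative sketch contrasting greedy decoding (temperature 0.0) with
# random sampling (temperature 1.0) from the same toy logits. Shapes are
# assumptions for illustration only.
def _example_sample_with_temperature():
  logits = tf.random_normal([2, 3, 50])  # [batch, length, vocab_size]
  greedy = sample_with_temperature(logits, 0.0)  # [2, 3] ids
  sampled = sample_with_temperature(logits, 1.0)  # [2, 3] ids
  return greedy, sampled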
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
"""Matrix band part of ones.
Args:
rows: int determining number of rows in output
cols: int
num_lower: int, maximum distance backward. Negative values indicate
unlimited.
num_upper: int, maximum distance forward. Negative values indicate
unlimited.
out_shape: shape to reshape output by.
Returns:
Tensor of size rows * cols reshaped into shape out_shape.
"""
if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
# Needed info is constant, so we construct in numpy
if num_lower < 0:
num_lower = rows - 1
if num_upper < 0:
num_upper = cols - 1
lower_mask = np.tri(cols, rows, num_lower).T
upper_mask = np.tri(rows, cols, num_upper)
band = np.ones((rows, cols)) * lower_mask * upper_mask
if out_shape:
band = band.reshape(out_shape)
band = tf.constant(band, tf.float32)
else:
band = tf.matrix_band_part(
tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
tf.cast(num_upper, tf.int64))
if out_shape:
band = tf.reshape(band, out_shape)
return band
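# Illustrative sketch: the usual causal (lower-triangular) mask is the band
# with unlimited lookback and zero lookahead, reshaped for broadcasting over
# batch and heads. The default length of 5 is an assumption.
def _example_causal_mask(length=5):
  return ones_matrix_band_part(
      length, length, -1, 0, out_shape=[1, 1, length, length])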
def reshape_like_all_dims(a, b):
"""Reshapes a to match the shape of b."""
ret = tf.reshape(a, tf.shape(b))
if not tf.contrib.eager.in_eager_mode():
ret.set_shape(b.get_shape())
return ret
def recompute_grad(fn):
"""Decorator that recomputes the function on the backwards pass.
Args:
fn: a function that takes Tensors (all as positional arguments) and returns
a tuple of Tensors.
Returns:
A wrapped fn that is identical to fn when called, but its activations will
be discarded and recomputed on the backwards pass (i.e. on a call to
tf.gradients).
"""
@functools.wraps(fn)
def wrapped(*args):
return _recompute_grad(fn, args)
return wrapped
def _recompute_grad(fn, args):
"""See recompute_grad."""
cached_vs = []
cached_arg_scope = []
def grad_fn(inputs, variables, outputs, output_grads):
"""Recompute outputs for gradient computation."""
del outputs
variables = [underlying_variable_ref(v) for v in variables]
# Recompute outputs
with tf.control_dependencies(output_grads):
with tf.contrib.framework.arg_scope(cached_arg_scope[0]):
with tf.variable_scope(cached_vs[0], reuse=True):
outputs = fn(*inputs)
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
outputs = list(outputs)
grads = tf.gradients(outputs, inputs + variables, output_grads)
grad_inputs = grads[:len(inputs)]
grad_vars = grads[len(inputs):]
# TODO(rsepassi): Make fn_with_custom_grad work with bfloat16.
# If the input gradients are bfloat16, it's assumed the variables are
# bfloat16. This is a hack to ensure that grad_vars are the right type.
if grad_inputs[0].dtype == tf.bfloat16:
grad_vars = [tf.cast(grad_var, tf.bfloat16) for grad_var in grad_vars]
return grad_inputs, grad_vars
@fn_with_custom_grad(grad_fn)
def fn_with_recompute(*args):
cached_vs.append(tf.get_variable_scope())
cached_arg_scope.append(tf.contrib.framework.current_arg_scope())
return fn(*args)
return fn_with_recompute(*args)
def dense(x, units, **kwargs):
"""Identical to tf.layers.dense."""
return tf.layers.dense(x, units, **kwargs)
def batch_dense(inputs,
units,
activation=None,
kernel_initializer=None,
reuse=None,
name=None):
"""Multiply a batch of input matrices by a batch of parameter matrices.
Each input matrix is multiplied by the corresponding parameter matrix.
This is useful in a mixture-of-experts where the batch represents different
experts with different inputs.
Args:
inputs: a Tensor with shape [batch, length, input_units]
units: an integer
activation: an optional activation function to apply to the output
kernel_initializer: an optional initializer
    reuse: whether to reuse the variable scope
name: an optional string
Returns:
a Tensor with shape [batch, length, units]
Raises:
ValueError: if the "batch" or "input_units" dimensions of inputs are not
statically known.
"""
inputs_shape = shape_list(inputs)
if len(inputs_shape) != 3:
raise ValueError("inputs must have 3 dimensions")
batch = inputs_shape[0]
input_units = inputs_shape[2]
if not isinstance(batch, int) or not isinstance(input_units, int):
raise ValueError("inputs must have static dimensions 0 and 2")
with tf.variable_scope(
name,
default_name="batch_dense",
values=[inputs],
reuse=reuse,
dtype=inputs.dtype):
if kernel_initializer is None:
kernel_initializer = tf.random_normal_initializer(
stddev=input_units**-0.5)
w = tf.get_variable(
"w", [batch, input_units, units],
initializer=kernel_initializer,
dtype=inputs.dtype)
y = tf.matmul(inputs, w)
if activation is not None:
y = activation(y)
return y
def mix(x1,
x2,
steps,
is_training,
min_prob=0.0,
max_prob=1.0,
mode="lin",
simple=False,
broadcast_last=False):
"""Mix starting with x2, mixing mixing, going towards x1."""
with tf.name_scope("mix"):
if not is_training:
if max_prob >= 1.0:
return x1
alpha_shape = shape_list(x1)
if broadcast_last:
alpha_shape = alpha_shape[:-1] + [1]
alpha = tf.random_uniform(alpha_shape)
alpha = tf.to_float(tf.less(alpha, max_prob))
return alpha * x1 + (1.0 - alpha) * x2
def get_res():
"""Create the result.
Separate function to speed it up later (see below).
Returns:
Tensor of mixed inputs.
"""
if mode == "lin":
alpha_p = inverse_lin_decay(steps)
else:
alpha_p = inverse_exp_decay(steps)
alpha_p = alpha_p * (max_prob - min_prob) + min_prob
if simple:
return alpha_p * x1 + (1.0 - alpha_p) * x2
alpha_shape = shape_list(x1)
if broadcast_last:
alpha_shape = alpha_shape[:-1] + [1]
alpha = tf.random_uniform(alpha_shape)
alpha = tf.to_float(tf.less(alpha, alpha_p))
return alpha * x1 + (1.0 - alpha) * x2
if max_prob < 1.0:
return get_res()
# Prevent sampling after steps is passed to speed it up.
if is_xla_compiled():
return get_res()
else:
cur_step = tf.train.get_global_step()
if cur_step is None:
return x1 # Step not available, probably eval mode, don't mix.
return tf.cond(tf.less(cur_step, steps), get_res, lambda: x1)
def brelu(x):
"""Bipolar ReLU as in https://arxiv.org/abs/1709.04054."""
x_shape = shape_list(x)
x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
y1 = tf.nn.relu(x1)
y2 = -tf.nn.relu(-x2)
return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)
def belu(x):
"""Bipolar ELU as in https://arxiv.org/abs/1709.04054."""
x_shape = shape_list(x)
x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
y1 = tf.nn.elu(x1)
y2 = -tf.nn.elu(-x2)
return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)
def nac(x, depth, name=None, reuse=None):
"""NAC as in https://arxiv.org/abs/1808.00508."""
with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse):
x_shape = shape_list(x)
w = tf.get_variable("w", [x_shape[-1], depth])
m = tf.get_variable("m", [x_shape[-1], depth])
w = tf.tanh(w) * tf.nn.sigmoid(m)
x_flat = tf.reshape(x, [-1, x_shape[-1]])
res_flat = tf.matmul(x_flat, w)
return tf.reshape(res_flat, x_shape[:-1] + [depth])
def nalu(x, depth, epsilon=1e-30, name=None, reuse=None):
"""NALU as in https://arxiv.org/abs/1808.00508."""
with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse):
x_shape = shape_list(x)
x_flat = tf.reshape(x, [-1, x_shape[-1]])
gw = tf.get_variable("w", [x_shape[-1], depth])
g = tf.nn.sigmoid(tf.matmul(x_flat, gw))
g = tf.reshape(g, x_shape[:-1] + [depth])
a = nac(x, depth, name="nac_lin")
log_x = tf.log(tf.abs(x) + epsilon)
m = nac(log_x, depth, name="nac_log")
return g * a + (1 - g) * tf.exp(m)
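# Illustrative note on the NALU above (wording ours, formula taken from the code):
# the gate g interpolates between an additive path a = NAC(x) and a multiplicative
# path exp(NAC(log(|x| + eps))), i.e. output = g * a + (1 - g) * exp(m). A minimal
# call might look like:
#
#   x = tf.random_uniform([8, 16])
#   y = nalu(x, depth=4, name="nalu_demo")   # shape [8, 4]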
def argmax_with_score(logits, axis=None):
"""Argmax along with the value."""
axis = axis or len(logits.get_shape()) - 1
predictions = tf.argmax(logits, axis=axis)
logits_shape = shape_list(logits)
prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
prefix_size = 1
for d in prefix_shape:
prefix_size *= d
# Flatten to extract scores
flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
flat_predictions = tf.reshape(predictions, [prefix_size])
flat_indices = tf.stack(
[tf.range(tf.to_int64(prefix_size)),
tf.to_int64(flat_predictions)],
axis=1)
flat_scores = tf.gather_nd(flat_logits, flat_indices)
# Unflatten
scores = tf.reshape(flat_scores, prefix_shape)
return predictions, scores
def log_prob_from_logits(logits, reduce_axis=-1):
return logits - tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True)
def top_1_tpu(inputs):
"""find max and argmax over the last dimension.
Works well on TPU
Args:
inputs: A tensor with shape [..., depth]
Returns:
values: a Tensor with shape [...]
indices: a Tensor with shape [...]
"""
inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
mask = tf.to_int32(tf.equal(inputs_max, inputs))
index = tf.range(tf.shape(inputs)[-1]) * mask
return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
def index_last_dim_with_indices(x, indices):
"""Use indices to index into the last axis of x.
This can be useful for recovering the actual probabilities of a sample from a
probability distribution.
Args:
x: Tensor, n-d.
indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
dimensions of x. The values of indices will be used to index into the last
axis of x.
Returns:
Tensor, (n-1)-d.
"""
assert len(x.shape) == len(indices.shape) + 1
x_shape = shape_list(x)
vocab_size = x_shape[-1]
flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])
idx = tf.stack(
[
tf.range(tf.to_int64(shape_list(flat_indices)[0])),
tf.to_int64(flat_indices)
],
axis=1)
flat_x_idx = tf.gather_nd(flat_x, idx)
x_idx = tf.reshape(flat_x_idx, x_shape[:-1])
return x_idx
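# Usage sketch (illustrative only): recover the probability of sampled ids from a
# distribution over the vocabulary.
#
#   probs = tf.nn.softmax(logits)               # [batch, length, vocab]
#   ids = sample_with_temperature(logits, 1.0)  # [batch, length]
#   chosen = index_last_dim_with_indices(probs, ids)  # [batch, length]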
def should_generate_summaries():
"""Is this an appropriate context to generate summaries.
Returns:
a boolean
"""
name_scope = tf.contrib.framework.get_name_scope()
if name_scope and "while/" in name_scope:
# Summaries don't work well within tf.while_loop()
return False
if tf.get_variable_scope().reuse:
# Avoid generating separate summaries for different data shards
return False
return True
def reshape_like(a, b):
"""Reshapes a to match the shape of b in all but the last dimension."""
ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0))
if not tf.contrib.eager.in_eager_mode():
ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])
return ret
def summarize_video(video, prefix, max_outputs=1):
"""Summarize the video using image summaries starting with prefix."""
video_shape = shape_list(video)
if len(video_shape) != 5:
raise ValueError("Assuming videos given as tensors in the format "
"[batch, time, height, width, channels] but got one "
"of shape: %s" % str(video_shape))
if tf.contrib.eager.in_eager_mode():
return
if video.get_shape().as_list()[1] is None:
tf.summary.image(
"%s_last_frame" % prefix,
tf.cast(video[:, -1, :, :, :], tf.uint8),
max_outputs=max_outputs)
else:
for k in range(video_shape[1]):
tf.summary.image(
"%s_frame_%d" % (prefix, k),
tf.cast(video[:, k, :, :, :], tf.uint8),
max_outputs=max_outputs)
def cast_like(x, y):
"""Cast x to y's dtype, if necessary."""
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
if x.dtype.base_dtype == y.dtype.base_dtype:
return x
cast_x = tf.cast(x, y.dtype)
if cast_x.device != x.device:
tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x.name,
x.device, cast_x.device)
return cast_x
def make_even_size(x):
"""Pad x to be even-sized on axis 1 and 2, but only if necessary."""
x_shape = x.get_shape().as_list()
assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
shape = [dim if dim is not None else -1 for dim in x_shape]
new_shape = x_shape # To make sure constant shapes remain constant.
if x_shape[1] is not None:
new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
if x_shape[2] is not None:
new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
if shape[1] % 2 == 0 and shape[2] % 2 == 0:
return x
if shape[1] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
if shape[2] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x.set_shape(new_shape)
return x
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
def sliced_gan_loss(input1,
input2,
discriminator,
num_vecs,
do_random_vecs=True,
do_tanh=True,
return_logits=False):
"""Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947.
Puts input1 and input2 through the provided discriminator to get logits.
Then, computes num_vecs random projections of the logits, sorts them on
the batch dimension and returns the L2 loss between the sorted vectors.
See the above-mentioned paper for the reasoning behind it.
Args:
input1: first discriminator inputs.
input2: second discriminator inputs.
discriminator: inputs -> logits function.
num_vecs: how many random vectors to use for projections.
do_random_vecs: whether to use random vectors or just tanh of the logits.
do_tanh: if true (default) we'll also just use tanh of the logits.
return_logits: Whether or not to return the logits.
Returns:
The generator loss, i.e., the sliced approximation of the distance between
the projected distributions (warning: discriminator should maximize it).
"""
with tf.variable_scope("sliced_gan"):
with tf.variable_scope("discriminator"):
logits1 = discriminator(input1)
with tf.variable_scope("discriminator", reuse=True):
logits2 = discriminator(input2)
if do_random_vecs:
random_vecs = tf.nn.l2_normalize(
tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0)
def get_sorted_projections(x):
"""Make projections of x and sort them on the batch dimension."""
x = tf.reshape(x, [-1, shape_list(x)[-1]])
batch_size = shape_list(x)[0]
if do_random_vecs and do_tanh:
n = tf.nn.l2_normalize(x, axis=1)
proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(n)], axis=1)
elif do_random_vecs:
n = tf.nn.l2_normalize(x, axis=1)
proj = tf.matmul(n, random_vecs)
else:
proj = tf.tanh(x)
proj = tf.transpose(proj, [1, 0]) # [num_vecs, batch] after this.
if is_xla_compiled():
proj_dtype = proj.dtype
proj = tf.cast(proj, tf.bfloat16)
# Currently TPU only supports 1-D top_k calls.
map_fn = lambda x: tf.nn.top_k(x, k=batch_size, sorted=True)[0]
values = tf.map_fn(map_fn, proj)
values = tf.cast(values, proj_dtype)
else:
values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True)
return values
proj1 = get_sorted_projections(logits1)
proj2 = get_sorted_projections(logits2)
dist = tf.reduce_mean(tf.square(proj1 - proj2))
if return_logits:
return dist, logits1, logits2
return dist
def lrelu(input_, leak=0.2, name="lrelu"):
return tf.maximum(input_, leak * input_, name=name)
def deep_discriminator(x,
batch_norm,
is_training,
filters=64,
filter_size=4,
stride=2,
output_size=1024):
"""Discriminator architecture based on InfoGAN."""
with tf.variable_scope(
"discriminator", initializer=tf.random_normal_initializer(stddev=0.02)):
batch_size, height, width = shape_list(x)[:3]
net = tf.layers.conv2d(
x, filters, filter_size, strides=stride, padding="SAME", name="conv1")
net = lrelu(net)
net = tf.layers.conv2d(
net,
2 * filters,
filter_size,
strides=stride,
padding="SAME",
name="conv2")
# [bs, h/4, w/4, 128]
if batch_norm:
net = tf.layers.batch_normalization(
net, training=is_training, momentum=0.999, name="d_bn2")
net = lrelu(net)
size = height * width
x_shape = x.get_shape().as_list()
if x_shape[1] is None or x_shape[2] is None:
net = tf.reduce_mean(net, axis=[1, 2])
else:
net = tf.reshape(net, [batch_size, size * 8])
net = tf.layers.dense(net, output_size, name="d_fc3")
if batch_norm:
net = tf.layers.batch_normalization(
net, training=is_training, momentum=0.999, name="d_bn3")
net = lrelu(net)
return net
def instance_norm(x):
"""Instance normalization layer."""
with tf.variable_scope("instance_norm"):
epsilon = 1e-5
mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
scale = tf.get_variable(
"scale", [x.get_shape()[-1]],
initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
offset = tf.get_variable(
"offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
return out
def general_conv(x,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding="VALID",
name="conv",
do_norm="instance",
do_relu=True,
relufactor=0):
"""Generalized convolution layer."""
with tf.variable_scope(name):
x = tf.layers.conv2d(
x,
num_filters,
filter_size,
stride,
padding,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
bias_initializer=tf.constant_initializer(0.0))
if do_norm == "layer":
x = tf.contrib.layers.layer_norm(x)
elif do_norm == "instance":
x = instance_norm(x)
if do_relu:
if relufactor == 0:
x = tf.nn.relu(x, "relu")
else:
x = lrelu(x, leak=relufactor)
return x
def patch_discriminator(x, filters=64, filter_size=5, n=4,
name="patch_discrim"):
"""Patch descriminator."""
with tf.variable_scope(name):
x_shape = shape_list(x)
spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
for i in range(n):
x = general_conv(
x=x,
num_filters=filters * 2**i,
filter_size=filter_size,
stride=2 if i != n - 1 else 1,
stddev=0.02,
padding="SAME",
name="c%d" % i,
do_norm="instance" if i != 0 else False,
do_relu=i != n - 1,
relufactor=0.2)
x = tf.reduce_mean(x, [1, 2])
return x
def mean_with_attention(x, name, num_heads=4):
"""Mean and attention to reduce spatial dimensions."""
with tf.variable_scope(name):
shape = shape_list(x)
m = tf.reduce_mean(x, [1, 2])
a = tf.layers.dense(x, num_heads, name="mean_attn")
s = tf.reshape(a, [shape[0], -1, num_heads])
s = tf.nn.softmax(s, axis=1)
s = tf.reshape(s, shape[:-1] + [1, num_heads])
am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
return tf.layers.dense(tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]]),
2 * shape[-1], name="mean_attn_final")
def single_discriminator(x, filters=128, kernel_size=8,
strides=4, pure_mean=False):
"""A simple single-layer convolutional discriminator."""
with tf.variable_scope("discriminator"):
net = tf.layers.conv2d(
x, filters, kernel_size, strides=strides, padding="SAME", name="conv1")
if pure_mean:
net = tf.reduce_mean(net, [1, 2])
else:
net = mean_with_attention(net, "mean_with_attention")
return net
def double_discriminator(x, filters1=128, filters2=None,
kernel_size=8, strides=4, pure_mean=False):
"""A convolutional discriminator with 2 layers and concatenated output."""
if filters2 is None:
filters2 = 4 * filters1
with tf.variable_scope("discriminator"):
batch_size = shape_list(x)[0]
net = tf.layers.conv2d(
x, filters1, kernel_size, strides=strides, padding="SAME", name="conv1")
if pure_mean:
net1 = tf.reduce_mean(net, [1, 2])
else:
net1 = mean_with_attention(net, "mean_with_attention1")
tf.reshape(net, [batch_size, -1])
net = tf.nn.relu(net)
net = tf.layers.conv2d(
x, filters2, kernel_size, strides=strides, padding="SAME", name="conv2")
if pure_mean:
net2 = tf.reduce_mean(net, [1, 2])
else:
net2 = mean_with_attention(net, "mean_with_attention2")
return tf.concat([net1, net2], axis=-1)
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
"""Upscaling the image by a factor of f."""
height, width = shape_list(inputs)[1:3]
return tf.image.resize_images(inputs, (height * f, width * f), method)
def tpu_safe_image_summary(image):
if is_xla_compiled():
# We only support float32 images at the moment due to casting complications.
if image.dtype != tf.float32:
image = tf.to_float(image)
else:
image = tf.cast(image, tf.uint8)
return image
# This has been (shamefully) copied from
# GitHub tensorflow/models/blob/master/research/slim/nets/cyclegan.py
#
# tensorflow/models cannot be pip installed, and even if it were we don't want
# to depend on all the models in it.
#
# Therefore copying and forgoing any more bugfixes into it is the most
# expedient way to use this function.
def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"):
"""Upsamples the given inputs.
Args:
net: A Tensor of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
relative to the inputs, of the output dimensions. For example, if kernel
size is [2, 3], then the output height and width will be twice and three
times the input size.
method: The upsampling method: 'nn_upsample_conv',
'bilinear_upsample_conv', or 'conv2d_transpose'.
Returns:
A Tensor which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
"""
with tf.variable_scope("upconv"):
net_shape = tf.shape(net)
height = net_shape[1]
width = net_shape[2]
# Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
# 3x3 "valid" convolution produce an output with the same dimension as the
# input.
spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
if method == "nn_upsample_conv":
net = tf.image.resize_nearest_neighbor(
net, [stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = tf.contrib.layers.conv2d(
net, num_outputs, kernel_size=[3, 3], padding="valid")
elif method == "bilinear_upsample_conv":
net = tf.image.resize_bilinear(net,
[stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = tf.contrib.layers.conv2d(
net, num_outputs, kernel_size=[3, 3], padding="valid")
elif method == "conv2d_transpose":
# This corrects 1 pixel offset for images with even width and height.
# conv2d is left aligned and conv2d_transpose is right aligned for even
# sized images (while doing "SAME" padding).
      # Note: This doesn't reflect the actual model in the paper.
net = tf.contrib.layers.conv2d_transpose(
net, num_outputs, kernel_size=[3, 3], stride=stride, padding="valid")
net = net[:, 1:, 1:, :]
else:
raise ValueError("Unknown method: [%s]" % method)
return net
def weight_targeting(w, k):
"""Weight-level magnitude pruning."""
k = tf.to_int32(k)
w_shape = shape_list(w)
size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
w = tf.reshape(w, [size, w_shape[-1]])
transpose_w = tf.transpose(w)
thres = tf.contrib.framework.sort(tf.abs(transpose_w), axis=1)[:, k]
mask = tf.to_float(thres[None, :] >= tf.abs(w))
return tf.reshape(mask, w_shape)
def unit_targeting(w, k):
"""Unit-level magnitude pruning."""
k = tf.to_int32(k)
w_shape = shape_list(w)
size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
w = tf.reshape(w, [size, w_shape[-1]])
norm = tf.norm(w, axis=0)
thres = tf.contrib.framework.sort(norm, axis=0)[k]
mask = tf.to_float(thres >= norm)[None, :]
mask = tf.tile(mask, [size, 1])
return tf.reshape(mask, w_shape)
def td_conv(inputs,
filters,
kernel_size,
targeting_count,
targeting_fn,
keep_prob,
is_training,
do_prune=True,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
name=None,
reuse=None):
"""Apply targeted dropout to the weights of a convolution."""
with tf.variable_scope(name, default_name="td_conv", reuse=reuse):
nhwc = data_format == "channels_last"
in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1]
kernel_shape = [kernel_size, kernel_size, in_dim, filters]
w = tf.get_variable(
"DW", shape=kernel_shape, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable("b", shape=[filters], initializer=bias_initializer)
if keep_prob < 1.0:
w = targeted_dropout(
w,
targeting_count,
keep_prob,
targeting_fn,
is_training,
do_prune=do_prune)
if isinstance(strides, int):
strides = [strides, strides]
if isinstance(dilation_rate, int):
dilation_rate = [dilation_rate, dilation_rate]
if nhwc:
strides = [1, strides[0], strides[1], 1]
dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1]
else:
strides = [1, 1, strides[0], strides[1]]
dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]]
y = tf.nn.conv2d(
inputs,
w,
strides,
padding,
data_format="NHWC" if nhwc else "NCHW",
dilations=dilation_rate,
name=None)
if use_bias:
y += b
if activation:
y = activation(y)
return y
def targeted_dropout(inputs,
k,
keep_prob,
targeting_fn,
is_training,
do_prune=False):
"""Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`.
"""
if not is_training and do_prune:
k = tf.round(tf.to_float(k) * tf.to_float(1. - keep_prob))
mask = targeting_fn(inputs, k)
mask = tf.cast(mask, inputs.dtype)
if is_training:
return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
elif do_prune:
return inputs * (1 - mask)
else:
return inputs
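# Usage sketch (illustrative, not part of the original file): apply targeted dropout
# to a dense weight matrix, targeting the smallest-magnitude weights with the
# weight_targeting function defined above.
#
#   w = tf.get_variable("w", [256, 128])
#   w_td = targeted_dropout(w, k=64, keep_prob=0.5,
#                           targeting_fn=weight_targeting,
#                           is_training=True, do_prune=True)
#
# At evaluation time with do_prune=True, roughly k * (1 - keep_prob) of the targeted
# weights per unit are zeroed instead of being dropped stochastically.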
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
"""KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).
Args:
mu: mu parameter of the distribution.
log_var: log(var) parameter of the distribution.
mu_p: optional mu from a learned prior distribution
log_var_p: optional log(var) from a learned prior distribution
Returns:
the KL loss.
"""
batch_size = shape_list(mu)[0]
prior_distribution = tfp.distributions.Normal(
mu_p, tf.exp(tf.multiply(0.5, log_var_p)))
posterior_distribution = tfp.distributions.Normal(
mu, tf.exp(tf.multiply(0.5, log_var)))
kld = tfp.distributions.kl_divergence(posterior_distribution,
prior_distribution)
return tf.reduce_sum(kld) / tf.to_float(batch_size)
def sparse_equals_constant(constant, tensor):
return tf.SparseTensor(
indices=tensor.indices,
dense_shape=tensor.dense_shape,
values=tf.equal(tensor.values, constant))
def sparse_expand_dims(tensor, current_num_dims, axis=0):
if axis == -1:
axis = current_num_dims
new_col = tf.zeros([tf.shape(tensor.indices)[0]], dtype=tf.int64)
cols = tf.unstack(tensor.indices, axis=1, num=current_num_dims)
shape = tf.unstack(tensor.dense_shape, num=current_num_dims)
new_indices = tf.stack(cols[:axis] + [new_col] + cols[axis:], axis=1)
return tf.SparseTensor(
indices=new_indices,
values=tensor.values,
dense_shape=tf.stack(shape[:axis] + [1] + shape[axis:]))
def sparse_add_constant(constant, tensor):
return tf.SparseTensor(
indices=tensor.indices,
values=constant + tensor.values,
dense_shape=tensor.dense_shape)
def sparse_eye(size):
indices = tf.cast(tf.stack([tf.range(size), tf.range(size)]), tf.int64)
values = tf.ones(size)
dense_shape = [tf.cast(size, tf.int64), tf.cast(size, tf.int64)]
return tf.SparseTensor(
indices=indices, values=values, dense_shape=dense_shape)
# modification from https://github.com/tensorflow/tensorflow/pull/21276
# without special initialization for g
class WeightNorm(tf.keras.layers.Wrapper):
""" This wrapper reparameterizes a layer by decoupling the weight's
magnitude and direction. This speeds up convergence by improving the
conditioning of the optimization problem.
Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
Tim Salimans, Diederik P. Kingma (2016)
WeightNorm wrapper works for keras and tf layers.
```python
net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'),
input_shape=(32, 32, 3), data_init=True)(x)
net = WeightNorm(tf.keras.layers.Conv2D(16, 5, activation='relu'),
data_init=True)
net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'),
data_init=True)(net)
net = WeightNorm(tf.keras.layers.Dense(n_classes),
data_init=True)(net)
```
Arguments:
layer: a layer instance.
data_init: If `True` use data dependent variable initialization
Raises:
ValueError: If not initialized with a `Layer` instance.
ValueError: If `Layer` does not contain a `kernel` of weights
NotImplementedError: If `data_init` is True and running graph execution
"""
def __init__(self, layer, data_init=False, **kwargs):
if not isinstance(layer, tf.keras.layers.Layer):
raise ValueError(
"Please initialize `WeightNorm` layer with a "
"`Layer` instance. You passed: {input}".format(input=layer))
super(WeightNorm, self).__init__(layer, **kwargs)
self._track_checkpointable(layer, name="layer")
def _compute_weights(self):
"""Generate weights with normalization."""
with tf.variable_scope("compute_weights"):
self.layer.kernel = tf.nn.l2_normalize(
self.layer.v, axis=self.norm_axes) * self.layer.g
def _init_norm(self, weights):
"""Set the norm of the weight vector."""
with tf.variable_scope("init_norm"):
flat = tf.reshape(weights, [-1, self.layer_depth])
return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))
def _data_dep_init(self, inputs):
"""Data dependent initialization for eager execution."""
with tf.variable_scope("data_dep_init"):
# Generate data dependent init values
activation = self.layer.activation
self.layer.activation = None
x_init = self.layer.call(inputs)
      m_init, v_init = tf.nn.moments(x_init, self.norm_axes)
scale_init = 1. / tf.sqrt(v_init + 1e-10)
# Assign data dependent init values
self.layer.g = self.layer.g * scale_init
self.layer.bias = (-m_init * scale_init)
self.layer.activation = activation
self.initialized = True
def build(self, input_shape=None):
"""Build `Layer`."""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = tf.layers.InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = False
if not hasattr(self.layer, "kernel"):
raise ValueError("`WeightNorm` must wrap a layer that"
" contains a `kernel` for weights")
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
self.layer.v = self.layer.kernel
self.layer.g = self.layer.add_variable(
name="g",
shape=(self.layer_depth,),
initializer=tf.ones_initializer,
dtype=self.layer.kernel.dtype,
trainable=True)
# with ops.control_dependencies([self.layer.g.assign(
# self._init_norm(self.layer.v))]):
# self._compute_weights()
self._compute_weights()
self.layer.built = True
super(WeightNorm, self).build()
self.built = True
def call(self, inputs):
"""Call `Layer`."""
# if context.executing_eagerly():
# if not self.initialized:
# self._data_dep_init(inputs)
self._compute_weights() # Recompute weights for each forward pass
output = self.layer.call(inputs)
return output
def compute_output_shape(self, input_shape):
return tf.TensorShape(
self.layer.compute_output_shape(input_shape).as_list())
| mlperf/training_results_v0.5 | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/layers/common_layers.py | Python | apache-2.0 | 132,971 | 0.007994 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0022_add_site_name'),
]
operations = [
migrations.AlterField(
model_name='pagerevision',
name='user',
field=models.ForeignKey(
on_delete=django.db.models.deletion.SET_NULL,
verbose_name='user', blank=True, to=settings.AUTH_USER_MODEL, null=True
),
),
]
| chrxr/wagtail | wagtail/wagtailcore/migrations/0023_alter_page_revision_on_delete_behaviour.py | Python | bsd-3-clause | 622 | 0.001608 |
# -*- mode:python -*-
import flask
import json
import logging
from datetime import datetime
import inflection
from functools import wraps
from flask import request, url_for
from werkzeug.exceptions import HTTPException
from .client.api.model import *
from . import database
from . import helpers
from .application import db
mgr = database.DatabaseManager(db)
log = logging.getLogger(__name__)
api = flask.Blueprint('api', __name__)
# =============================================================================
# API Helpers
# =============================================================================
def route_api(application, *args, **kwargs):
def decorator(fn):
@application.route(*args, **kwargs)
@wraps(fn)
def wrapper(*args, **kwargs):
headers = None
status_code = 200
try:
value = fn(*args, **kwargs)
except HTTPException as e:
raise helpers.set_exception_response(e)
if isinstance(value, tuple):
if len(value) > 2:
headers = value[2]
status_code = value[1]
value = value[0]
return helpers.jsonify(value, status_code, headers)
return fn
return decorator
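# Usage sketch (illustrative only): route_api wraps a Flask view so that its return
# value (optionally a (body, status, headers) tuple) is serialized by helpers.jsonify.
#
#   @route_api(api, '/ping/')
#   def ping():
#       return {'pong': True}      # rendered as a JSON response with status 200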
def _dashboard_sort_column():
"""Return a SQLAlchemy column descriptor to sort results by, based on
the 'sort' and 'order' request parameters.
"""
columns = {
'created' : database.DashboardRecord.creation_date,
'modified' : database.DashboardRecord.last_modified_date,
'category' : database.DashboardRecord.category,
'id' : database.DashboardRecord.id,
'title' : database.DashboardRecord.title
}
colname = helpers.get_param('sort', 'created')
order = helpers.get_param('order')
column = database.DashboardRecord.creation_date
if colname in columns:
column = columns[colname]
if order == 'desc' or order == u'desc':
return column.desc()
else:
return column.asc()
def _set_dashboard_hrefs(dash):
"""Add the various ReSTful hrefs to an outgoing dashboard
    representation. dash should be the dictionary form of the dashboard,
not the model object.
"""
id = dash['id']
dash['href'] = url_for('api.dashboard_get', id=id)
dash['definition_href'] = url_for('api.dashboard_get_definition', id=id)
dash['view_href'] = url_for('ui.dashboard_with_slug',
id=id,
slug=inflection.parameterize(dash['title']))
if 'definition' in dash:
definition = dash['definition']
definition['href'] = url_for('api.dashboard_get_definition', id=id)
return dash
def _dashboards_response(dashboards):
"""Return a Flask response object for a list of dashboards in API
format. dashboards must be a list of dashboard model objects, which
will be converted to their JSON representation.
"""
if not isinstance(dashboards, list):
dashboards = [dashboards]
include_definition = helpers.get_param_boolean('definition', False)
return [ _set_dashboard_hrefs(d.to_json(include_definition=include_definition)) for d in dashboards]
def _set_tag_hrefs(tag):
"""Add ReSTful href attributes to a tag's dictionary
representation.
"""
id = tag['id']
tag['href'] = url_for('api.tag_get', id=id)
return tag
def _tags_response(tags):
"""Return a Flask response object for a list of tags in API
format. tags must be a list of tag model objects, which
will be converted to their JSON representation.
"""
if not isinstance(tags, list):
tags = [tags]
return [_set_tag_hrefs(t.to_json()) for t in tags]
# =============================================================================
# Dashboards
# =============================================================================
@route_api(api, '/dashboard/')
def dashboard_list():
"""Listing for all dashboards. Returns just the metadata, not the
definitions.
"""
imported_from = request.args.get('imported_from')
if imported_from:
query = database.DashboardRecord.query.filter_by(imported_from=imported_from) \
.order_by(_dashboard_sort_column())
else:
query = database.DashboardRecord.query.order_by(_dashboard_sort_column())
dashboards = [d for d in query.all()]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/tagged/<tag>')
def dashboard_list_tagged(tag):
"""Listing for a set of dashboards with a tag applied. Returns just
the metadata, not the definitions.
"""
tag = database.TagRecord.query.filter_by(name=tag).first()
if not tag:
return _dashboards_response([])
dashboards = [d for d in tag.dashboards.order_by(_dashboard_sort_column()) if tag]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/category/<category>')
def dashboard_list_dashboards_in_category(category):
"""Listing for a set of dashboards in a specified category. Returns
just the metadata, not the definitions.
"""
dashboards = [d for d in database.DashboardRecord.query
.filter_by(category=category)
.order_by(_dashboard_sort_column()) ]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/category/')
def dashboard_list_all_dashboard_categories():
result = db.session.query(
database.DashboardRecord.category,
db.func.count(database.DashboardRecord.category)
).group_by(database.DashboardRecord.category).all()
categories = []
for (name, count) in result:
categories.append({
'name' : name,
'count' : count,
})
return categories
@route_api(api, '/dashboard/<id>')
def dashboard_get(id):
"""Get the metadata for a single dashboard.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
rendering = helpers.get_param('rendering', False)
include_definition = helpers.get_param_boolean('definition', False)
dash = _set_dashboard_hrefs(dashboard.to_json(rendering or include_definition))
if rendering:
dash['preferences'] = helpers.get_preferences()
return dash
@route_api(api, '/dashboard/<id>/for-rendering')
def dashboard_get_for_rendering(id):
"""Get a dashboard with its definition, and current settings necessary
for rendering.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
dash = _set_dashboard_hrefs(dashboard.to_json(True))
return {
'dashboard' : dash,
'preferences' : helpers.get_preferences()
}
@route_api(api, '/dashboard/', methods=['POST'])
def dashboard_create():
"""Create a new dashboard with an empty definition.
"""
dashboard = database.DashboardRecord.from_json(request.json)
if not dashboard.title:
return {
'error_message': "Missing required field 'title'"
}, 400
if 'definition' in request.json:
dashboard.definition = database.DefinitionRecord(dumps(request.json['definition']))
else:
dashboard.definition = database.DefinitionRecord(dumps(DashboardDefinition()))
mgr.store_dashboard(dashboard)
href = url_for('api.dashboard_get', id=dashboard.id)
return {
'dashboard_href' : href,
'view_href' : url_for('ui.dashboard_with_slug',
id=dashboard.id,
slug=inflection.parameterize(dashboard.title))
}, 201, { 'Location' : href }
@route_api(api, '/dashboard/<id>', methods=['PUT'])
def dashboard_update(id):
"""Update the metadata for an existing dashboard.
"""
body = request.json
dashboard = database.DashboardRecord.query.get_or_404(id)
dashboard.merge_from_json(body)
mgr.store_dashboard(dashboard)
# TODO - return similar to create, above
return {}
@route_api(api, '/dashboard/<id>', methods=['DELETE'])
def dashboard_delete(id):
"""Delete a dashboard. Use with caution.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
db.session.delete(dashboard)
db.session.commit()
return {}, 204
@route_api(api, '/dashboard/<id>/definition')
def dashboard_get_definition(id):
"""Fetch the definition for a dashboard. This returns the
    representation to use when modifying a dashboard.
"""
dashboard = database.DashboardRecord.query.filter_by(id=id)[0]
definition = database.DashboardRecord.query.get_or_404(id).definition.to_json()
definition['href'] = url_for('api.dashboard_get_definition', id=id)
definition['dashboard_href'] = url_for('api.dashboard_get', id=id)
return definition
@route_api(api, '/dashboard/<id>/definition', methods=['PUT'])
def dashboard_update_definition(id):
"""Update the definition of the dashboard. This should use the
representation returned by /api/dashboard/<id>/definition, and
should NOT have any embedded variables expanded, nor should it
have complete graphite URLs in the queries.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
# Validate the payload
definition = DashboardDefinition.from_json(json.loads(request.data.decode('utf-8')))
if dashboard.definition:
dashboard.definition.definition = dumps(definition)
else:
dashboard.definition = database.DashboardRecordDef(request.data)
mgr.store_dashboard(dashboard)
return {}
# =============================================================================
# Tags
# =============================================================================
@route_api(api, '/tag/')
def tag_list():
"""Listing for all tags.
"""
tags = db.session.query(database.TagRecord).all()
return _tags_response(tags)
@route_api(api, '/tag/<id>')
def tag_get(id):
tag = database.TagRecord.query.get_or_404(id)
return _tags_response(tag)
# =============================================================================
# Miscellany
# =============================================================================
@route_api(api, '/preferences/')
def preferences_get():
return helpers.get_preferences()
@route_api(api, '/preferences/', methods=['PUT'])
def preferences_put():
helpers.set_preferences(request.json)
return helpers.get_preferences()
| aalpern/tessera | tessera-server/tessera/views_api.py | Python | apache-2.0 | 10,412 | 0.004898 |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import os
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
from locust import __version__
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
# autoclass options
#autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
#templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Locust'
#copyright = ''
# Intersphinx config
intersphinx_mapping = {
'requests': ('http://requests.readthedocs.org/en/latest/', None),
}
# The full version, including alpha/beta/rc tags.
release = __version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# Sphinx will recurse into subversion configuration folders and try to read
# any document file within. These should be ignored.
# Note: exclude_dirnames is new in Sphinx 0.5
exclude_dirnames = []
# Options for HTML output
# -----------------------
html_show_sourcelink = False
html_file_suffix = ".html"
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# HTML theme
#html_theme = "haiku"
#html_theme = "default"
#html_theme_options = {
# "rightsidebar": "true",
# "codebgcolor": "#fafcfa",
# "bodyfont": "Arial",
#}
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'trac'
| bugsduggan/locust | docs/conf.py | Python | mit | 2,962 | 0.005739 |
import os
from typing import Dict, List, Union
OptionalJSON = Union[List, Dict, float, int, str, bool, None]
def ensure_dir_exists(directory):
if not os.path.exists(directory):
os.mkdir(directory)
def get_dir(directory: str) -> str:
"""
Return a string which contains the complete path to the input directory
Current directory structure:
PATinderBot
src
img
like
match
nope
json
data
:param directory: string of the directory to search for
:return: string with the complete path to the searched for directory
"""
current_dir = os.path.dirname(__file__)
project_dir = os.path.join(current_dir, '..')
result = os.path.join(project_dir, directory)
ensure_dir_exists(result)
return result
| physicalattraction/PATinderBot | src/common.py | Python | gpl-3.0 | 821 | 0 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
# arranged print
from pprint import pprint, pformat
# Jinja2 Template Engine
from jinja2 import Template, Environment
# JSNAPy
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
from jnpr.jsnapy import SnapAdmin
template_dir_name = './test_templates/'
template_base_name = 'test_bgp_advertised_route.jinja2'
param_advertised_route = {
"neighbor_address_ipv4" : "192.168.35.2",
"advertised_route_address_ipv4" : "10.10.10.0",
"advertised_route_subnet_ipv4" : "24",
}
print 'Load test_template : '
template_filename = template_dir_name + template_base_name
with open(template_filename, 'r') as conf:
template_txt = conf.read()
test_yml = Environment().from_string(template_txt).render(param_advertised_route)
test_base_name = template_base_name.rstrip('.jinja2') +\
'_' + param_advertised_route["neighbor_address_ipv4"] + '.yml'
test_base_name = test_base_name.rstrip('.yml').replace('.','-') + '.yml'
print 'Test file : ' + test_base_name
print 'Test_yml: ' + test_yml
print 'Save test on ./tests : '
test_dir_name = './tests/'
test_filename = test_dir_name + test_base_name
with open(test_filename, 'w') as f:
f.write(test_yml)
print test_filename
jsnapy_config =\
'''
tests:
- %s
''' % (test_filename)
dev1 = Device(
host = '192.168.34.16',
user = 'user1',
password = 'password1')
dev1.open()
jsnapy = SnapAdmin()
snapcheck_dict = jsnapy.snapcheck(
data = jsnapy_config,
dev = dev1,
file_name = "snap01")
print '##### JSNAPy Test : Start #####'
for snapcheck in snapcheck_dict:
print "Devece : ", snapcheck.device
print "Final result : ", snapcheck.result
print "Total passed : ", snapcheck.no_passed
print "Total failed : ", snapcheck.no_failed
print 'snapcheck test_details : '
print '-'*30
pprint(dict(snapcheck.test_details))
print '-'*30
print '##### JSNAPy Test : End #####'
dev1.close() | taijiji/sample_jsnapy | run_test_bgp_advertised_route.py | Python | mit | 2,089 | 0.011967 |
import sys
import numpy as np
if __name__ == '__main__':
print 'Loading word vectors...'
wordvecs = None
wordlist = []
for i, line in enumerate(sys.stdin):
word, vec = line.strip().split(' ', 1)
vec = map(float, vec.split())
if wordvecs is None:
wordvecs = np.ones((400000, len(vec)), dtype=np.float)
wordvecs[i] = vec
wordlist.append(word)
words = dict((k, wordvecs[v]) for v, k in enumerate(wordlist))
tests = [('he', words['he']), ('she', words['she'])]
tests = [
('athens-greece+berlin', words['athens'] - words['greece'] + words['berlin']),
('sydney-australia+berlin', words['sydney'] - words['australia'] + words['berlin']),
('australia-sydney+germany', words['australia'] - words['sydney'] + words['berlin']),
('king-male+female', words['king'] - words['male'] + words['female']),
('king-man+woman', words['king'] - words['man'] + words['woman']),
('queen-female+male', words['queen'] - words['female'] + words['male']),
('queen-woman+man', words['queen'] - words['woman'] + words['man']),
        ('plane-air+rail', words['plane'] - words['air'] + words['rail']),
]
for test, tvec in tests:
results = []
print '=-=-' * 10
print 'Testing {}'.format(test)
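        # Cosine similarity between the analogy vector and every word vector:
        # cos(a, b) = a . b / (|a| * |b|), computed for all rows of wordvecs at once.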
res = np.dot(wordvecs, tvec) / np.linalg.norm(tvec) / np.linalg.norm(wordvecs, axis=1)
results = zip(res, wordlist)
print '\n'.join([w for _, w in sorted(results, reverse=True)[:20]])
| Smerity/glove-guante | cosine_similarity.py | Python | mit | 1,457 | 0.008922 |
"""
Given a nested list of integers represented as a string, implement a parser to deserialize it.
Each element is either an integer, or a list -- whose elements may also be integers or other lists.
Note: You may assume that the string is well-formed:
String is non-empty.
String does not contain white spaces.
String contains only digits 0-9, [, - ,, ].
Example 1:
Given s = "324",
You should return a NestedInteger object which contains a single integer 324.
Example 2:
Given s = "[123,[456,[789]]]",
Return a NestedInteger object containing a nested list with 2 elements:
1. An integer containing value 123.
2. A nested list containing two elements:
i. An integer containing value 456.
ii. A nested list with one element:
a. An integer containing value 789.
"""
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution(object):
def deserialize(self, s):
"""
:type s: str
:rtype: NestedInteger
"""
def parse():
num = ''
while s[0] in '-0123456789':
num += s.pop(0)
if num:
return NestedInteger(int(num))
s.pop(0)
result = NestedInteger()
while s[0] != ']':
result.add(parse())
if s[0] == ',':
s.pop(0)
s.pop(0)
return result
s = list(s + ' ')
return parse()
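# Usage sketch (illustrative; NestedInteger is provided by the judge, not defined here):
#
#   s = Solution()
#   ni = s.deserialize("[123,[456,[789]]]")
#   ni.isInteger()                  # False: the outer element is a nested list
#   ni.getList()[0].getInteger()    # 123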
| dichen001/Go4Jobs | JackChen/string/385. Mini Parser.py | Python | gpl-3.0 | 2,721 | 0.00294 |
"""
Optimal power flow based on branch power flow modelling
Additional case33 is added to the test cases
Note: The proposed method has been verified
@author: Tianyang Zhao
@email: zhaoty@ntu.edu.sg
"""
from Two_stage_stochastic_optimization.power_flow_modelling import case33
from pypower import runopf
from gurobipy import *
from numpy import zeros, c_, shape, ix_, ones, r_, arange, sum, diag, concatenate
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, diags
def run(mpc):
"""
Gurobi based optimal power flow modelling and solution
:param mpc: The input case of optimal power flow
:return: obtained solution
"""
# Data format
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I, QD
from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN
from pypower.ext2int import ext2int
mpc = ext2int(mpc)
baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"]
nb = shape(mpc['bus'])[0] ## number of buses
nl = shape(mpc['branch'])[0] ## number of branches
ng = shape(mpc['gen'])[0] ## number of dispatchable injections
f = branch[:, F_BUS] ## list of "from" buses
t = branch[:, T_BUS] ## list of "to" buses
i = range(nl) ## double set of row indices
# Connection matrix
Cf = sparse((ones(nl), (i, f)), (nl, nb))
Ct = sparse((ones(nl), (i, t)), (nl, nb))
Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
Branch_R = branch[:, BR_R]
Branch_X = branch[:, BR_X]
Cf = Cf.T
Ct = Ct.T
# Obtain the boundary information
Slmax = branch[:, RATE_A] / baseMVA
Pij_l = -Slmax
Qij_l = -Slmax
Iij_l = zeros(nl)
Vm_l = turn_to_power(bus[:, VMIN], 2)
Pg_l = gen[:, PMIN] / baseMVA
Qg_l = gen[:, QMIN] / baseMVA
Pij_u = Slmax
Qij_u = Slmax
Iij_u = Slmax
Vm_u = turn_to_power(bus[:, VMAX], 2)
Pg_u = 2 * gen[:, PMAX] / baseMVA
Qg_u = 2 * gen[:, QMAX] / baseMVA
    lx = concatenate([Pij_l, Qij_l, Iij_l, Vm_l, Pg_l, Qg_l])
ux = concatenate([Pij_u, Qij_u, Iij_u, Vm_u, Pg_u, Qg_u])
model = Model("OPF")
# Define the decision variables
x = {}
nx = 3 * nl + nb + 2 * ng
for i in range(nx):
x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)
# Add system level constraints
Aeq_p = hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng))])
beq_p = bus[:, PD] / baseMVA
# Add constraints for each sub system
Aeq_q = hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg])
beq_q = bus[:, QD] / baseMVA
Aeq_KVL = hstack([-2 * diags(Branch_R), -2 * diags(Branch_X),
diags(turn_to_power(Branch_R, 2)) + diags(turn_to_power(Branch_X, 2)), Cf.T - Ct.T,
zeros((nl, 2 * ng))])
beq_KVL = zeros(nl)
Aeq = vstack([Aeq_p, Aeq_q, Aeq_KVL])
Aeq = Aeq.todense()
beq = concatenate([beq_p, beq_q, beq_KVL])
neq = len(beq)
for i in range(neq):
expr = 0
for j in range(nx):
expr += x[j] * Aeq[i, j]
model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])
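    # Second-order cone relaxation of the branch flow (DistFlow) equations: for each
    # line, Pij^2 + Qij^2 <= Iij * V_from, where Iij and V are the squared current
    # and squared voltage magnitude variables; this relaxation is typically tight for
    # radial feeders such as case33.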
for i in range(nl):
model.addConstr(x[i]*x[i] + x[i+nl]*x[i+nl] <= x[i+2*nl]*x[f[i]+3*nl], name='"rc{0}"'.format(i))
obj = 0
for i in range(ng):
obj += gencost[i, 4] * x[i + 3 * nl + nb] * x[i + 3 * nl + nb] * baseMVA * baseMVA + gencost[i, 5] * x[
i + 3 * nl + nb] * baseMVA + gencost[i, 6]
model.setObjective(obj)
model.Params.OutputFlag = 0
model.Params.LogToConsole = 0
model.Params.DisplayInterval = 1
model.optimize()
xx = []
for v in model.getVars():
xx.append(v.x)
obj = obj.getValue()
Pij = xx[0:nl]
Qij = xx[nl + 0:2 * nl]
Iij = xx[2 * nl:3 * nl]
Vi = xx[3 * nl:3 * nl + nb]
Pg = xx[3 * nl + nb:3 * nl + nb + ng]
Qg = xx[3 * nl + nb + ng:3 * nl + nb + 2 * ng]
primal_residual = []
for i in range(nl):
primal_residual.append(Pij[i]*Pij[i] + Qij[i]*Qij[i] - Iij[i]*Vi[int(f[i])])
return xx, obj, primal_residual
def turn_to_power(list, power=1):
return [number ** power for number in list]
if __name__ == "__main__":
from pypower import runopf
mpc = case33.case33() # Default test case
(xx, obj,residual) = run(mpc)
result = runopf.runopf(case33.case33())
gap = 100*(result["f"]-obj)/obj
print(gap)
print(max(residual))
| Matrixeigs/Optimization | Two_stage_stochastic_optimization/optimal_power_flows/opf_branch_power.py | Python | mit | 4,744 | 0.005691 |
# coding: utf-8
import json
import os
import time
from graphite_api._vendor import whisper
from . import TestCase, WHISPER_DIR
try:
from flask.ext.cache import Cache
except ImportError:
Cache = None
class RenderTest(TestCase):
db = os.path.join(WHISPER_DIR, 'test.wsp')
url = '/render'
def create_db(self):
whisper.create(self.db, [(1, 60)])
self.ts = int(time.time())
whisper.update(self.db, 0.5, self.ts - 2)
whisper.update(self.db, 0.4, self.ts - 1)
whisper.update(self.db, 0.6, self.ts)
def test_render_view(self):
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'json',
'noCache': 'true'})
self.assertEqual(json.loads(response.data.decode('utf-8')), [])
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'raw',
'noCache': 'true'})
self.assertEqual(response.data.decode('utf-8'), "")
self.assertEqual(response.headers['Content-Type'], 'text/plain')
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'pdf'})
self.assertEqual(response.headers['Content-Type'], 'application/x-pdf')
response = self.app.get(self.url, query_string={'target': 'test'})
self.assertEqual(response.headers['Content-Type'], 'image/png')
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'dygraph',
'noCache': 'true'})
self.assertEqual(json.loads(response.data.decode('utf-8')), {})
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'rickshaw',
'noCache': 'true'})
self.assertEqual(json.loads(response.data.decode('utf-8')), [])
self.create_db()
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'json'})
data = json.loads(response.data.decode('utf-8'))
end = data[0]['datapoints'][-4:]
try:
self.assertEqual(
end, [[None, self.ts - 3], [0.5, self.ts - 2],
[0.4, self.ts - 1], [0.6, self.ts]])
except AssertionError:
self.assertEqual(
end, [[0.5, self.ts - 2], [0.4, self.ts - 1],
[0.6, self.ts], [None, self.ts + 1]])
response = self.app.get(self.url, query_string={'target': 'test',
'maxDataPoints': 2,
'format': 'json'})
data = json.loads(response.data.decode('utf-8'))
# 1 is a time race cond
self.assertTrue(len(data[0]['datapoints']) in [1, 2])
response = self.app.get(self.url, query_string={'target': 'test',
'maxDataPoints': 200,
'format': 'json'})
data = json.loads(response.data.decode('utf-8'))
# 59 is a time race cond
self.assertTrue(len(data[0]['datapoints']) in [59, 60])
response = self.app.get(self.url, query_string={'target': 'test',
'noNullPoints': 1,
'format': 'json'})
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data[0]['datapoints'],
[[0.5, self.ts - 2],
[0.4, self.ts - 1],
[0.6, self.ts]])
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'raw'})
try:
self.assertEqual(
response.data.decode('utf-8'),
'test,%d,%d,1|%s' % (self.ts - 59, self.ts + 1,
'None,' * 57 + '0.5,0.4,0.6\n'))
except AssertionError:
self.assertEqual(
response.data.decode('utf-8'),
'test,%d,%d,1|%s' % (self.ts - 58, self.ts + 2,
'None,' * 56 + '0.5,0.4,0.6,None\n'))
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'dygraph'})
data = json.loads(response.data.decode('utf-8'))
end = data['data'][-4:]
try:
self.assertEqual(
end, [[(self.ts - 3) * 1000, None],
[(self.ts - 2) * 1000, 0.5],
[(self.ts - 1) * 1000, 0.4],
[self.ts * 1000, 0.6]])
except AssertionError:
self.assertEqual(
end, [[(self.ts - 2) * 1000, 0.5],
[(self.ts - 1) * 1000, 0.4],
[self.ts * 1000, 0.6],
[(self.ts + 1) * 1000, None]])
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'rickshaw'})
data = json.loads(response.data.decode('utf-8'))
end = data[0]['datapoints'][-4:]
try:
self.assertEqual(
end, [{'x': self.ts - 3, 'y': None},
{'x': self.ts - 2, 'y': 0.5},
{'x': self.ts - 1, 'y': 0.4},
{'x': self.ts, 'y': 0.6}])
except AssertionError:
self.assertEqual(
end, [{'x': self.ts - 2, 'y': 0.5},
{'x': self.ts - 1, 'y': 0.4},
{'x': self.ts, 'y': 0.6},
{'x': self.ts + 1, 'y': None}])
def test_render_constant_line(self):
response = self.app.get(self.url, query_string={
'target': 'constantLine(12)'})
self.assertEqual(response.headers['Content-Type'], 'image/png')
response = self.app.get(self.url, query_string={
'target': 'constantLine(12)', 'format': 'json'})
data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
self.assertEqual(len(data), 3)
for point, ts in data:
self.assertEqual(point, 12)
response = self.app.get(self.url, query_string={
'target': 'constantLine(12)', 'format': 'json',
'maxDataPoints': 12})
data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
self.assertEqual(len(data), 3)
for point, ts in data:
self.assertEqual(point, 12)
def test_float_maxdatapoints(self):
response = self.app.get(self.url, query_string={
'target': 'sin("foo")', 'format': 'json',
'maxDataPoints': 5.5}) # rounded to int
data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
self.assertEqual(len(data), 5)
def test_constantline_pathexpr(self):
response = self.app.get(self.url, query_string={
'target': 'sumSeries(constantLine(12), constantLine(5))',
'format': 'json',
})
data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
self.assertEqual([d[0] for d in data], [17, 17, 17])
def test_area_between(self):
response = self.app.get(self.url, query_string={
'target': ['areaBetween(sin("foo"), sin("bar", 2))'],
'format': 'json',
})
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(len(data), 2)
def test_sumseries(self):
response = self.app.get(self.url, query_string={
'target': ['sumSeries(sin("foo"), sin("bar", 2))',
'sin("baz", 3)'],
'format': 'json',
})
data = json.loads(response.data.decode('utf-8'))
agg = {}
for series in data:
agg[series['target']] = series['datapoints']
for index, value in enumerate(agg['baz']):
self.assertEqual(value, agg['sumSeries(sin(bar),sin(foo))'][index])
response = self.app.get(self.url, query_string={
'target': ['sumSeries(sin("foo"), sin("bar", 2))',
'sin("baz", 3)'],
'format': 'json',
'maxDataPoints': 100,
})
data = json.loads(response.data.decode('utf-8'))
agg = {}
for series in data:
self.assertTrue(len(series['datapoints']) <= 100)
agg[series['target']] = series['datapoints']
for index, value in enumerate(agg['baz']):
self.assertEqual(value, agg['sumSeries(sin(bar),sin(foo))'][index])
def test_correct_timezone(self):
response = self.app.get(self.url, query_string={
'target': 'constantLine(12)',
'format': 'json',
'from': '07:00_20140226',
'until': '08:00_20140226',
# tz is UTC
})
data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
# all the from/until/tz combinations lead to the same window
expected = [[12, 1393398000], [12, 1393399800], [12, 1393401600]]
self.assertEqual(data, expected)
response = self.app.get(self.url, query_string={
'target': 'constantLine(12)',
'format': 'json',
'from': '08:00_20140226',
'until': '09:00_20140226',
'tz': 'Europe/Berlin',
})
data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
self.assertEqual(data, expected)
def test_render_options(self):
self.create_db()
db2 = os.path.join(WHISPER_DIR, 'foo.wsp')
whisper.create(db2, [(1, 60)])
ts = int(time.time())
whisper.update(db2, 0.5, ts - 2)
for qs in [
{'logBase': 'e'},
{'logBase': 1},
{'logBase': 0.5},
{'logBase': 10},
{'margin': -1},
{'colorList': 'orange,green,blue,#0f00f0'},
{'bgcolor': 'orange'},
{'bgcolor': '000000'},
{'bgcolor': '#000000'},
{'bgcolor': '123456'},
{'bgcolor': '#123456'},
{'bgcolor': '#12345678'},
{'bgcolor': 'aaabbb'},
{'bgcolor': '#aaabbb'},
{'bgcolor': '#aaabbbff'},
{'fontBold': 'true'},
{'title': 'Hellò'},
{'title': 'true'},
{'vtitle': 'Hellò'},
{'title': 'Hellò', 'yAxisSide': 'right'},
{'uniqueLegend': 'true', '_expr': 'secondYAxis({0})'},
{'uniqueLegend': 'true', 'vtitleRight': 'foo',
'_expr': 'secondYAxis({0})'},
{'rightWidth': '1', '_expr': 'secondYAxis({0})'},
{'rightDashed': '1', '_expr': 'secondYAxis({0})'},
{'rightColor': 'black', '_expr': 'secondYAxis({0})'},
{'leftWidth': '1', 'target': ['secondYAxis(foo)', 'test']},
{'leftDashed': '1', 'target': ['secondYAxis(foo)', 'test']},
{'leftColor': 'black', 'target': ['secondYAxis(foo)', 'test']},
{'width': '10', '_expr': 'secondYAxis({0})'},
{'logBase': 'e', 'target': ['secondYAxis(foo)', 'test']},
{'graphOnly': 'true', 'yUnitSystem': 'si'},
{'graphOnly': 'true', 'yUnitSystem': 'wat'},
{'lineMode': 'staircase'},
{'lineMode': 'slope'},
{'lineMode': 'slope', 'from': '-1s'},
{'lineMode': 'connected'},
{'min': 1, 'max': 2, 'thickness': 2, 'yUnitSystem': 'none'},
{'yMax': 5, 'yLimit': 0.5, 'yStep': 0.1},
{'yMax': 'max', 'yUnitSystem': 'binary'},
{'yMaxLeft': 5, 'yLimitLeft': 0.5, 'yStepLeft': 0.1,
'_expr': 'secondYAxis({0})'},
{'yMaxRight': 5, 'yLimitRight': 0.5, 'yStepRight': 0.1,
'_expr': 'secondYAxis({0})'},
{'yMin': 0, 'yLimit': 0.5, 'yStep': 0.1},
{'yMinLeft': 0, 'yLimitLeft': 0.5, 'yStepLeft': 0.1,
'_expr': 'secondYAxis({0})'},
{'yMinRight': 0, 'yLimitRight': 0.5, 'yStepRight': 0.1,
'_expr': 'secondYAxis({0})'},
{'areaMode': 'stacked', '_expr': 'stacked({0})'},
{'lineMode': 'staircase', '_expr': 'stacked({0})'},
{'areaMode': 'first', '_expr': 'stacked({0})'},
{'areaMode': 'all', '_expr': 'stacked({0})'},
{'areaMode': 'all', 'areaAlpha': 0.5, '_expr': 'secondYAxis({0})'},
{'areaMode': 'all', 'areaAlpha': 0.5,
'target': ['secondYAxis(foo)', 'test']},
{'areaMode': 'stacked', 'areaAlpha': 0.5, '_expr': 'stacked({0})'},
{'areaMode': 'stacked', 'areaAlpha': 'a', '_expr': 'stacked({0})'},
{'areaMode': 'stacked', '_expr': 'drawAsInfinite({0})'},
{'_expr': 'dashed(lineWidth({0}, 5))'},
{'target': 'areaBetween(*)'},
{'drawNullAsZero': 'true'},
{'_expr': 'drawAsInfinite({0})'},
{'graphType': 'pie', 'pieMode': 'average', 'title': 'Pie'},
{'graphType': 'pie', 'pieMode': 'maximum', 'title': 'Pie'},
{'graphType': 'pie', 'pieMode': 'minimum', 'title': 'Pie'},
{'graphType': 'pie', 'pieMode': 'average', 'hideLegend': 'true'},
{'graphType': 'pie', 'pieMode': 'average', 'valueLabels': 'none'},
{'graphType': 'pie', 'pieMode': 'average',
'valueLabels': 'number'},
{'graphType': 'pie', 'pieMode': 'average', 'pieLabels': 'rotated'},
{'graphType': 'pie', 'pieMode': 'average', 'areaAlpha': '0.1'},
{'graphType': 'pie', 'pieMode': 'average', 'areaAlpha': 'none'},
{'graphType': 'pie', 'pieMode': 'average',
'valueLabelsColor': 'white'},
{'noCache': 'true'},
{'cacheTimeout': 5},
{'cacheTimeout': 5}, # cache hit
{'tz': 'Europe/Berlin'},
]:
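            # Each dict above is one set of render options to exercise; when a
            # '_expr' template is present it wraps each default target, e.g.
            # 'secondYAxis({0})' becomes 'secondYAxis(foo)', before the request is made.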
if qs.setdefault('target', ['foo', 'test']) == ['foo', 'test']:
if '_expr' in qs:
expr = qs.pop('_expr')
qs['target'] = [expr.format(t) for t in qs['target']]
response = self.app.get(self.url, query_string=qs)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['Content-Type'], 'image/png')
if Cache is None or qs.get('noCache'):
self.assertEqual(response.headers['Pragma'], 'no-cache')
self.assertEqual(response.headers['Cache-Control'], 'no-cache')
self.assertFalse('Expires' in response.headers)
else:
self.assertEqual(response.headers['Cache-Control'],
'max-age={0}'.format(
qs.get('cacheTimeout', 60)))
self.assertNotEqual(response.headers['Cache-Control'],
'no-cache')
self.assertFalse('Pragma' in response.headers)
for qs in [
{'bgcolor': 'foo'},
]:
qs['target'] = 'test'
with self.assertRaises(ValueError):
response = self.app.get(self.url, query_string=qs)
for qs in [
{'lineMode': 'stacked'},
]:
qs['target'] = 'test'
with self.assertRaises(AssertionError):
response = self.app.get(self.url, query_string=qs)
def test_render_validation(self):
whisper.create(self.db, [(1, 60)])
response = self.app.get(self.url)
self.assertJSON(response, {'errors': {
'target': 'This parameter is required.'}}, status_code=400)
response = self.app.get(self.url, query_string={'graphType': 'foo',
'target': 'test'})
self.assertJSON(response, {'errors': {
'graphType': "Invalid graphType 'foo', must be one of 'line', "
"'pie'."}}, status_code=400)
response = self.app.get(self.url, query_string={'maxDataPoints': 'foo',
'target': 'test'})
self.assertJSON(response, {'errors': {
'maxDataPoints': 'Must be an integer.'}}, status_code=400)
response = self.app.get(self.url, query_string={
'from': '21:2020140313',
'until': '21:2020140313',
'target': 'test'})
self.assertJSON(response, {'errors': {
'from': 'Invalid empty time range',
'until': 'Invalid empty time range',
}}, status_code=400)
response = self.app.get(self.url, query_string={
'target': 'foo',
'width': 100,
'thickness': '1.5',
'fontBold': 'true',
'fontItalic': 'default',
})
self.assertEqual(response.status_code, 200)
response = self.app.get(self.url, query_string={
'target': 'foo', 'tz': 'Europe/Lausanne'})
self.assertJSON(response, {'errors': {
'tz': "Unknown timezone: 'Europe/Lausanne'.",
}}, status_code=400)
response = self.app.get(self.url, query_string={'target': 'test:aa',
'graphType': 'pie'})
self.assertJSON(response, {'errors': {
'target': "Invalid target: 'test:aa'.",
}}, status_code=400)
response = self.app.get(self.url, query_string={
'target': ['test', 'foo:1.2'], 'graphType': 'pie'})
self.assertEqual(response.status_code, 200)
response = self.app.get(self.url, query_string={'target': ['test',
'']})
self.assertEqual(response.status_code, 200)
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'csv'})
lines = response.data.decode('utf-8').strip().split('\n')
        # normally 60 lines; a timing race condition can leave only 59
self.assertTrue(len(lines) in [59, 60])
self.assertFalse(any([l.strip().split(',')[2] for l in lines]))
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'svg',
'jsonp': 'foo'})
jsonpsvg = response.data.decode('utf-8')
self.assertTrue(jsonpsvg.startswith('foo("<?xml version=\\"1.0\\"'))
self.assertTrue(jsonpsvg.endswith('</script>\\n</svg>")'))
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'svg'})
svg = response.data.decode('utf-8')
self.assertTrue(svg.startswith('<?xml version="1.0"'))
response = self.app.get(self.url, query_string={'target': 'inexisting',
'format': 'svg'})
self.assertEqual(response.status_code, 200)
svg = response.data.decode('utf-8')
self.assertTrue(svg.startswith('<?xml version="1.0"'))
response = self.app.get(self.url, query_string={
'target': 'sum(test)',
})
self.assertEqual(response.status_code, 200)
response = self.app.get(self.url, query_string={
'target': ['sinFunction("a test", 2)',
'sinFunction("other test", 2.1)',
'sinFunction("other test", 2e1)'],
})
self.assertEqual(response.status_code, 200)
response = self.app.get(self.url, query_string={
'target': ['percentileOfSeries(sin("foo bar"), 95, true)']
})
self.assertEqual(response.status_code, 200)
def test_raw_data(self):
whisper.create(self.db, [(1, 60)])
response = self.app.get(self.url, query_string={'rawData': '1',
'target': 'test'})
info, data = response.data.decode('utf-8').strip().split('|', 1)
path, start, stop, step = info.split(',')
datapoints = data.split(',')
try:
self.assertEqual(datapoints, ['None'] * 60)
self.assertEqual(int(stop) - int(start), 60)
except AssertionError:
self.assertEqual(datapoints, ['None'] * 59)
self.assertEqual(int(stop) - int(start), 59)
self.assertEqual(path, 'test')
self.assertEqual(int(step), 1)
def test_jsonp(self):
whisper.create(self.db, [(1, 60)])
start = int(time.time()) - 59
response = self.app.get(self.url, query_string={'format': 'json',
'jsonp': 'foo',
'target': 'test'})
data = response.data.decode('utf-8')
self.assertTrue(data.startswith('foo('))
data = json.loads(data[4:-1])
try:
self.assertEqual(data, [{'datapoints': [
[None, start + i] for i in range(60)
], 'target': 'test'}])
except AssertionError: # Race condition when time overlaps a second
self.assertEqual(data, [{'datapoints': [
[None, start + i + 1] for i in range(60)
], 'target': 'test'}])
def test_sorted(self):
for db in (
('test', 'foo.wsp'),
('test', 'welp.wsp'),
('test', 'baz.wsp'),
):
db_path = os.path.join(WHISPER_DIR, *db)
if not os.path.exists(os.path.dirname(db_path)):
os.makedirs(os.path.dirname(db_path))
whisper.create(db_path, [(1, 60)])
response = self.app.get(self.url, query_string={'rawData': '1',
'target': 'test.*'})
dses = response.data.decode('utf-8').strip().split("\n")
paths = []
for ds in dses:
info, data = ds.strip().split('|', 1)
path, start, stop, step = info.split(',')
paths.append(path)
self.assertEqual(paths, ['test.baz', 'test.foo', 'test.welp'])
def test_bootstrap_fetch_outside_range(self):
self.create_db()
response = self.app.get(
self.url, query_string={
'target': "aliasByNode(movingMedian(test, '15min'), 0)",
},
)
self.assertEqual(response.status_code, 200)
def test_templates(self):
ts = int(time.time())
value = 1
for db in (
('hosts', 'worker1', 'cpu.wsp'),
('hosts', 'worker2', 'cpu.wsp'),
):
db_path = os.path.join(WHISPER_DIR, *db)
if not os.path.exists(os.path.dirname(db_path)):
os.makedirs(os.path.dirname(db_path))
whisper.create(db_path, [(1, 60)])
whisper.update(db_path, value, ts)
value += 1
for query, expected in [
({'target': 'template(hosts.worker1.cpu)'}, 'hosts.worker1.cpu'),
({'target': 'template(constantLine($1),12)'}, '12'),
({'target': 'template(constantLine($1))',
'template[1]': '12'}, '12.0'),
({'target': 'template(constantLine($num),num=12)'}, '12'),
({'target': 'template(constantLine($num))',
'template[num]': '12'}, '12.0'),
({'target': 'template(time($1),"nameOfSeries")'}, 'nameOfSeries'),
({'target': 'template(time($1))',
'template[1]': 'nameOfSeries'}, 'nameOfSeries'),
({'target': 'template(time($name),name="nameOfSeries")'},
'nameOfSeries'),
({'target': 'template(time($name))',
'template[name]': 'nameOfSeries'}, 'nameOfSeries'),
({'target': 'template(sumSeries(hosts.$1.cpu),"worker1")'},
'sumSeries(hosts.worker1.cpu)'),
({'target': 'template(sumSeries(hosts.$1.cpu))',
'template[1]': 'worker*'}, 'sumSeries(hosts.worker*.cpu)'),
({'target': 'template(sumSeries(hosts.$host.cpu))',
'template[host]': 'worker*'}, 'sumSeries(hosts.worker*.cpu)'),
]:
query['format'] = 'json'
response = self.app.get(self.url, query_string=query)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data[0]['target'], expected)
| winguru/graphite-api | tests/test_render.py | Python | apache-2.0 | 24,960 | 0.00004 |
from common.persistence import from_pickle
NWORDS = from_pickle('../data/en_dict.pkl')
print(len(NWORDS))
print(NWORDS['word'])
print(NWORDS['spell'])
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
    """Return all strings one edit away from `word`: every single-character
    deletion, adjacent transposition, replacement, or insertion."""
s = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in s if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in s if len(b) > 1]
replaces = [a + c + b[1:] for a, b in s for c in alphabet if b]
inserts = [a + c + b for a, b in s for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(word):
    """Return the dictionary words that are exactly two edits away from `word`."""
return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)
def known(words): return set(w for w in words if w in NWORDS)
def correct(word):
    """Return the most probable correction for `word`: prefer the word itself if
    known, then known words one edit away, then two, ranked by frequency."""
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
return max(candidates, key=NWORDS.get)
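# Quick illustrative check of the corrector (a sketch -- the actual output depends
# on the word counts pickled in ../data/en_dict.pkl):
print(correct('speling'))    # expected to give 'spelling' (one edit away)
print(correct('korrecter'))  # expected to give 'corrector' (two edits away)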
################ Testing code from here on ################
def spelltest(tests, bias=None, verbose=False):
import time
n, bad, unknown, start = 0, 0, 0, time.clock()
if bias:
for target in tests: NWORDS[target] += bias
for target, wrongs in tests.items():
for wrong in wrongs.split():
n += 1
w = correct(wrong)
if w != target:
bad += 1
unknown += (target not in NWORDS)
if verbose:
print 'correct(%r) => %r (%d); expected %r (%d)' % (
wrong, w, NWORDS[w], target, NWORDS[target])
return dict(bad=bad, n=n, bias=bias, pct=int(100. - 100. * bad / n),
unknown=unknown, secs=int(time.clock() - start))
tests1 = {'access': 'acess', 'accessing': 'accesing', 'accommodation':
'accomodation acommodation acomodation', 'account': 'acount', 'address':
'adress adres', 'addressable': 'addresable', 'arranged': 'aranged arrainged',
'arrangeing': 'aranging', 'arrangement': 'arragment', 'articles': 'articals',
'aunt': 'annt anut arnt', 'auxiliary': 'auxillary', 'available': 'avaible',
'awful': 'awfall afful', 'basically': 'basicaly', 'beginning': 'begining',
'benefit': 'benifit', 'benefits': 'benifits', 'between': 'beetween', 'bicycle':
'bicycal bycicle bycycle', 'biscuits':
'biscits biscutes biscuts bisquits buiscits buiscuts', 'built': 'biult',
'cake': 'cak', 'career': 'carrer',
'cemetery': 'cemetary semetary', 'centrally': 'centraly', 'certain': 'cirtain',
'challenges': 'chalenges chalenges', 'chapter': 'chaper chaphter chaptur',
'choice': 'choise', 'choosing': 'chosing', 'clerical': 'clearical',
'committee': 'comittee', 'compare': 'compair', 'completely': 'completly',
'consider': 'concider', 'considerable': 'conciderable', 'contented':
'contenpted contende contended contentid', 'curtains':
'cartains certans courtens cuaritains curtans curtians curtions', 'decide': 'descide', 'decided':
'descided', 'definitely': 'definately difinately', 'definition': 'defenition',
'definitions': 'defenitions', 'description': 'discription', 'desiccate':
'desicate dessicate dessiccate', 'diagrammatically': 'diagrammaticaally',
'different': 'diffrent', 'driven': 'dirven', 'ecstasy': 'exstacy ecstacy',
'embarrass': 'embaras embarass', 'establishing': 'astablishing establising',
'experience': 'experance experiance', 'experiences': 'experances', 'extended':
'extented', 'extremely': 'extreamly', 'fails': 'failes', 'families': 'familes',
'february': 'febuary', 'further': 'futher', 'gallery': 'galery gallary gallerry gallrey',
'hierarchal': 'hierachial', 'hierarchy': 'hierchy', 'inconvenient':
'inconvienient inconvient inconvinient', 'independent': 'independant independant',
'initial': 'intial', 'initials': 'inetials inistals initails initals intials',
'juice': 'guic juce jucie juise juse', 'latest': 'lates latets latiest latist',
'laugh': 'lagh lauf laught lugh', 'level': 'leval',
'levels': 'levals', 'liaison': 'liaision liason', 'lieu': 'liew', 'literature':
'litriture', 'loans': 'lones', 'locally': 'localy', 'magnificent':
'magnificnet magificent magnifcent magnifecent magnifiscant magnifisent magnificant',
'management': 'managment', 'meant': 'ment', 'minuscule': 'miniscule',
'minutes': 'muinets', 'monitoring': 'monitering', 'necessary':
'neccesary necesary neccesary necassary necassery neccasary', 'occurrence':
'occurence occurence', 'often': 'ofen offen offten ofton', 'opposite':
'opisite oppasite oppesite oppisit oppisite opposit oppossite oppossitte', 'parallel':
'paralel paralell parrallel parralell parrallell', 'particular': 'particulaur',
'perhaps': 'perhapse', 'personnel': 'personnell', 'planned': 'planed', 'poem':
'poame', 'poems': 'poims pomes', 'poetry': 'poartry poertry poetre poety powetry',
'position': 'possition', 'possible': 'possable', 'pretend':
'pertend protend prtend pritend', 'problem': 'problam proble promblem proplen',
'pronunciation': 'pronounciation', 'purple': 'perple perpul poarple',
'questionnaire': 'questionaire', 'really': 'realy relley relly', 'receipt':
'receit receite reciet recipt', 'receive': 'recieve', 'refreshment':
'reafreshment refreshmant refresment refressmunt', 'remember': 'rember remeber rememmer rermember',
'remind': 'remine remined', 'scarcely': 'scarcly scarecly scarely scarsely',
'scissors': 'scisors sissors', 'separate': 'seperate',
'singular': 'singulaur', 'someone': 'somone', 'sources': 'sorces', 'southern':
'southen', 'special': 'speaical specail specal speical', 'splendid':
'spledid splended splened splended', 'standardizing': 'stanerdizing', 'stomach':
'stomac stomache stomec stumache', 'supersede': 'supercede superceed', 'there': 'ther',
'totally': 'totaly', 'transferred': 'transfred', 'transportability':
'transportibility', 'triangular': 'triangulaur', 'understand': 'undersand undistand',
'unexpected': 'unexpcted unexpeted unexspected', 'unfortunately':
'unfortunatly', 'unique': 'uneque', 'useful': 'usefull', 'valuable': 'valubale valuble',
'variable': 'varable', 'variant': 'vairiant', 'various': 'vairious',
'visited': 'fisited viseted vistid vistied', 'visitors': 'vistors',
'voluntary': 'volantry', 'voting': 'voteing', 'wanted': 'wantid wonted',
'whether': 'wether', 'wrote': 'rote wote'}
tests2 = {'forbidden': 'forbiden', 'decisions': 'deciscions descisions',
'supposedly': 'supposidly', 'embellishing': 'embelishing', 'technique':
'tecnique', 'permanently': 'perminantly', 'confirmation': 'confermation',
'appointment': 'appoitment', 'progression': 'progresion', 'accompanying':
'acompaning', 'applicable': 'aplicable', 'regained': 'regined', 'guidelines':
'guidlines', 'surrounding': 'serounding', 'titles': 'tittles', 'unavailable':
'unavailble', 'advantageous': 'advantageos', 'brief': 'brif', 'appeal':
'apeal', 'consisting': 'consisiting', 'clerk': 'cleark clerck', 'component':
'componant', 'favourable': 'faverable', 'separation': 'seperation', 'search':
'serch', 'receive': 'recieve', 'employees': 'emploies', 'prior': 'piror',
'resulting': 'reulting', 'suggestion': 'sugestion', 'opinion': 'oppinion',
'cancellation': 'cancelation', 'criticism': 'citisum', 'useful': 'usful',
'humour': 'humor', 'anomalies': 'anomolies', 'would': 'whould', 'doubt':
'doupt', 'examination': 'eximination', 'therefore': 'therefoe', 'recommend':
'recomend', 'separated': 'seperated', 'successful': 'sucssuful succesful',
'apparent': 'apparant', 'occurred': 'occureed', 'particular': 'paerticulaur',
'pivoting': 'pivting', 'announcing': 'anouncing', 'challenge': 'chalange',
'arrangements': 'araingements', 'proportions': 'proprtions', 'organized':
'oranised', 'accept': 'acept', 'dependence': 'dependance', 'unequalled':
'unequaled', 'numbers': 'numbuers', 'sense': 'sence', 'conversely':
'conversly', 'provide': 'provid', 'arrangement': 'arrangment',
'responsibilities': 'responsiblities', 'fourth': 'forth', 'ordinary':
'ordenary', 'description': 'desription descvription desacription',
'inconceivable': 'inconcievable', 'data': 'dsata', 'register': 'rgister',
'supervision': 'supervison', 'encompassing': 'encompasing', 'negligible':
'negligable', 'allow': 'alow', 'operations': 'operatins', 'executed':
'executted', 'interpretation': 'interpritation', 'hierarchy': 'heiarky',
'indeed': 'indead', 'years': 'yesars', 'through': 'throut', 'committee':
'committe', 'inquiries': 'equiries', 'before': 'befor', 'continued':
'contuned', 'permanent': 'perminant', 'choose': 'chose', 'virtually':
'vertually', 'correspondence': 'correspondance', 'eventually': 'eventully',
'lonely': 'lonley', 'profession': 'preffeson', 'they': 'thay', 'now': 'noe',
'desperately': 'despratly', 'university': 'unversity', 'adjournment':
'adjurnment', 'possibilities': 'possablities', 'stopped': 'stoped', 'mean':
'meen', 'weighted': 'wagted', 'adequately': 'adequattly', 'shown': 'hown',
'matrix': 'matriiix', 'profit': 'proffit', 'encourage': 'encorage', 'collate':
'colate', 'disaggregate': 'disaggreagte disaggreaget', 'receiving':
'recieving reciving', 'proviso': 'provisoe', 'umbrella': 'umberalla', 'approached':
'aproached', 'pleasant': 'plesent', 'difficulty': 'dificulty', 'appointments':
'apointments', 'base': 'basse', 'conditioning': 'conditining', 'earliest':
'earlyest', 'beginning': 'begining', 'universally': 'universaly',
'unresolved': 'unresloved', 'length': 'lengh', 'exponentially':
'exponentualy', 'utilized': 'utalised', 'set': 'et', 'surveys': 'servays',
'families': 'familys', 'system': 'sysem', 'approximately': 'aproximatly',
'their': 'ther', 'scheme': 'scheem', 'speaking': 'speeking', 'repetitive':
'repetative', 'inefficient': 'ineffiect', 'geneva': 'geniva', 'exactly':
'exsactly', 'immediate': 'imediate', 'appreciation': 'apreciation', 'luckily':
'luckeley', 'eliminated': 'elimiated', 'believe': 'belive', 'appreciated':
'apreciated', 'readjusted': 'reajusted', 'were': 'wer where', 'feeling':
'fealing', 'and': 'anf', 'false': 'faulse', 'seen': 'seeen', 'interrogating':
'interogationg', 'academically': 'academicly', 'relatively': 'relativly relitivly',
'traditionally': 'traditionaly', 'studying': 'studing',
'majority': 'majorty', 'build': 'biuld', 'aggravating': 'agravating',
'transactions': 'trasactions', 'arguing': 'aurguing', 'sheets': 'sheertes',
'successive': 'sucsesive sucessive', 'segment': 'segemnt', 'especially':
'especaily', 'later': 'latter', 'senior': 'sienior', 'dragged': 'draged',
'atmosphere': 'atmospher', 'drastically': 'drasticaly', 'particularly':
'particulary', 'visitor': 'vistor', 'session': 'sesion', 'continually':
'contually', 'availability': 'avaiblity', 'busy': 'buisy', 'parameters':
'perametres', 'surroundings': 'suroundings seroundings', 'employed':
'emploied', 'adequate': 'adiquate', 'handle': 'handel', 'means': 'meens',
'familiar': 'familer', 'between': 'beeteen', 'overall': 'overal', 'timing':
'timeing', 'committees': 'comittees commitees', 'queries': 'quies',
'econometric': 'economtric', 'erroneous': 'errounous', 'decides': 'descides',
'reference': 'refereence refference', 'intelligence': 'inteligence',
'edition': 'ediion ediition', 'are': 'arte', 'apologies': 'appologies',
'thermawear': 'thermawere thermawhere', 'techniques': 'tecniques',
'voluntary': 'volantary', 'subsequent': 'subsequant subsiquent', 'currently':
'curruntly', 'forecast': 'forcast', 'weapons': 'wepons', 'routine': 'rouint',
'neither': 'niether', 'approach': 'aproach', 'available': 'availble',
'recently': 'reciently', 'ability': 'ablity', 'nature': 'natior',
'commercial': 'comersial', 'agencies': 'agences', 'however': 'howeverr',
'suggested': 'sugested', 'career': 'carear', 'many': 'mony', 'annual':
'anual', 'according': 'acording', 'receives': 'recives recieves',
'interesting': 'intresting', 'expense': 'expence', 'relevant':
'relavent relevaant', 'table': 'tasble', 'throughout': 'throuout', 'conference':
'conferance', 'sensible': 'sensable', 'described': 'discribed describd',
'union': 'unioun', 'interest': 'intrest', 'flexible': 'flexable', 'refered':
'reffered', 'controlled': 'controled', 'sufficient': 'suficient',
'dissension': 'desention', 'adaptable': 'adabtable', 'representative':
'representitive', 'irrelevant': 'irrelavent', 'unnecessarily': 'unessasarily',
'applied': 'upplied', 'apologised': 'appologised', 'these': 'thees thess',
'choices': 'choises', 'will': 'wil', 'procedure': 'proceduer', 'shortened':
'shortend', 'manually': 'manualy', 'disappointing': 'dissapoiting',
'excessively': 'exessively', 'comments': 'coments', 'containing': 'containg',
'develop': 'develope', 'credit': 'creadit', 'government': 'goverment',
'acquaintances': 'aquantences', 'orientated': 'orentated', 'widely': 'widly',
'advise': 'advice', 'difficult': 'dificult', 'investigated': 'investegated',
'bonus': 'bonas', 'conceived': 'concieved', 'nationally': 'nationaly',
'compared': 'comppared compased', 'moving': 'moveing', 'necessity':
'nessesity', 'opportunity': 'oppertunity oppotunity opperttunity', 'thoughts':
'thorts', 'equalled': 'equaled', 'variety': 'variatry', 'analysis':
'analiss analsis analisis', 'patterns': 'pattarns', 'qualities': 'quaties', 'easily':
'easyly', 'organization': 'oranisation oragnisation', 'the': 'thw hte thi',
'corporate': 'corparate', 'composed': 'compossed', 'enormously': 'enomosly',
'financially': 'financialy', 'functionally': 'functionaly', 'discipline':
'disiplin', 'announcement': 'anouncement', 'progresses': 'progressess',
'except': 'excxept', 'recommending': 'recomending', 'mathematically':
'mathematicaly', 'source': 'sorce', 'combine': 'comibine', 'input': 'inut',
'careers': 'currers carrers', 'resolved': 'resoved', 'demands': 'diemands',
'unequivocally': 'unequivocaly', 'suffering': 'suufering', 'immediately':
'imidatly imediatly', 'accepted': 'acepted', 'projects': 'projeccts',
'necessary': 'necasery nessasary nessisary neccassary', 'journalism':
'journaism', 'unnecessary': 'unessessay', 'night': 'nite', 'output':
'oputput', 'security': 'seurity', 'essential': 'esential', 'beneficial':
'benificial benficial', 'explaining': 'explaning', 'supplementary':
'suplementary', 'questionnaire': 'questionare', 'employment': 'empolyment',
'proceeding': 'proceding', 'decision': 'descisions descision', 'per': 'pere',
'discretion': 'discresion', 'reaching': 'reching', 'analysed': 'analised',
'expansion': 'expanion', 'although': 'athough', 'subtract': 'subtrcat',
'analysing': 'aalysing', 'comparison': 'comparrison', 'months': 'monthes',
'hierarchal': 'hierachial', 'misleading': 'missleading', 'commit': 'comit',
'auguments': 'aurgument', 'within': 'withing', 'obtaining': 'optaning',
'accounts': 'acounts', 'primarily': 'pimarily', 'operator': 'opertor',
'accumulated': 'acumulated', 'extremely': 'extreemly', 'there': 'thear',
'summarys': 'sumarys', 'analyse': 'analiss', 'understandable':
'understadable', 'safeguard': 'safegaurd', 'consist': 'consisit',
'declarations': 'declaratrions', 'minutes': 'muinutes muiuets', 'associated':
'assosiated', 'accessibility': 'accessability', 'examine': 'examin',
'surveying': 'servaying', 'politics': 'polatics', 'annoying': 'anoying',
'again': 'agiin', 'assessing': 'accesing', 'ideally': 'idealy', 'scrutinized':
'scrutiniesed', 'simular': 'similar', 'personnel': 'personel', 'whereas':
'wheras', 'when': 'whn', 'geographically': 'goegraphicaly', 'gaining':
'ganing', 'requested': 'rquested', 'separate': 'seporate', 'students':
'studens', 'prepared': 'prepaired', 'generated': 'generataed', 'graphically':
'graphicaly', 'suited': 'suted', 'variable': 'varible vaiable', 'building':
'biulding', 'required': 'reequired', 'necessitates': 'nessisitates',
'together': 'togehter', 'profits': 'proffits'}
if __name__ == '__main__':
print spelltest(tests1, verbose=True)
print spelltest(tests2, verbose=True)
| anderscui/spellchecker | simple_checker/checker_tests_google_dict.py | Python | mit | 17,844 | 0.00863 |
#
# humanagent.py - provides server-side backend for interaction with
# human players in team formation
#
# Copyright (C) 2015 Nathan Dykhuis
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""Agent class to allow humans on client computers to play team formation.
The HumanAgent class is the server-side backend of the human interaction
system. It performs all calculation of payoffs, etc, and forwards the info
to the Frontend client over TCP. The Frontend client is a fairly-thin wrapper
which simply takes messages describing what screen to display, with what info,
and receives input from the user.
Numerous functions use send_and_receive to ask for user input, and ensure that
the program (or thread) blocks until it has been received.
"""
import json
import numpy as np
from configuration import Configuration
from agentgroup import Agent
from utils import send_message, receive_message, send_and_receive
CURR = u'\xA7'
class HumanAgent(Agent):
"""HumanAgent class implements all functions necessary for team formation
Each function gathers the relevant information, then ships it off to a
Frontend instance over TCP for evaluation and decision by a human player.
Attributes:
slow: always True, since humans take time to decide
type: 'human'
client: the sockets connection to the Frontend
finalpay: total pay, only set after the exit survey is submitted
messages: list accumulator of message strings that are sent to the user as
a summary of the turn during the postprocess stage.
"""
def __init__(self, cfg, connection, adat=None, skills=None, aid=None):
super(HumanAgent, self).__init__(cfg, adat, skills, aid)
self.client = connection
self.messages = []
self.slow = True
self.type = 'human'
# THIS IS DUPLICATED FROM FRONTEND.PY - should move to configuration?
self.gletters = [chr(ord('A')+i) for i in range(Configuration.n)]
if Configuration.hide_publicgoods:
self.anames = ['Agent '+str(i) for i in range(Configuration.n)]
else:
# Icky hardcoding
self.anames = ['Cat', 'Dog', 'Bear', 'Monkey', 'Cow', 'Elephant',
'Gorilla', 'Fish', 'Sheep', 'Frog', 'Bird', 'Lion',
'Owl', 'Panda', 'Penguin', 'Pig', 'Rabbit', 'Rooster',
'Bee', 'Donkey']
self.current_ratings = {}
self.finalpay = -1
send_message(self.client, ('setmyid', self.id))
self.sendcfg()
def sendcfg(self):
"""Send the current configuration to the client as a dictionary.
Sends only the variables which can be packed with JSON.
Blocks until client confirms that it has received the message
"""
cfgdict = self.cfg.outputcfg(showhidden=True)
# remove all things that are not jsonnable
jsoncfg = {}
for k,v in cfgdict.iteritems():
try: # This is hacky...
json.dumps(v)
jsoncfg[k] = v
except TypeError:
pass
send_and_receive(self.client, ('setconfig', jsoncfg))
# Make sure the config gets set before moving on
def gname(self, gid):
"""Get the name (letter) of a group from an integer group ID"""
return self.gletters[gid]
def aname(self, aid):
"""Get the name of an agent from an integer agent ID"""
if aid == self.id:
return "You"
return self.anames[aid]
def initvideo(self):
"""Tell client to start video capture and open a preview window"""
cfg = self.cfg
# send additional video info here
vdata = (cfg._do_video, cfg._dblog.sessionid, self.id)
send_message(self.client, ('initvideo', vdata))
send_message(self.client, ('startpreview', 0))
def instructions(self):
"""Tell client to show instructions screen and close preview window"""
if self.cfg.do_ratings: self.hideratings()
send_message(self.client, ('endpreview', 0))
self.sendcfg()
send_and_receive(self.client, ('instructions', 0))
#if self.cfg.do_ratings: self.showratings()
self.logp(("Instructions done for", self.id))
def initratings(self, neighbors):
"""Tell client to create the ratings sidebar"""
send_message(self.client, ('initratings', neighbors))
def showratings(self):
"""Tell client to show the ratings sidebar"""
send_message(self.client, ('showratings', 0))
def hideratings(self):
"""Tell client to hide the ratings sidebar"""
send_message(self.client, ('hideratings', 0))
def disableratings(self):
"""Tell client to make ratings sidebar un-clickable"""
send_message(self.client, ('disableratings', 0))
def introsurvey(self):
"""Tell client to present intro survey screen, and record response"""
gender, college, status = send_and_receive(self.client, ('introsurvey', 0))
self.cfg._dblog.log_introsurvey(self.id, (gender, college, status))
def exitsurvey(self):
"""Tell client to present exit survey, and after submit, get final pay"""
self.logratings()
send_message(self.client, ('exitsurvey', 0))
# Receive num of questions, and then each question and response
n_qs = receive_message(self.client)
responses = []
for i in range(n_qs):
(qtext, qresponse) = receive_message(self.client)
responses.append( (qtext, qresponse) )
self.finalpay = receive_message(self.client)
self.hideratings()
self.cfg._dblog.log_exitsurvey(self.id, responses)
self.logratings(step='exitsurvey')
self.logratingstatus('final', range(self.cfg.n)) # Log ratings of everyone
self.logp(("Agent", self.id, "exit survey submitted"), 0)
def startcapture(self):
"""Client: start video capture"""
send_message(self.client, ('startcapture', 0))
def stopcapture(self):
"""Client: pause video capture"""
send_message(self.client, ('stopcapture', 0))
def endcapture(self):
"""Client: terminate video capture"""
send_message(self.client, ('endcapture', 0))
def update(self):
"""Update current pay and neighbors here and in the GUI"""
if self.cfg.bias:
self.nowpay = self.nowpaycalc(self.cfg.task(self.group.skills))
else:
self.nowpay = self.cfg.task(self.group.skills)/self.group.gsize
send_message(self.client, ('updatepay', self.nowpay) )
if self.cfg.show_skills:
send_message(self.client, ('updatemyteam', (self.group.id, int(np.where(self.skills)[0][0])) ))
else:
send_message(self.client, ('updatemyteam', (self.group.id, -1)) )
send_message(self.client, ('updateteam', sorted([a.id for a in self.group.agents])))
self.updatenbrs()
def updatenbrs(self):
"""Update graphical view of neighbors in the GUI"""
#nbrdata = [(n.id, n.group.id) for n in self.nbrs] # old nbrdata
if self.cfg.show_skills:
nbrdata = [(n.id, n.group.id, int(np.where(n.skills)[0][0])) for n in self.nbrs]
else:
nbrdata = [(n.id, n.group.id, -1) for n in self.nbrs]
send_message(self.client, ('updatenbrs', nbrdata) )
def getframetimes(self):
"""Get the start and end frame numbers and timestamps from the last event.
Returns:
tuple of (start frame, end frame, start time, end time)
frame numbers are ints, times are Unix timestamps
"""
return send_and_receive(self.client, ('getframetimes', 0))
def logratings(self, simnum = None, iternum = None, step = 'NA'):
"""Get all accumulated ratings from the client and log to database.
Also update self.current_ratings with the most recent rating assigned.
Arguments:
simnum: the current round number
iternum: the current iteration number
step: the current step (apply, acceptvote, join, expel, pubgood, etc.)
All ratings fetched will be marked with these values, so to ensure that
ratings are assigned to the correct step, collect ratings once before the
step starts, and again after the step ends, marking with the step name on
the second call. (see usage in code)
"""
if not self.cfg.do_ratings: return
ratings = send_and_receive(self.client, ('getratinglog', 0))
if not len(ratings):
return
if not simnum:
try:
simnum, iternum = self.cfg.simnumber, self.cfg.iternum
except AttributeError:
simnum, iternum = -1, -1
try:
for r in ratings:
r.extend( [simnum, iternum, step] )
self.current_ratings[r[1]] = r[2]
self.cfg._dblog.log_ratings(ratings)
except AttributeError:
print "PROBLEM!"
print "ratings data is:", ratings
self.logp(("Agent", self.id, "ratings:", ratings))
def logratingstatus(self, eventtype, otherids, gmembers=None):
"""Log the current ratings that a user is seeing when making a decision.
If gmembers is None, then otherids is a list of agent IDs, as during the
acceptvote or pubgood steps.
If gmembers is not None, then otherids is a list of group IDs, and gmembers
is a list of lists of the agent IDs of the members of each group, used
during the apply or join steps.
"""
if gmembers is not None:
rtgs = [[self.current_ratings.get(aid) for aid in g if aid in self.current_ratings]
for g in gmembers if len(g)]
grtgs = [[self.global_ratings.get(aid) for aid in g if aid in self.global_ratings]
for g in gmembers if len(g)]
myrtgs = [-1 if not len(rats) else float(sum(rats))/len(rats) for rats in rtgs]
if self.cfg.show_global_ratings:
globalrtgs = [-1 if not len(grats) else float(sum(grats))/len(grats)
for grats in grtgs]
else:
globalrtgs = [-1 for rats in rtgs]
minrtgs = [-1 if not len(rats) else min(rats) for rats in rtgs]
maxrtgs = [-1 if not len(rats) else max(rats) for rats in rtgs]
else:
myrtgs = [self.current_ratings.get(aid, -1) for aid in otherids]
globalrtgs = [-1 if not self.cfg.show_global_ratings
else self.global_ratings.get(aid, -1)
for aid in otherids]
minrtgs = maxrtgs = [-1 for aid in otherids]
try:
self.cfg._dblog.log_ratingstatus(
self.cfg.simnumber, self.cfg.iternum, eventtype, self.id, otherids,
myrtgs, globalrtgs, minrtgs, maxrtgs
)
except AttributeError:
self.cfg._dblog.log_ratingstatus(
self.cfg.simnumber, -1, eventtype, self.id, otherids,
myrtgs, globalrtgs, minrtgs, maxrtgs
)
def getratings(self):
"""Get current ratings from the client, and return them.
Returns:
dictionary mapping agent ID (int) to rating (int)
"""
ratings = send_and_receive(self.client, ('getratings', 0))
self.current_ratings.update(ratings)
return self.current_ratings
def updateratings(self, ratings):
"""Send the current global ratings to the client to update UI.
Arguments:
ratings: dictionary mapping agent ID (int) to rating (int, 1-5)
"""
self.global_ratings = ratings
if self.cfg.show_global_ratings:
send_message(self.client, ('updateglobalratings', ratings))
def updatehistory(self, pghistory):
"""Send public contribution history information to the client to update UI.
Arguments:
pghistory: dictionary mapping agent ID to list of previous contributions
"""
send_message(self.client, ('updatehistory', pghistory))
  def propose(self):
    """Ask the player which neighboring groups to apply to, and submit the applications."""
task = self.cfg.task
self.update() ## Maybe needs to come after the propose message!
nbrgroups = set( n.group for n in self.nbrs )
nbrgroups.discard(self.group)
if not len(nbrgroups):
return
idgroups = {g.id:g for g in nbrgroups}
gdata = sorted([ (g.id, g.gsize, task(g.withskills(self))/(g.gsize+1),
[a.id for a in g.agents]) for g in nbrgroups])
gids, gsizes, newpays, gmembers = zip(*gdata)
self.logratings()
self.logratingstatus('apply', gids+(-1, ), gmembers+([a.id for a in self.group.agents],))
# Send all data to GUI and blocking receive
# Wait for user to reply with list of applications
applications = send_and_receive(self.client, ('propose', gdata) )
self.logratings(step='apply')
if len(applications):
gnames = [self.gname(gid) for gid in applications]
self.messages.append('You applied to group'+('s ' if len(gnames) > 1 else ' ')+', '.join(gnames))
else:
self.messages.append('You did not apply to any groups')
sframe, eframe, stime, etime = self.getframetimes()
self.cfg._dblog.log_apply(
self.cfg.simnumber, self.cfg.iternum, self.id, gids, self.nowpay,
newpays, applications, sframe, eframe, stime, etime
)
self.logp(("Agent", self.id, "proposes", applications))
for gid in applications:
g = idgroups[gid]
g.takeapplication(self)
  def acceptvote(self, applicants):
    """Ask the player to vote one applicant onto the team.

    Returns the chosen applicant agent, or None if no one should be accepted.
    """
if not len(applicants):
# If no applicants, shouldn't be calling this function, but in any case,
# return None
return None
task = self.cfg.task
self.update()
myg = self.group
## TODO: create a case for group merging
# acceptvote_groupmerge
idagents = {a.id:a for a in applicants}
gdata = sorted([(a.id, task(myg.withskills(a))/(myg.gsize+a.gsize))
for a in applicants])
naids, newpays = zip(*gdata)
self.logratings()
self.logratingstatus('acceptvote', naids)
# Send all data to GUI and blocking receive
# Wait for user to reply with list of applications
accept_id = send_and_receive(self.client, ('acceptvote', gdata) )
self.logratings(step='acceptvote')
naids = list(naids)
newpays = list(newpays)
# Add the "accept no one" option
naids.append(-1)
newpays.append(self.nowpay)
sframe, eframe, stime, etime = self.getframetimes()
self.cfg._dblog.log_accept(
self.cfg.simnumber, self.cfg.iternum, self.id, naids, self.nowpay,
newpays, accept_id, sframe, eframe, stime, etime
)
if accept_id != -1:
self.logp(("Agent", self.id, "votes to accept", accept_id))
if accept_id != -1:
#self.messages.append('You voted to accept agent '+str(accept_id)+
# ' into the group')
return idagents[accept_id]
else:
# No applicant should join the team
return None
  def expelvote(self):
    """Ask the player to vote one teammate off the team.

    Returns the chosen agent, or None if no one should be expelled.
    """
task = self.cfg.task
self.update()
nowpay = self.nowpay
myg = self.group
idagents = {a.id:a for a in myg.agents}
#del idagents[self.id]
gdata = sorted([(a.id, task(myg.withoutskills(a))/(myg.gsize-1))
for aid, a in sorted(idagents.items()) if aid != self.id])
naids, newpays = zip(*gdata)
self.logratings()
self.logratingstatus('expelvote', naids)
# Send all data to GUI and blocking receive
# Wait for user to reply with list of applications
expel_id = send_and_receive(self.client, ('expelvote', gdata) )
self.logratings(step='expelvote')
sframe, eframe, stime, etime = self.getframetimes()
self.cfg._dblog.log_expel(
self.cfg.simnumber, self.cfg.iternum, self.id, naids, self.nowpay,
newpays, expel_id, sframe, eframe, stime, etime
)
self.logp(("Agent", self.id, "votes to expel", expel_id))
if expel_id == -1:
# No member should leave the team
return None
else:
#self.messages.append('You voted to expel agent '+str(accept_id)+
# ' from the group')
return idagents[expel_id]
  def consider(self):
    """Ask the player which accepting group to join, switching teams if one is chosen."""
if not len(self.acceptances) or \
not len([g.gsize for g in self.acceptances if g.gsize > 0]):
self.messages.append('You received no acceptances')
return
task = self.cfg.task
self.update()
idgroups = {g.id:g for g in self.acceptances}
gdata = sorted([ (g.id, g.gsize, task(g.withskills(self))/(g.gsize+1),
[a.id for a in g.agents]) for g in self.acceptances])
gids, gsizes, gpays, gmembers = zip(*gdata)
self.logratings()
self.logratingstatus('join', gids+(-1, ),
gmembers+([a.id for a in self.group.agents], ))
# Send all data to GUI and blocking receive
# Wait for user to reply with list of applications
choice_id = send_and_receive(self.client, ('consider', gdata) )
self.logratings(step='join')
if choice_id == -1:
# Player does not want to switch groups.
pass
else:
self.messages.append('You switched to group '+self.gname(choice_id))
self.switchgroup(idgroups[choice_id])
gids = list(gids)
gsizes = list(gsizes)
gpays = list(gpays)
# Add your current group (no change)
gids.append(-1)
gsizes.append(self.group.gsize)
gpays.append(self.nowpay)
sframe, eframe, stime, etime = self.getframetimes()
self.cfg._dblog.log_join(
self.cfg.simnumber, self.cfg.iternum, self.id, gids, self.nowpay, gpays,
choice_id, sframe, eframe, stime, etime
)
if choice_id != -1:
self.logp(("Agent", self.id, "joins", choice_id))
self.acceptances = []
def notifygaccept(self, aid):
actor = 'Your group' if self.group.gsize > 1 else 'You'
if aid != -1:
self.messages.append(actor+' voted to accept '+self.aname(aid)+
' to join your team')
else:
self.messages.append(actor+' voted to allow no one to join your team')
def notifyjoin(self, aid, add=True, expel=False):
changewords = ['left', 'joined']
if aid != -1:
if not expel:
self.messages.append(self.aname(aid)+' '+changewords[add]+' your team')
else:
if aid == self.id:
self.messages.append('You were expelled from your team')
else:
self.messages.append('Your team expelled '+self.aname(aid))
else:
if expel:
self.messages.append('No agents were expelled from your team')
else:
self.messages.append('No agents '+changewords[add]+' your team')
def postprocess_iter(self):
# Send all data to GUI and blocking receive
self.update()
if len(self.messages):
send_and_receive(self.client, ('turndone', '\n'.join(self.messages)) )
else:
self.log("No new messages for human player", 6)
#if self.cfg.do_ratings:
# self.logratings(step='postprocess_iter')
self.logratings()
###TODO: Maybe wait to ensure user is done???
self.messages = []
def postprocess(self, globalpay=None):
self.update()
self.messages.append('You earned '+CURR+str(round(self.nowpay, 2)))
self.logratings()
send_message(self.client, ('addpay', round(self.nowpay, 2)) )
send_and_receive(self.client, ('postprocess', '\n'.join(self.messages)) )
self.logratings(step='postprocess')
self.messages = []
def postprocess_pg(self, globalpay=None):
self.update()
self.messages.append('You can earn '+CURR+str(round(self.nowpay, 2))+
' on this team.')
self.logratings()
send_and_receive(self.client, ('postprocess', '\n'.join(self.messages)) )
#send_message(self.client, ('addpay', round(self.nowpay, 2)) )
self.logratings(step='postprocess')
self.logratings()
self.messages = []
## PUBLIC GOODS FUNCTIONS:
def publicgoods(self, pgdict, potmult):
self.update()
if self.cfg.do_ratings: self.hideratings()
send_and_receive(self.client, ('publicgoods_instructions', potmult))
if self.cfg.do_ratings: self.showratings()
## Send team as neighbors
if self.cfg.show_skills:
teamdata = [(n.id, n.group.id, int(np.where(n.skills)[0][0]))
for n in self.group.agents if n != self]
else:
teamdata = [(n.id, n.group.id, -1) for n in self.group.agents if n != self]
send_message(self.client, ('updatenbrs', teamdata) )
self.logratings()
self.logratingstatus('pubgood', [n.id for n in self.group.agents if n != self])
# Send current pay with the publicgoods message
contrib = send_and_receive(self.client, ('publicgoods', (int(self.nowpay), potmult)))
self.logratings(step='publicgoods')
self.logp(("Agent", self.id, "contribs", contrib, "/", int(self.nowpay)))
#return contrib
pgdict[self] = (contrib, int(self.nowpay)-contrib)
def publicgoods_postprocess(self, startpay, keep, contrib, privatepay, potpay, teampays):
maxcontrib = startpay
newpay = privatepay + potpay
# Send summary info to the GUI
self.messages.append('You made '+CURR+str(round(startpay, 2))+
' this round by working with this team.')
cdesc = 'the shared pot' if not self.cfg.hide_publicgoods else 'the lottery'
self.messages.append('Of that, you contributed '+CURR+str(contrib)+' to '+
cdesc+' and kept '+CURR+str(round(keep, 2)))
if self.cfg.alt_pubgoods and not self.cfg.hide_publicgoods:
ratings = [self.global_ratings[n.id] for n in self.group.agents
if n.id in self.global_ratings]
contribs = [teampays[n.id][0]/float(startpay) for n in self.group.agents]
potmult = self.cfg.pubgoods_calc(contribs, ratings)
potmult = int((potmult-1)*100)
self.messages.append('The pot was increased by {0}%'.format(potmult))
self.messages.append('You received '+CURR+str(round(potpay, 2))+
' from '+cdesc)
self.messages.append('You earned a total of '+CURR+str(round(newpay, 2))+
' this round.')
### UPDATE UI WITH DATA ABOUT TEAM CONTRIBS
teammateids = [n.id for n in self.group.agents]
contribs = [teampays[n][0] for n in teammateids]
send_message(self.client, ('updatemyteam', (self.group.id, str(contrib)) ))
teammates = list(self.group.agents)
teammates.remove(self)
teamids = [n.id for n in teammates]
teamdata = [(n.id, n.group.id, teampays[n.id][0]) for n in teammates]
send_message(self.client, ('updatenbrs', teamdata) )
send_message(self.client, ('publicgoods_conclusion', (newpay, (teammateids, contribs))))
send_message(self.client, ('addpay', round(newpay, 2)) )
sframe, eframe, stime, etime = self.getframetimes()
self.cfg._dblog.log_pubgoods(self.cfg.simnumber,
self.id, self.group.id,
teamids, contrib,
[teampays[n.id][0] for n in teammates],
keep, newpay,
sframe, eframe, stime, etime)
self.logratings()
send_and_receive(self.client, ('postprocess', '\n'.join(self.messages)) )
self.logratings(step='pg_postprocess')
self.logratingstatus('simend', range(self.cfg.n))
self.messages = []
## ULTIMATUM FUNCTIONS:
def ultimatum_init(self):
send_message(self.client, ('ultimatum', None))
send_message(self.client, ('u_instructions', None))
def ultimatum(self, other_player):
pass
def dictator(self, other_player):
send_message(self.client, ('dictator', None))
send_message(self.client, ('d_instructions', None))
def ask_for_offer(self, other_player):
amount = send_and_receive(self.client, ('u_makeoffer', other_player))
sframe, eframe, stime, etime = self.getframetimes()
self.cfg._dblog.ultevent_insert(
self.id, other_player, 'make_offer', amount, sframe, eframe, stime, etime
)
return amount
def wait_offer(self, other_player):
send_message(self.client, ('u_waitoffer', other_player))
def decide_offer(self, other_player, amount):
result = send_and_receive(self.client,
('u_decideoffer', (other_player, amount)))
sframe, eframe, stime, etime = self.getframetimes()
self.cfg._dblog.ultevent_insert(
self.id, other_player, 'decide_offer', result, sframe, eframe, stime, etime
)
return result
def wait_decide(self, other_player):
send_message(self.client, ('u_waitdecide', other_player))
def show_conclusion_u(self, other_player, amount, result, role):
send_and_receive(self.client,
('u_conclusion', (other_player, amount, result, role)))
if result:
if role == 0:
my_amount = 10-amount
else:
my_amount = amount
else:
my_amount = 0
sframe, eframe, stime, etime = self.getframetimes()
self.cfg._dblog.ultevent_insert(
self.id, other_player, 'conclusion', my_amount,
sframe, eframe, stime, etime)
if self.cfg.do_ratings:
self.logratings(step='ultimatum', simnum=-1, iternum=-1)
def show_conclusion_d(self, other_player, amount, role):
send_message(self.client, ('d_conclusion', (other_player, amount, role)))
def u_review(self):
send_message(self.client, ('u_review', None))
| NDykhuis/team-formation-study-software | humanagent.py | Python | gpl-2.0 | 25,571 | 0.013922 |
"""
A list is a sequence
1.Can be any type
2.The values in a list are called elements or sometimes items
3.Declare with square brackets: [ ]
4.Can be nested. [x, y, [z1, z2]]
"""
myStr1 = 'aabbcc'
myStr2 = 'aabbcc'
print('myStr1 = ', myStr1)
print('myStr2 = ', myStr2)
# Note: equal string literals are usually the same object because CPython interns
# them, so `is` is True here; that is an implementation detail, not a guarantee.
print('myStr1 is myStr2 = ', myStr1 is myStr2, ' (Equivalent + Identical)')
myList1 = [10, 20, 30]
myList2 = [10, 20, 30]
print('myList1 = ', myList1)
print('myList2 = ', myList2)
print('myList1 is myList2 = ', myList1 is myList2, ' (Equivalent + Not Identical)')
print('When you pass a list to a function, the function gets a reference to the list.')
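# A minimal sketch of that claim: append_zero() mutates the very list object the
# caller passed in, so the change is visible outside the function.
def append_zero(seq):
    seq.append(0)
demoList = [1, 2]
append_zero(demoList)
print('After append_zero(demoList), demoList = ', demoList)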
t1 = [1, 2]
t2 = t1.append(3)
t3 = t1 + [3]
print('t1 = [1, 2]')
print('t2 = t1.append(3)')
print('t3 = t1 + [3]')
print('t1 now is ', t1)
print('t2 now is ', t2)
print('t3 now is ', t3)
| flake123p/ProjectH | Python/_Basics_/A11_Reference/test.py | Python | gpl-3.0 | 817 | 0.007344 |
#!/usr/bin/python
#coding: UTF-8
#COPIRIGHT: Patrick Roncagliolo
#LICENCE: GNU GPL 3
import cgi, json
argsDict = cgi.FieldStorage()
EMPTY_DICT = {}
def getState (init = False):
    """Return (dataDict, devDict), creating and persisting both on first use when init is True."""
dataDict = getDataDict ()
if dataDict is None \
and init is True:
(key, uri) = generateTOTP ()
generateQR (key, uri)
dataDict = newDataDict (key, uri)
setDataDict (dataDict)
devDict = getDevDict ()
if devDict is None \
and init is True:
devDict = newDevDict ()
setDevDict (devDict)
return (dataDict, devDict)
def generateTOTP ():
import string, random
from otpauth import OtpAuth as otpauth
key=''.join((random.choice(string.ascii_uppercase + string.digits)) for x in range(30))
auth = otpauth(key)
uri = auth.to_uri('totp', 'patrick@WakeOnLAN', 'WakeOnLAN')
return (key, uri)
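# For reference (an assumption about otpauth's output, not verified here): the URI
# built above follows the standard provisioning format, roughly
#   otpauth://totp/patrick@WakeOnLAN?secret=<KEY>&issuer=WakeOnLAN
# and generateQR() below encodes it into the QR code an authenticator app scans.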
def generateQR (key, uri):
import os, qrcode
from glob import glob
img = qrcode.make(uri)
for oldImg in glob("data/*.png"):
os.remove(oldImg)
img.save("data/%s.png" % key)
def newDataDict (key, uri):
return {'otp-type': 'totp', 'key': key, 'uri': uri, 'post-token': '0'}
def getDataDict ():
try:
with open('data/data.json', 'r') as f:
dataDict = json.load(f)
except IOError:
dataDict = None
return dataDict
def setDataDict(dataDict):
with open('data/data.json', 'w') as dataFile:
json.dump(dataDict, dataFile)
def newDevDict():
return {}
def getDevDict():
try:
with open('data/devices.json', 'r') as devFile:
devDict = json.load(devFile)
except IOError:
devDict = None
return devDict
def setDevDict(devDict):
with open('data/devices.json', 'w') as devFile:
json.dump(devDict, devFile)
def addDevice(devDict, devname, devaddr):
devname = devname.lower().capitalize()
devaddr = devaddr.lower().replace('-',':')
if devname not in devDict:
devDict[devname]=devaddr
setDevDict(devDict)
return True
else:
return False
def rmvDevice(devDict, devname):
devname = devname.lower().capitalize()
if devname in devDict:
del devDict[devname]
setDevDict(devDict)
return True
else:
return False
def checkToken(dataDict):
    """Check the anti-replay token sent with a POST request.

    Returns (valid, next_token): the request is valid only when it carries an
    'action' and a 'token' strictly greater than the stored one; a valid token
    is persisted so the same POST cannot be replayed.
    """
if 'post-token' in dataDict.keys():
data_token = int(dataDict['post-token'])
token = data_token + 1
else:
raise KeyError
if 'action' in argsDict.keys() \
and 'token' in argsDict.keys():
post_token = int(argsDict['token'].value)
if post_token > data_token:
updateToken(dataDict, post_token)
token = post_token + 1
return (True, token)
else:
return (False, token)
else:
return (False, token)
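# Usage sketch (an assumption about how the CGI entry point drives this module):
#
#   dataDict, devDict = getState(init=True)
#   valid, nextToken = checkToken(dataDict)
#   if valid:
#       ...perform the requested action, e.g. wake a device...
#   # nextToken is embedded in the next form; a replayed POST carrying an older
#   # token will be rejected.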
def updateToken(dataDict, post_token):
dataDict['post-token'] = post_token
with open('data/data.json', 'w') as dataFile:
json.dump(dataDict, dataFile)
return int(dataDict['post-token'])
def printIndexHeader(stylesheets):
print 'Content-type: text/html\n\n',
print '<!DOCTYPE html>',
print '<meta name="viewport" content="width=device-width, initial-scale=1.0">',
print '<title>RWOLS - Remote WakeOnLan Server</title>',
for stylesheet in stylesheets:
print '<link rel="stylesheet" type="text/css" href="%s">' % stylesheet,
print '<script src="https://cdn.jsdelivr.net/clipboard.js/1.5.13/clipboard.min.js"></script>',
print '<h1>Remote WakeOnLan Server</h1>'
def printBottomButton(label, link):
print '<form method="post"'
print 'action="%s">' % link,
print '<input type="submit"'
print 'value="%s">' % label,
print '</form>'
| roncapat/RWOL | rwol-web-src/utilities.py | Python | gpl-3.0 | 3,772 | 0.012725 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import pytest
from qutebrowser.browser import browsertab
pytestmark = pytest.mark.usefixtures('redirect_webengine_data')
try:
from PyQt5.QtWebKitWidgets import QWebView
except ImportError:
QWebView = None
try:
from PyQt5.QtWebEngineWidgets import QWebEngineView
except ImportError:
QWebEngineView = None
@pytest.fixture(params=[QWebView, QWebEngineView])
def view(qtbot, config_stub, request):
if request.param is None:
pytest.skip("View not available")
v = request.param()
qtbot.add_widget(v)
return v
@pytest.fixture(params=['webkit', 'webengine'])
def tab(request, qtbot, tab_registry, cookiejar_and_cache, mode_manager):
if request.param == 'webkit':
webkittab = pytest.importorskip('qutebrowser.browser.webkit.webkittab')
tab_class = webkittab.WebKitTab
elif request.param == 'webengine':
webenginetab = pytest.importorskip(
'qutebrowser.browser.webengine.webenginetab')
tab_class = webenginetab.WebEngineTab
else:
assert False
t = tab_class(win_id=0, mode_manager=mode_manager)
qtbot.add_widget(t)
yield t
class Zoom(browsertab.AbstractZoom):
def _set_factor_internal(self, _factor):
pass
def factor(self):
assert False
class Tab(browsertab.AbstractTab):
# pylint: disable=abstract-method
def __init__(self, win_id, mode_manager, parent=None):
super().__init__(win_id=win_id, mode_manager=mode_manager,
parent=parent)
self.history = browsertab.AbstractHistory(self)
self.scroller = browsertab.AbstractScroller(self, parent=self)
self.caret = browsertab.AbstractCaret(win_id=self.win_id,
mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = Zoom(win_id=self.win_id)
self.search = browsertab.AbstractSearch(parent=self)
self.printing = browsertab.AbstractPrinting()
self.elements = browsertab.AbstractElements(self)
self.action = browsertab.AbstractAction()
def _install_event_filter(self):
pass
@pytest.mark.xfail(run=False, reason='Causes segfaults, see #1638')
def test_tab(qtbot, view, config_stub, tab_registry, mode_manager):
tab_w = Tab(win_id=0, mode_manager=mode_manager)
qtbot.add_widget(tab_w)
assert tab_w.win_id == 0
assert tab_w._widget is None
tab_w._set_widget(view)
assert tab_w._widget is view
assert tab_w.history._tab is tab_w
assert tab_w.history._history is view.history()
assert view.parent() is tab_w
with qtbot.waitExposed(tab_w):
tab_w.show()
| NoctuaNivalis/qutebrowser | tests/unit/browser/test_tab.py | Python | gpl-3.0 | 3,502 | 0 |
"""
WSGI config for dj_bookmarks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_bookmarks.settings")
application = get_wsgi_application()
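# Example (illustrative): this callable can be served by any WSGI server, e.g.
#   gunicorn dj_bookmarks.wsgi:application
# or by pointing mod_wsgi / uWSGI at `application` above.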
| kennethlove/django_bookmarks | dj_bookmarks/dj_bookmarks/wsgi.py | Python | bsd-3-clause | 402 | 0 |
'''
MAVLink protocol implementation (auto-generated by mavgen.py)
Generated from: test.xml
Note: this file has been auto-generated. DO NOT EDIT
'''
import struct, array, time, json, os, sys, platform
from ...generator.mavcrc import x25crc
import hashlib
WIRE_PROTOCOL_VERSION = '2.0'
DIALECT = 'test'
PROTOCOL_MARKER_V1 = 0xFE
PROTOCOL_MARKER_V2 = 0xFD
HEADER_LEN_V1 = 6
HEADER_LEN_V2 = 10
MAVLINK_SIGNATURE_BLOCK_LEN = 13
MAVLINK_IFLAG_SIGNED = 0x01
native_supported = platform.system() != 'Windows' # mavnative is not yet supported on Windows
native_force = 'MAVNATIVE_FORCE' in os.environ # Will force use of native code regardless of what client app wants
native_testing = 'MAVNATIVE_TESTING' in os.environ # Will force both native and legacy code to be used and their results compared
if native_supported and float(WIRE_PROTOCOL_VERSION) <= 1:
try:
import mavnative
except ImportError:
print('ERROR LOADING MAVNATIVE - falling back to python implementation')
native_supported = False
else:
# mavnative isn't supported for MAVLink2 yet
native_supported = False
# some base types from mavlink_types.h
MAVLINK_TYPE_CHAR = 0
MAVLINK_TYPE_UINT8_T = 1
MAVLINK_TYPE_INT8_T = 2
MAVLINK_TYPE_UINT16_T = 3
MAVLINK_TYPE_INT16_T = 4
MAVLINK_TYPE_UINT32_T = 5
MAVLINK_TYPE_INT32_T = 6
MAVLINK_TYPE_UINT64_T = 7
MAVLINK_TYPE_INT64_T = 8
MAVLINK_TYPE_FLOAT = 9
MAVLINK_TYPE_DOUBLE = 10
class MAVLink_header(object):
'''MAVLink message header'''
def __init__(self, msgId, incompat_flags=0, compat_flags=0, mlen=0, seq=0, srcSystem=0, srcComponent=0):
self.mlen = mlen
self.seq = seq
self.srcSystem = srcSystem
self.srcComponent = srcComponent
self.msgId = msgId
self.incompat_flags = incompat_flags
self.compat_flags = compat_flags
def pack(self, force_mavlink1=False):
if WIRE_PROTOCOL_VERSION == '2.0' and not force_mavlink1:
return struct.pack('<BBBBBBBHB', 253, self.mlen,
self.incompat_flags, self.compat_flags,
self.seq, self.srcSystem, self.srcComponent,
self.msgId&0xFFFF, self.msgId>>16)
return struct.pack('<BBBBBB', PROTOCOL_MARKER_V1, self.mlen, self.seq,
self.srcSystem, self.srcComponent, self.msgId)
class MAVLink_message(object):
'''base MAVLink message class'''
def __init__(self, msgId, name):
self._header = MAVLink_header(msgId)
self._payload = None
self._msgbuf = None
self._crc = None
self._fieldnames = []
self._type = name
self._signed = False
self._link_id = None
def get_msgbuf(self):
if isinstance(self._msgbuf, bytearray):
return self._msgbuf
return bytearray(self._msgbuf)
def get_header(self):
return self._header
def get_payload(self):
return self._payload
def get_crc(self):
return self._crc
def get_fieldnames(self):
return self._fieldnames
def get_type(self):
return self._type
def get_msgId(self):
return self._header.msgId
def get_srcSystem(self):
return self._header.srcSystem
def get_srcComponent(self):
return self._header.srcComponent
def get_seq(self):
return self._header.seq
def get_signed(self):
return self._signed
def get_link_id(self):
return self._link_id
def __str__(self):
ret = '%s {' % self._type
for a in self._fieldnames:
v = getattr(self, a)
ret += '%s : %s, ' % (a, v)
ret = ret[0:-2] + '}'
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if other == None:
return False
if self.get_type() != other.get_type():
return False
# We do not compare CRC because native code doesn't provide it
#if self.get_crc() != other.get_crc():
# return False
if self.get_seq() != other.get_seq():
return False
if self.get_srcSystem() != other.get_srcSystem():
return False
if self.get_srcComponent() != other.get_srcComponent():
return False
for a in self._fieldnames:
if getattr(self, a) != getattr(other, a):
return False
return True
def to_dict(self):
d = dict({})
d['mavpackettype'] = self._type
for a in self._fieldnames:
d[a] = getattr(self, a)
return d
def to_json(self):
return json.dumps(self.to_dict())
def sign_packet(self, mav):
h = hashlib.new('sha256')
self._msgbuf += struct.pack('<BQ', mav.signing.link_id, mav.signing.timestamp)[:7]
h.update(mav.signing.secret_key)
h.update(self._msgbuf)
sig = h.digest()[:6]
self._msgbuf += sig
mav.signing.timestamp += 1
def pack(self, mav, crc_extra, payload, force_mavlink1=False):
plen = len(payload)
if WIRE_PROTOCOL_VERSION != '1.0' and not force_mavlink1:
# in MAVLink2 we can strip trailing zeros off payloads. This allows for simple
# variable length arrays and smaller packets
while plen > 1 and payload[plen-1] == chr(0):
plen -= 1
self._payload = payload[:plen]
incompat_flags = 0
if mav.signing.sign_outgoing:
incompat_flags |= MAVLINK_IFLAG_SIGNED
self._header = MAVLink_header(self._header.msgId,
incompat_flags=incompat_flags, compat_flags=0,
mlen=len(self._payload), seq=mav.seq,
srcSystem=mav.srcSystem, srcComponent=mav.srcComponent)
self._msgbuf = self._header.pack(force_mavlink1=force_mavlink1) + self._payload
crc = x25crc(self._msgbuf[1:])
if True: # using CRC extra
crc.accumulate_str(struct.pack('B', crc_extra))
self._crc = crc.crc
self._msgbuf += struct.pack('<H', self._crc)
if mav.signing.sign_outgoing and not force_mavlink1:
self.sign_packet(mav)
return self._msgbuf
# enums
class EnumEntry(object):
def __init__(self, name, description):
self.name = name
self.description = description
self.param = {}
enums = {}
# message IDs
MAVLINK_MSG_ID_BAD_DATA = -1
MAVLINK_MSG_ID_TEST_TYPES = 0
class MAVLink_test_types_message(MAVLink_message):
'''
Test all field types
'''
id = MAVLINK_MSG_ID_TEST_TYPES
name = 'TEST_TYPES'
fieldnames = ['c', 's', 'u8', 'u16', 'u32', 'u64', 's8', 's16', 's32', 's64', 'f', 'd', 'u8_array', 'u16_array', 'u32_array', 'u64_array', 's8_array', 's16_array', 's32_array', 's64_array', 'f_array', 'd_array']
ordered_fieldnames = [ 'u64', 's64', 'd', 'u64_array', 's64_array', 'd_array', 'u32', 's32', 'f', 'u32_array', 's32_array', 'f_array', 'u16', 's16', 'u16_array', 's16_array', 'c', 's', 'u8', 's8', 'u8_array', 's8_array' ]
format = '<Qqd3Q3q3dIif3I3i3fHh3H3hc10sBb3B3b'
native_format = bytearray('<QqdQqdIifIifHhHhccBbBb', 'ascii')
orders = [16, 17, 18, 12, 6, 0, 19, 13, 7, 1, 8, 2, 20, 14, 9, 3, 21, 15, 10, 4, 11, 5]
lengths = [1, 1, 1, 3, 3, 3, 1, 1, 1, 3, 3, 3, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3]
array_lengths = [0, 0, 0, 3, 3, 3, 0, 0, 0, 3, 3, 3, 0, 0, 3, 3, 0, 10, 0, 0, 3, 3]
crc_extra = 103
def __init__(self, c, s, u8, u16, u32, u64, s8, s16, s32, s64, f, d, u8_array, u16_array, u32_array, u64_array, s8_array, s16_array, s32_array, s64_array, f_array, d_array):
MAVLink_message.__init__(self, MAVLink_test_types_message.id, MAVLink_test_types_message.name)
self._fieldnames = MAVLink_test_types_message.fieldnames
self.c = c
self.s = s
self.u8 = u8
self.u16 = u16
self.u32 = u32
self.u64 = u64
self.s8 = s8
self.s16 = s16
self.s32 = s32
self.s64 = s64
self.f = f
self.d = d
self.u8_array = u8_array
self.u16_array = u16_array
self.u32_array = u32_array
self.u64_array = u64_array
self.s8_array = s8_array
self.s16_array = s16_array
self.s32_array = s32_array
self.s64_array = s64_array
self.f_array = f_array
self.d_array = d_array
def pack(self, mav, force_mavlink1=False):
return MAVLink_message.pack(self, mav, 103, struct.pack('<Qqd3Q3q3dIif3I3i3fHh3H3hc10sBb3B3b', self.u64, self.s64, self.d, self.u64_array[0], self.u64_array[1], self.u64_array[2], self.s64_array[0], self.s64_array[1], self.s64_array[2], self.d_array[0], self.d_array[1], self.d_array[2], self.u32, self.s32, self.f, self.u32_array[0], self.u32_array[1], self.u32_array[2], self.s32_array[0], self.s32_array[1], self.s32_array[2], self.f_array[0], self.f_array[1], self.f_array[2], self.u16, self.s16, self.u16_array[0], self.u16_array[1], self.u16_array[2], self.s16_array[0], self.s16_array[1], self.s16_array[2], self.c, self.s, self.u8, self.s8, self.u8_array[0], self.u8_array[1], self.u8_array[2], self.s8_array[0], self.s8_array[1], self.s8_array[2]), force_mavlink1=force_mavlink1)
mavlink_map = {
MAVLINK_MSG_ID_TEST_TYPES : MAVLink_test_types_message,
}
class MAVError(Exception):
'''MAVLink error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class MAVString(str):
'''NUL terminated string'''
def __init__(self, s):
str.__init__(self)
def __str__(self):
i = self.find(chr(0))
if i == -1:
return self[:]
return self[0:i]
class MAVLink_bad_data(MAVLink_message):
'''
a piece of bad data in a mavlink stream
'''
def __init__(self, data, reason):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_BAD_DATA, 'BAD_DATA')
self._fieldnames = ['data', 'reason']
self.data = data
self.reason = reason
self._msgbuf = data
def __str__(self):
        '''Override the __str__ function from MAVLink_message because non-printable characters are commonly the reason this message exists.'''
return '%s {%s, data:%s}' % (self._type, self.reason, [('%x' % ord(i) if isinstance(i, str) else '%x' % i) for i in self.data])
class MAVLinkSigning(object):
'''MAVLink signing state class'''
def __init__(self):
self.secret_key = None
self.timestamp = 0
self.link_id = 0
self.sign_outgoing = False
self.allow_unsigned_callback = None
self.stream_timestamps = {}
self.badsig_count = 0
self.goodsig_count = 0
self.unsigned_count = 0
self.reject_count = 0
class MAVLink(object):
'''MAVLink protocol handling class'''
def __init__(self, file, srcSystem=0, srcComponent=0, use_native=False):
self.seq = 0
self.file = file
self.srcSystem = srcSystem
self.srcComponent = srcComponent
self.callback = None
self.callback_args = None
self.callback_kwargs = None
self.send_callback = None
self.send_callback_args = None
self.send_callback_kwargs = None
self.buf = bytearray()
self.buf_index = 0
self.expected_length = HEADER_LEN_V1+2
self.have_prefix_error = False
self.robust_parsing = False
self.protocol_marker = 253
self.little_endian = True
self.crc_extra = True
self.sort_fields = True
self.total_packets_sent = 0
self.total_bytes_sent = 0
self.total_packets_received = 0
self.total_bytes_received = 0
self.total_receive_errors = 0
self.startup_time = time.time()
self.signing = MAVLinkSigning()
if native_supported and (use_native or native_testing or native_force):
print("NOTE: mavnative is currently beta-test code")
self.native = mavnative.NativeConnection(MAVLink_message, mavlink_map)
else:
self.native = None
if native_testing:
self.test_buf = bytearray()
def set_callback(self, callback, *args, **kwargs):
self.callback = callback
self.callback_args = args
self.callback_kwargs = kwargs
def set_send_callback(self, callback, *args, **kwargs):
self.send_callback = callback
self.send_callback_args = args
self.send_callback_kwargs = kwargs
def send(self, mavmsg, force_mavlink1=False):
'''send a MAVLink message'''
buf = mavmsg.pack(self, force_mavlink1=force_mavlink1)
self.file.write(buf)
self.seq = (self.seq + 1) % 256
self.total_packets_sent += 1
self.total_bytes_sent += len(buf)
if self.send_callback:
self.send_callback(mavmsg, *self.send_callback_args, **self.send_callback_kwargs)
def buf_len(self):
return len(self.buf) - self.buf_index
def bytes_needed(self):
'''return number of bytes needed for next parsing stage'''
if self.native:
ret = self.native.expected_length - self.buf_len()
else:
ret = self.expected_length - self.buf_len()
if ret <= 0:
return 1
return ret
def __parse_char_native(self, c):
'''this method exists only to see in profiling results'''
m = self.native.parse_chars(c)
return m
def __callbacks(self, msg):
'''this method exists only to make profiling results easier to read'''
if self.callback:
self.callback(msg, *self.callback_args, **self.callback_kwargs)
def parse_char(self, c):
'''input some data bytes, possibly returning a new message'''
self.buf.extend(c)
self.total_bytes_received += len(c)
if self.native:
if native_testing:
self.test_buf.extend(c)
m = self.__parse_char_native(self.test_buf)
m2 = self.__parse_char_legacy()
if m2 != m:
print("Native: %s\nLegacy: %s\n" % (m, m2))
raise Exception('Native vs. Legacy mismatch')
else:
m = self.__parse_char_native(self.buf)
else:
m = self.__parse_char_legacy()
if m != None:
self.total_packets_received += 1
self.__callbacks(m)
else:
# XXX The idea here is if we've read something and there's nothing left in
# the buffer, reset it to 0 which frees the memory
if self.buf_len() == 0 and self.buf_index != 0:
self.buf = bytearray()
self.buf_index = 0
return m
def __parse_char_legacy(self):
'''input some data bytes, possibly returning a new message (uses no native code)'''
header_len = HEADER_LEN_V1
if self.buf_len() >= 1 and self.buf[self.buf_index] == PROTOCOL_MARKER_V2:
header_len = HEADER_LEN_V2
if self.buf_len() >= 1 and self.buf[self.buf_index] != PROTOCOL_MARKER_V1 and self.buf[self.buf_index] != PROTOCOL_MARKER_V2:
magic = self.buf[self.buf_index]
self.buf_index += 1
if self.robust_parsing:
m = MAVLink_bad_data(chr(magic), 'Bad prefix')
self.expected_length = header_len+2
self.total_receive_errors += 1
return m
if self.have_prefix_error:
return None
self.have_prefix_error = True
self.total_receive_errors += 1
raise MAVError("invalid MAVLink prefix '%s'" % magic)
self.have_prefix_error = False
if self.buf_len() >= 3:
sbuf = self.buf[self.buf_index:3+self.buf_index]
if sys.version_info[0] < 3:
sbuf = str(sbuf)
(magic, self.expected_length, incompat_flags) = struct.unpack('BBB', sbuf)
if magic == PROTOCOL_MARKER_V2 and (incompat_flags & MAVLINK_IFLAG_SIGNED):
self.expected_length += MAVLINK_SIGNATURE_BLOCK_LEN
self.expected_length += header_len + 2
if self.expected_length >= (header_len+2) and self.buf_len() >= self.expected_length:
mbuf = array.array('B', self.buf[self.buf_index:self.buf_index+self.expected_length])
self.buf_index += self.expected_length
self.expected_length = header_len+2
if self.robust_parsing:
try:
if magic == PROTOCOL_MARKER_V2 and (incompat_flags & ~MAVLINK_IFLAG_SIGNED) != 0:
raise MAVError('invalid incompat_flags 0x%x 0x%x %u' % (incompat_flags, magic, self.expected_length))
m = self.decode(mbuf)
except MAVError as reason:
m = MAVLink_bad_data(mbuf, reason.message)
self.total_receive_errors += 1
else:
if magic == PROTOCOL_MARKER_V2 and (incompat_flags & ~MAVLINK_IFLAG_SIGNED) != 0:
raise MAVError('invalid incompat_flags 0x%x 0x%x %u' % (incompat_flags, magic, self.expected_length))
m = self.decode(mbuf)
return m
return None
def parse_buffer(self, s):
'''input some data bytes, possibly returning a list of new messages'''
m = self.parse_char(s)
if m is None:
return None
ret = [m]
while True:
m = self.parse_char("")
if m is None:
return ret
ret.append(m)
return ret
def check_signature(self, msgbuf, srcSystem, srcComponent):
'''check signature on incoming message'''
if isinstance(msgbuf, array.array):
msgbuf = msgbuf.tostring()
timestamp_buf = msgbuf[-12:-6]
link_id = msgbuf[-13]
(tlow, thigh) = struct.unpack('<IH', timestamp_buf)
timestamp = tlow + (thigh<<32)
# see if the timestamp is acceptable
stream_key = (link_id,srcSystem,srcComponent)
if stream_key in self.signing.stream_timestamps:
if timestamp <= self.signing.stream_timestamps[stream_key]:
# reject old timestamp
# print('old timestamp')
return False
else:
# a new stream has appeared. Accept the timestamp if it is at most
# one minute behind our current timestamp
if timestamp + 6000*1000 < self.signing.timestamp:
# print('bad new stream ', timestamp/(100.0*1000*60*60*24*365), self.signing.timestamp/(100.0*1000*60*60*24*365))
return False
self.signing.stream_timestamps[stream_key] = timestamp
# print('new stream')
h = hashlib.new('sha256')
h.update(self.signing.secret_key)
h.update(msgbuf[:-6])
sig1 = str(h.digest())[:6]
sig2 = str(msgbuf)[-6:]
if sig1 != sig2:
# print('sig mismatch')
return False
# the timestamp we next send with is the max of the received timestamp and
# our current timestamp
self.signing.timestamp = max(self.signing.timestamp, timestamp)
return True
def decode(self, msgbuf):
'''decode a buffer as a MAVLink message'''
# decode the header
if msgbuf[0] != PROTOCOL_MARKER_V1:
headerlen = 10
try:
magic, mlen, incompat_flags, compat_flags, seq, srcSystem, srcComponent, msgIdlow, msgIdhigh = struct.unpack('<cBBBBBBHB', msgbuf[:headerlen])
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
msgId = msgIdlow | (msgIdhigh<<16)
mapkey = msgId
else:
headerlen = 6
try:
magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack('<cBBBBB', msgbuf[:headerlen])
incompat_flags = 0
compat_flags = 0
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
mapkey = msgId
if (incompat_flags & MAVLINK_IFLAG_SIGNED) != 0:
signature_len = MAVLINK_SIGNATURE_BLOCK_LEN
else:
signature_len = 0
if ord(magic) != PROTOCOL_MARKER_V1 and ord(magic) != PROTOCOL_MARKER_V2:
raise MAVError("invalid MAVLink prefix '%s'" % magic)
if mlen != len(msgbuf)-(headerlen+2+signature_len):
raise MAVError('invalid MAVLink message length. Got %u expected %u, msgId=%u headerlen=%u' % (len(msgbuf)-(headerlen+2+signature_len), mlen, msgId, headerlen))
if not mapkey in mavlink_map:
raise MAVError('unknown MAVLink message ID %s' % str(mapkey))
# decode the payload
type = mavlink_map[mapkey]
fmt = type.format
order_map = type.orders
len_map = type.lengths
crc_extra = type.crc_extra
# decode the checksum
try:
crc, = struct.unpack('<H', msgbuf[-(2+signature_len):][:2])
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink CRC: %s' % emsg)
crcbuf = msgbuf[1:-(2+signature_len)]
if True: # using CRC extra
crcbuf.append(crc_extra)
crc2 = x25crc(crcbuf)
if crc != crc2.crc:
raise MAVError('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))
sig_ok = False
if self.signing.secret_key is not None:
accept_signature = False
if signature_len == MAVLINK_SIGNATURE_BLOCK_LEN:
sig_ok = self.check_signature(msgbuf, srcSystem, srcComponent)
accept_signature = sig_ok
if sig_ok:
self.signing.goodsig_count += 1
else:
self.signing.badsig_count += 1
if not accept_signature and self.signing.allow_unsigned_callback is not None:
accept_signature = self.signing.allow_unsigned_callback(self, msgId)
if accept_signature:
self.signing.unsigned_count += 1
else:
self.signing.reject_count += 1
elif self.signing.allow_unsigned_callback is not None:
accept_signature = self.signing.allow_unsigned_callback(self, msgId)
if accept_signature:
self.signing.unsigned_count += 1
else:
self.signing.reject_count += 1
if not accept_signature:
raise MAVError('Invalid signature')
csize = struct.calcsize(fmt)
mbuf = msgbuf[headerlen:-(2+signature_len)]
if len(mbuf) < csize:
# zero pad to give right size
mbuf.extend([0]*(csize - len(mbuf)))
if len(mbuf) < csize:
raise MAVError('Bad message of type %s length %u needs %s' % (
type, len(mbuf), csize))
mbuf = mbuf[:csize]
try:
t = struct.unpack(fmt, mbuf)
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (
type, fmt, len(mbuf), emsg))
tlist = list(t)
# handle sorted fields
if True:
t = tlist[:]
if sum(len_map) == len(len_map):
# message has no arrays in it
for i in range(0, len(tlist)):
tlist[i] = t[order_map[i]]
else:
# message has some arrays
tlist = []
for i in range(0, len(order_map)):
order = order_map[i]
L = len_map[order]
tip = sum(len_map[:order])
field = t[tip]
if L == 1 or isinstance(field, str):
tlist.append(field)
else:
tlist.append(t[tip:(tip + L)])
# terminate any strings
for i in range(0, len(tlist)):
if isinstance(tlist[i], str):
tlist[i] = str(MAVString(tlist[i]))
t = tuple(tlist)
# construct the message object
try:
m = type(*t)
except Exception as emsg:
raise MAVError('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
m._signed = sig_ok
if m._signed:
m._link_id = msgbuf[-13]
m._msgbuf = msgbuf
m._payload = msgbuf[6:-(2+signature_len)]
m._crc = crc
m._header = MAVLink_header(msgId, incompat_flags, compat_flags, mlen, seq, srcSystem, srcComponent)
return m
def test_types_encode(self, c, s, u8, u16, u32, u64, s8, s16, s32, s64, f, d, u8_array, u16_array, u32_array, u64_array, s8_array, s16_array, s32_array, s64_array, f_array, d_array):
'''
Test all field types
c : char (char)
s : string (char)
u8 : uint8_t (uint8_t)
u16 : uint16_t (uint16_t)
u32 : uint32_t (uint32_t)
u64 : uint64_t (uint64_t)
s8 : int8_t (int8_t)
s16 : int16_t (int16_t)
s32 : int32_t (int32_t)
s64 : int64_t (int64_t)
f : float (float)
d : double (double)
u8_array : uint8_t_array (uint8_t)
u16_array : uint16_t_array (uint16_t)
u32_array : uint32_t_array (uint32_t)
u64_array : uint64_t_array (uint64_t)
s8_array : int8_t_array (int8_t)
s16_array : int16_t_array (int16_t)
s32_array : int32_t_array (int32_t)
s64_array : int64_t_array (int64_t)
f_array : float_array (float)
d_array : double_array (double)
'''
return MAVLink_test_types_message(c, s, u8, u16, u32, u64, s8, s16, s32, s64, f, d, u8_array, u16_array, u32_array, u64_array, s8_array, s16_array, s32_array, s64_array, f_array, d_array)
def test_types_send(self, c, s, u8, u16, u32, u64, s8, s16, s32, s64, f, d, u8_array, u16_array, u32_array, u64_array, s8_array, s16_array, s32_array, s64_array, f_array, d_array, force_mavlink1=False):
'''
Test all field types
c : char (char)
s : string (char)
u8 : uint8_t (uint8_t)
u16 : uint16_t (uint16_t)
u32 : uint32_t (uint32_t)
u64 : uint64_t (uint64_t)
s8 : int8_t (int8_t)
s16 : int16_t (int16_t)
s32 : int32_t (int32_t)
s64 : int64_t (int64_t)
f : float (float)
d : double (double)
u8_array : uint8_t_array (uint8_t)
u16_array : uint16_t_array (uint16_t)
u32_array : uint32_t_array (uint32_t)
u64_array : uint64_t_array (uint64_t)
s8_array : int8_t_array (int8_t)
s16_array : int16_t_array (int16_t)
s32_array : int32_t_array (int32_t)
s64_array : int64_t_array (int64_t)
f_array : float_array (float)
d_array : double_array (double)
'''
return self.send(self.test_types_encode(c, s, u8, u16, u32, u64, s8, s16, s32, s64, f, d, u8_array, u16_array, u32_array, u64_array, s8_array, s16_array, s32_array, s64_array, f_array, d_array), force_mavlink1=force_mavlink1)
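# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated dialect): pack one
# TEST_TYPES message through a sending MAVLink instance and parse it back with
# a receiving one.  The _Sink class below is only a stand-in for a real
# serial/UDP connection object.
# ---------------------------------------------------------------------------
def _example_roundtrip():
    class _Sink(object):
        '''minimal file-like object that just collects written bytes'''
        def __init__(self):
            self.data = bytearray()
        def write(self, buf):
            self.data.extend(buf)

    sender = MAVLink(_Sink(), srcSystem=1, srcComponent=1)
    receiver = MAVLink(_Sink())
    sender.test_types_send(b'a', b'string', 1, 2, 3, 4, -1, -2, -3, -4,
                           1.0, 2.0,
                           [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3],
                           [-1, -2, -3], [-1, -2, -3], [-1, -2, -3],
                           [-1, -2, -3],
                           [1.0, 2.0, 3.0], [1.0, 2.0, 3.0])
    # parse_buffer() accepts raw bytes and returns the decoded messages;
    # signing could be enabled by filling in the MAVLinkSigning state
    # (signing.secret_key, signing.sign_outgoing) on both instances.
    return receiver.parse_buffer(sender.file.data)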
| fqez/JdeRobot | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/test.py | Python | gpl-3.0 | 31,243 | 0.004161 |
############################################################################
# Generic script applicable on any Operating Environment (Unix, Windows)
# ScriptName : wls_reset.py
# Properties : weblogic.properties
# Author : Kevin Yuan
############################################################################
#===========================================================================
# Connect to wls server
#===========================================================================
connect('%%WL_USR%%','%%WL_PWD%%','t3://%%WL_HOST%%:%%WL_PORT%%')
#===========================================================================
# Remove Data Sources using wlst online commands for three composite models
#===========================================================================
edit()
startEdit()
delete('EclipseLinkDS','JDBCSystemResource')
delete('EclipseLinkDS2','JDBCSystemResource')
delete('EclipseLinkDS3','JDBCSystemResource')
save()
activate()
exit() | bfg-repo-cleaner-demos/eclipselink.runtime-bfg-strip-big-blobs | jpa/eclipselink.jpa.test/resource/weblogic/wls_composite_reset.py | Python | epl-1.0 | 998 | 0.018036 |
from capstone import *
from .architecture import Architecture
from avatar2.installer.config import GDB_X86, OPENOCD
class X86(Architecture):
get_gdb_executable = Architecture.resolve(GDB_X86)
get_oocd_executable = Architecture.resolve(OPENOCD)
qemu_name = 'i386'
gdb_name = 'i386'
registers = {'eax': 0,
'ecx': 1,
'edx': 2,
'ebx': 3,
'esp': 4,
'ebp': 5,
'esi': 6,
'edi': 7,
'eip': 8,
'pc': 8,
'eflags': 9,
'cs': 10,
'ss': 11,
'ds': 12,
'es': 13,
'fs': 14,
'gs': 15, }
special_registers = {
#SSE
'xmm0': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm0.v4_int32',
},
'xmm1': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm1.v4_int32',
},
'xmm2': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm2.v4_int32',
},
'xmm3': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm3.v4_int32',
},
'xmm4': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm4.v4_int32',
},
'xmm5': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm5.v4_int32',
},
'xmm6': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm6.v4_int32',
},
'xmm7': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm7.v4_int32',
},
'xmm8': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm8.v4_int32',
},
'xmm9': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm9.v4_int32',
},
'xmm10': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm10.v4_int32',
},
'xmm11': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm11.v4_int32',
},
'xmm12': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm12.v4_int32',
},
'xmm13': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm13.v4_int32',
},
'xmm14': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm14.v4_int32',
},
'xmm15': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm15.v4_int32',
},
#AVX
'ymm0': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm0.v8_int32',
},
'ymm1': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm1.v8_int32',
},
'ymm2': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm2.v8_int32',
},
'ymm3': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm3.v8_int32',
},
'ymm4': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm4.v8_int32',
},
'ymm5': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm5.v8_int32',
},
'ymm6': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm6.v8_int32',
},
'ymm7': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm7.v8_int32',
},
'ymm8': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm8.v8_int32',
},
'ymm9': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm9.v8_int32',
},
'ymm10': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm10.v8_int32',
},
'ymm11': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm11.v8_int32',
},
'ymm12': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm12.v8_int32',
},
'ymm13': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm13.v8_int32',
},
'ymm14': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm14.v8_int32',
},
'ymm15': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm15.v8_int32',
},
}
sr_name = 'eflags'
unemulated_instructions = []
capstone_arch = CS_ARCH_X86
capstone_mode = CS_MODE_32
word_size = 32
class X86_64(X86):
qemu_name = 'x86_64'
gdb_name = 'i386:x86-64'
registers = {'rax': 0,
'rbx': 1,
'rcx': 2,
'rdx': 3,
'rsi': 4,
'rdi': 5,
'rbp': 6,
'rsp': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'r13': 13,
'r14': 14,
'r15': 15,
'rip': 16,
'pc': 16,
'eflags': 17,
'cs': 18,
'ss': 19,
'ds': 20,
'es': 21,
'fs': 22,
'gs': 23,
}
capstone_mode = CS_MODE_64
unemulated_instructions = []
word_size = 64
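# Illustrative sketch (not part of avatar2's API; the helper name below is
# made up): the capstone_arch / capstone_mode attributes above are what one
# would hand to capstone to obtain a disassembler for the chosen architecture.
def _example_disassembler(arch_cls=X86_64):
    """Return a capstone Cs instance configured from an Architecture class."""
    return Cs(arch_cls.capstone_arch, arch_cls.capstone_mode)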
| avatartwo/avatar2 | avatar2/archs/x86.py | Python | apache-2.0 | 6,401 | 0.010155 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
import re
import hashlib
import lxml
from weboob.tools.browser import BaseBrowser, BrowserHTTPNotFound, BrowserHTTPError, BrowserIncorrectPassword, BrokenPageError
from weboob.capabilities.messages import CantSendMessage
from .pages.index import IndexPage, LoginPage
from .pages.news import ContentPage, NewCommentPage, NodePage, CommentPage, NewTagPage, RSSComment
from .pages.board import BoardIndexPage
from .pages.wiki import WikiEditPage
from .tools import id2url, url2id
# Browser
class DLFP(BaseBrowser):
DOMAIN = 'linuxfr.org'
PROTOCOL = 'https'
PAGES = {'https?://[^/]*linuxfr\.org/?': IndexPage,
'https?://[^/]*linuxfr\.org/compte/connexion': LoginPage,
'https?://[^/]*linuxfr\.org/news/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/wiki/(?!nouveau)[^/]+': ContentPage,
'https?://[^/]*linuxfr\.org/wiki': WikiEditPage,
'https?://[^/]*linuxfr\.org/wiki/nouveau': WikiEditPage,
'https?://[^/]*linuxfr\.org/wiki/[^\.]+/modifier': WikiEditPage,
'https?://[^/]*linuxfr\.org/suivi/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/sondages/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/users/[^\./]+/journaux/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/forums/[^\./]+/posts/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/comments/(\d+)': CommentPage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/comments/nouveau': NewCommentPage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/comments': NodePage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/tags/nouveau': NewTagPage,
'https?://[^/]*linuxfr\.org/board/index.xml': BoardIndexPage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/comments.atom': RSSComment,
}
last_board_msg_id = None
def parse_id(self, _id):
if re.match('^https?://.*linuxfr.org/nodes/\d+/comments/\d+$', _id):
return _id, None
url = id2url(_id)
if url is None:
if url2id(_id) is not None:
url = _id
_id = url2id(url)
else:
return None, None
return url, _id
def get_wiki_content(self, _id):
url, _id = self.parse_id('W.%s' % _id)
if url is None:
return None
try:
self.location('%s/modifier' % url)
except BrowserHTTPNotFound:
return ''
assert self.is_on_page(WikiEditPage)
return self.page.get_body()
def _go_on_wiki_edit_page(self, name):
"""
Go on the wiki page named 'name'.
Return True if this is a new page, or False if
        the page already exists.
        Return None if 'name' is not a valid wiki page name.
"""
url, _id = self.parse_id('W.%s' % name)
if url is None:
return None
try:
self.location('%s/modifier' % url)
except BrowserHTTPNotFound:
self.location('/wiki/nouveau')
new = True
else:
new = False
assert self.is_on_page(WikiEditPage)
return new
def set_wiki_content(self, name, content, message):
new = self._go_on_wiki_edit_page(name)
if new is None:
return None
if new:
title = name.replace('-', ' ')
else:
title = None
self.page.post_content(title, content, message)
def get_wiki_preview(self, name, content):
if self._go_on_wiki_edit_page(name) is None:
return None
self.page.post_preview(content)
if self.is_on_page(WikiEditPage):
return self.page.get_preview_html()
elif self.is_on_page(ContentPage):
return self.page.get_article().body
def get_hash(self, url):
self.location(url)
if self.page.document.xpath('//entry'):
myhash = hashlib.md5(lxml.etree.tostring(self.page.document)).hexdigest()
return myhash
else:
return None
def get_content(self, _id):
url, _id = self.parse_id(_id)
if url is None:
return None
self.location(url)
self.page.url = self.absurl(url)
if self.is_on_page(CommentPage):
content = self.page.get_comment()
elif self.is_on_page(ContentPage):
m = re.match('.*#comment-(\d+)$', url)
if m:
content = self.page.get_comment(int(m.group(1)))
else:
content = self.page.get_article()
else:
raise BrokenPageError('Not on a content or comment page (%r)' % self.page)
if _id is not None:
content.id = _id
return content
def _is_comment_submit_form(self, form):
return 'comment_new' in form.action
def post_comment(self, thread, reply_id, title, message):
url = id2url(thread)
if url is None:
raise CantSendMessage('%s is not a right ID' % thread)
self.location(url)
assert self.is_on_page(ContentPage)
self.location(self.page.get_post_comment_url())
assert self.is_on_page(NewCommentPage)
self.select_form(predicate=self._is_comment_submit_form)
self.set_all_readonly(False)
if title is not None:
self['comment[title]'] = title.encode('utf-8')
self['comment[wiki_body]'] = message.encode('utf-8')
if int(reply_id) > 0:
self['comment[parent_id]'] = str(reply_id)
self['commit'] = 'Poster le commentaire'
try:
self.submit()
except BrowserHTTPError as e:
raise CantSendMessage('Unable to send message to %s.%s: %s' % (thread, reply_id, e))
if self.is_on_page(NodePage):
errors = self.page.get_errors()
if len(errors) > 0:
raise CantSendMessage('Unable to send message: %s' % ', '.join(errors))
return None
def login(self):
if self.username is None:
return
        # not useful for the moment
#self.location('/', no_login=True)
data = {'account[login]': self.username,
'account[password]': self.password,
'account[remember_me]': 1,
#'authenticity_token': self.page.get_login_token(),
}
self.location('/compte/connexion', urllib.urlencode(data), no_login=True)
if not self.is_logged():
raise BrowserIncorrectPassword()
self._token = self.page.document.xpath('//input[@name="authenticity_token"]')
def is_logged(self):
return (self.username is None or (self.page and self.page.is_logged()))
def close_session(self):
if self._token:
self.openurl('/compte/deconnexion', urllib.urlencode({'authenticity_token': self._token[0].attrib['value']}))
def plusse(self, url):
return self.relevance(url, 'for')
def moinse(self, url):
return self.relevance(url, 'against')
def relevance(self, url, what):
comment = self.get_content(url)
if comment is None:
raise ValueError('The given URL isn\'t a comment.')
if comment.relevance_token is None:
return False
res = self.readurl('%s%s' % (comment.relevance_url, what),
urllib.urlencode({'authenticity_token': comment.relevance_token}))
return res
def iter_new_board_messages(self):
self.location('/board/index.xml')
assert self.is_on_page(BoardIndexPage)
msgs = self.page.get_messages(self.last_board_msg_id)
for msg in reversed(msgs):
self.last_board_msg_id = msg.id
yield msg
def board_post(self, msg):
request = self.request_class(self.absurl('/board/'),
urllib.urlencode({'board[message]': msg}),
{'Referer': self.absurl('/')})
self.readurl(request)
def add_tag(self, _id, tag):
url, _id = self.parse_id(_id)
if url is None:
return None
self.location(url)
assert self.is_on_page(ContentPage)
self.location(self.page.get_tag_url())
assert self.is_on_page(NewTagPage)
self.page.tag(tag)
| Boussadia/weboob | modules/dlfp/browser.py | Python | agpl-3.0 | 9,155 | 0.005789 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
from django.core.exceptions import ImproperlyConfigured
from shoop.utils.setup import Setup
from . import base_settings
def configure(setup):
base_settings.configure(setup)
local_settings_file = os.getenv('LOCAL_SETTINGS_FILE')
    # Backward compatibility: look in the current directory if the
    # LOCAL_SETTINGS_FILE environment variable is unset
if local_settings_file is None:
cand = os.path.join(os.path.dirname(__file__), 'local_settings.py')
if os.path.exists(cand):
local_settings_file = cand
# Load local settings from file
if local_settings_file:
local_settings_ns = {
'__file__': local_settings_file,
}
with open(local_settings_file, 'rb') as fp:
compiled = compile(fp.read(), local_settings_file, 'exec')
exec(compiled, local_settings_ns)
if 'configure' not in local_settings_ns:
raise ImproperlyConfigured('No configure in local_settings')
local_configure = local_settings_ns['configure']
local_configure(setup)
return setup
globals().update(Setup.configure(configure))
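# Illustrative example only: a minimal local settings file consumed by the
# loader above (its path comes from the LOCAL_SETTINGS_FILE environment
# variable) just has to define a configure(setup) function; the setting names
# below are placeholders.
#
#     def configure(setup):
#         setup.DEBUG = True
#         setup.SECRET_KEY = "change-me"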
| akx/shoop | shoop_workbench/settings/__init__.py | Python | agpl-3.0 | 1,393 | 0.000718 |
from LazyMail import LazyMail, encode_string
import email
import datetime
import os
import cgi
import re
import logging
from email.header import decode_header
__author__ = 'doms'
class SavableLazyMail(LazyMail):
def __init__(self, config, mail_connection, uid, stats):
self.STATS = stats
self.CONFIG = config
LazyMail.__init__(self, mail_connection, uid)
#Gets a LazyMail and saves it to disk
#It will use the Hashcode as Filename and the date as path
#The Date-Path can be configured
    #Returns True if fully successful; False means at least part of the mail could not be saved (no rollback is performed)
def saveMailToHardDisk(self):
        #Getting path from date (CONFIG.FOLDER_SYSTEM is a strftime pattern, e.g. "%Y/%m/%d/")
parsed_mail = self.getParsedMail()
date_raw = email.utils.parsedate_tz(parsed_mail['Date'])
if date_raw:
local_date_raw = datetime.datetime.fromtimestamp(email.utils.mktime_tz(date_raw))
path = local_date_raw.strftime(self.CONFIG.FOLDER_SYSTEM)
else:
path = "NoDate/"
#Save to file
try:
#Create Path if not exist
mail_folder_path = os.path.join(self.CONFIG.BACKUP_FOLDER_PATH, path)
if not os.path.exists(mail_folder_path):
os.makedirs(mail_folder_path)
#save eml file which can be opened with thunderbird (is more or less what the server has returned)
if self.CONFIG.SAVE_EML:
eml_path = os.path.join(mail_folder_path, "eml", )
if not os.path.exists(eml_path):
os.makedirs(eml_path)
self.saveEMLtoFile(eml_path)
#Save attachments: If there are none, False will be returned
check_attachments, attachments = self.saveAttachmentsToHardDisk(mail_folder_path)
#Create HTML-File
full_path = os.path.join(mail_folder_path, self.getHashcode()) + ".html"
file_message_without_attachment = open(full_path, 'w')
check_html = self.writeToHTML(attachments, file_message_without_attachment)
file_message_without_attachment.close()
except Exception as e:
#If anything has failed
logging.error("Failed to save mail (%s,%s) because of %s", self.getDate(), self.getSubject(), e)
return False
if check_attachments and check_html:
logging.info("Saved mail (From: %s, Subject: %s) to %s", self.getFrom(), self.getSubject(), full_path)
return True
elif check_attachments or check_html:
logging.info("Partly saved mail (From: %s, Subject: %s) to %s", self.getFrom(), self.getSubject(), full_path)
return False
else:
logging.info("Could not save mail (From: %s, Subject: %s)", self.getFrom(), self.getSubject())
return False
#Writes a lazy_mail to a given HTML-File
def writeToHTML(self, attachments, html_file):
check = True
try:
#HTML-Header
html_file.write("<!DOCTYPE html> <html lang=\"en\"> <head> <title>")
html_file.write(self.getSubject())
html_file.write("</title> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> </head> <body> <div class=\"row\"> <div class=\"col-md-12\">")
#HTML-Table with To,From,Subject
html_file.write("<table boarder=\"1\">\n")
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>From: </td>\n")
html_file.write("\t\t<td>" + self.getFrom() + "</td>\n")
html_file.write("\t<tr>\n")
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>To: </td>\n")
html_file.write("\t\t<td>" + self.getTo() + "</td>\n")
html_file.write("\t<tr>\n")
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>Subject: </td>\n")
html_file.write("\t\t<td>" + self.getSubject() + "</td>\n")
html_file.write("\t<tr>\n")
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>Date: </td>\n")
html_file.write("\t\t<td>" + self.getDate() + "</td>\n")
html_file.write("\t<tr>\n")
#Information in Table if Attachments
if len(attachments) > 0:
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>Attachments: </td><td>")
for attachment in attachments:
html_file.write("<a href=\"" + attachment[0] + "\">" + cgi.escape(encode_string(str(attachment[1]), None)) + "</a>")
if attachment is not attachments[-1]:
html_file.write(", ")
html_file.write("</td>\n")
html_file.write("\t<tr>\n")
html_file.write("</table>\n")
html_file.write("<div class=\"col-md-8 col-md-offset-1 footer\"> <hr /><div style=\"white-space: pre-wrap;\">")
#Write content to File
check, content_of_mail = self.getContent()
if content_of_mail['text']:
html_file.write("<pre>")
strip_header = re.sub(r"(?i)<html>.*?<head>.*?</head>.*?<body>", "", content_of_mail['text'],
flags=re.DOTALL)
strip_header = re.sub(r"(?i)</body>.*?</html>", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)<!DOCTYPE.*?>", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)POSITION: absolute;", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)TOP: .*?;", "", strip_header, flags=re.DOTALL)
html_file.write(strip_header)
html_file.write("</pre>\n")
if content_of_mail['html']:
strip_header = re.sub(r"(?i)<html>.*?<head>.*?</head>.*?<body>", "", content_of_mail['html'],
flags=re.DOTALL)
strip_header = re.sub(r"(?i)</body>.*?</html>", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)<!DOCTYPE.*?>", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)POSITION: absolute;", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)TOP: .*?;", "", strip_header, flags=re.DOTALL)
html_file.write(strip_header)
#HTML-Footer
#html_file.write("</div> <div class=\"col-md-8 col-md-offset-1 footer\"> <hr /><div style=\"white-space: pre-wrap;\">")
#html_file.write(lazy_mail.getHeader())
html_file.write("</div></div></body></html>")
except Exception as e:
logging.error("Could not write HTML because of %s", e)
raise e
return check
    #Saves the attachments of a LazyMail to disk under 'folder/attachments/<hashcode>/<filename>'
    #E.g. for folder="2014/05/03/" and filename="photo.jpg" it is saved as "2014/05/03/attachments/<hashcode>/photo.jpg"
def saveAttachmentsToHardDisk(self, folder):
attachments_tuple_for_html = []
filename_count = dict() #to handle attachments with same name
successful = True
for part in self.getParsedMail().walk():
attachment_filename = "(Could not encode)"
attachment_filename_encoding = None
try:
content_maintype = part.get_content_maintype()
if content_maintype == 'multipart' or content_maintype == 'text' or content_maintype == 'html':
continue
if part.get('Content-Disposition') == None:
continue
try:
attachment_filename = decode_header(part.get_filename())[0][0]
attachment_filename_encoding = decode_header(part.get_filename())[0][1]
except Exception as e:
logging.debug("Workaround Filename Encoding")
logging.debug(str(part))
try:
attachment_filename = encode_string(part.get_filename(), None) #"(could not encode filename)"
logging.debug(attachment_filename)
except:
logging.error("Could not encode filename, %s", e)
attachment_filename = "(Could not encode)"
attachment_filename_encoding = None
successful = False
if not attachment_filename:
logging.warning("Empty part in mail. Don't know what to do with it!")
logging.debug(str(part))
continue
#put a (x) behind filename if same filename already exists
if attachment_filename in filename_count:
filename_count[attachment_filename] = filename_count[attachment_filename] + 1
logging.debug("Same Filename %s", attachment_filename)
root, ext = os.path.splitext(attachment_filename)
attachment_filename = root + "(" + str(filename_count[attachment_filename]) + ")" + ext
else:
filename_count[attachment_filename] = 1
attachment_folder_name = os.path.join("attachments", self.getHashcode(), "")
attachment_folder_path = os.path.join(folder, attachment_folder_name)
attachments_tuple_for_html += [(attachment_folder_name + attachment_filename, cgi.escape(
encode_string(attachment_filename, attachment_filename_encoding)))] #TODO
if not os.path.exists(attachment_folder_path):
os.makedirs(attachment_folder_path)
attachment_path = attachment_folder_path + attachment_filename
attachment_file_disk = open(attachment_path, "wb")
attachment_file_disk.write(part.get_payload(decode=True))
logging.info("Saved attachment %s to %s", attachment_filename, attachment_path)
self.STATS.succesfully_safed_attachment()
except Exception as e:
successful = False
self.STATS.failed_to_safe_attachment()
logging.error("Failed to save attachment %s: %s", attachment_filename, e)
        return successful, attachments_tuple_for_html
| dserv01/BackupMailToHTML | SavableLazyMail.py | Python | gpl-3.0 | 10,434 | 0.007188 |
from __future__ import print_function
import numpy as np
import sys
import bisect
import datetime
import gzip
def my_print(s):
print("[" + str(datetime.datetime.now()) + "] " + s, file=sys.stderr)
if len(sys.argv) < 3:
print("Usage: process_mz_query.py dump_file[.gz] query_file")
exit(0)
my_print("Reading dump file from %s..." % sys.argv[1])
if sys.argv[1][-2:] == 'gz':
f = gzip.open(sys.argv[1], 'rb')
else:
f = open(sys.argv[1])
spectra = []
arr = []
for line in f:
arr = line.strip().split("|")
if len(arr) < 3:
continue
spectra.append( ( arr[0], np.array([ float(x) for x in arr[2].split(" ") ]), np.array([ float(x) for x in arr[1].split(" ") ]) ) )
f.close()
## at this point, spectra array contains triples of the form
## (group_id, list of mzs, list of intensities)
my_print("Reading and processing queries from %s..." % sys.argv[2])
def get_one_group_total(mz_lower, mz_upper, mzs, intensities):
return np.sum(intensities[ bisect.bisect_left(mzs, mz_lower) : bisect.bisect_right(mzs, mz_upper) ])
def get_all_totals(mz, tol, spectra):
mz_lower = mz - tol
mz_upper = mz + tol
return [ (x[0], get_one_group_total(mz_lower, mz_upper, x[1], x[2])) for x in spectra ]
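# Query file format (illustrative): one "<mz>,<tolerance>" pair per line, e.g.
#   500.3,0.01
# For every query one output line is printed, containing space-separated
# "<group_id>:<summed intensity>" pairs, one per spectrum group in the dump.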
with open(sys.argv[2]) as f:
for line in f:
arr = line.strip().split(",")
print(" ".join([ "%s:%.3f" % x for x in get_all_totals(float(arr[0]), float(arr[1]), spectra)]))
my_print("All done!")
exit(0)
| alexandrovteam/pyImagingMSpec | pyImagingMSpec/scripts/process_mz_query.py | Python | apache-2.0 | 1,468 | 0.017711 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.statistic import data_overview
from federatedml.util import consts, LOGGER
class QuantileBinningTool(QuantileBinning):
"""
Use for quantile binning data directly.
"""
def __init__(self, bin_nums=consts.G_BIN_NUM, param_obj: FeatureBinningParam = None,
abnormal_list=None, allow_duplicate=False):
if param_obj is None:
param_obj = FeatureBinningParam(bin_num=bin_nums)
super().__init__(params=param_obj, abnormal_list=abnormal_list, allow_duplicate=allow_duplicate)
self.has_fit = False
def fit_split_points(self, data_instances):
res = super(QuantileBinningTool, self).fit_split_points(data_instances)
self.has_fit = True
return res
def fit_summary(self, data_instances, is_sparse=None):
if is_sparse is None:
is_sparse = data_overview.is_sparse_data(data_instances)
LOGGER.debug(f"is_sparse: {is_sparse}")
f = functools.partial(self.feature_summary,
params=self.params,
abnormal_list=self.abnormal_list,
cols_dict=self.bin_inner_param.bin_cols_map,
header=self.header,
is_sparse=is_sparse)
summary_dict_table = data_instances.mapReducePartitions(f, self.copy_merge)
# summary_dict = dict(summary_dict.collect())
if is_sparse:
total_count = data_instances.count()
summary_dict_table = summary_dict_table.mapValues(lambda x: x.set_total_count(total_count))
return summary_dict_table
def get_quantile_point(self, quantile):
"""
Return the specific quantile point value
Parameters
----------
        quantile : float, 0 <= quantile <= 1
            The quantile position to look up, e.g. 0.5 for the median.
Returns
-------
return a dict of result quantile points.
eg.
quantile_point = {"x1": 3, "x2": 5... }
"""
if not self.has_fit:
raise RuntimeError("Quantile Binning Tool's split points should be fit before calling"
" get quantile points")
f = functools.partial(self._get_split_points,
allow_duplicate=self.allow_duplicate,
percentile_rate=[quantile])
quantile_points = dict(self.summary_dict.mapValues(f).collect())
quantile_points = {k: v[0] for k, v in quantile_points.items()}
return quantile_points
def get_median(self):
return self.get_quantile_point(0.5)
| FederatedAI/FATE | python/federatedml/feature/binning/quantile_tool.py | Python | apache-2.0 | 3,447 | 0.001451 |
# -*- coding: utf-8 -*-
# Author: Joris Jensen <jjensen@techfak.uni-bielefeld.de>
#
# License: BSD 3 clause
from __future__ import division
import math
from math import log
import numpy as np
from scipy.optimize import minimize
from .glvq import GlvqModel
from sklearn.utils import validation
class GmlvqModel(GlvqModel):
"""Generalized Matrix Learning Vector Quantization
Parameters
----------
prototypes_per_class : int or list of int, optional (default=1)
Number of prototypes per class. Use list to specify different numbers
per class.
initial_prototypes : array-like,
shape = [n_prototypes, n_features + 1], optional
Prototypes to start with. If not given initialization near the class
means. Class label must be placed as last entry of each prototype
initial_matrix : array-like, shape = [dim, n_features], optional
Relevance matrix to start with.
If not given random initialization for rectangular matrix and unity
for squared matrix.
regularization : float, optional (default=0.0)
Value between 0 and 1. Regularization is done by the log determinant
of the relevance matrix. Without regularization relevances may
degenerate to zero.
dim : int, optional (default=nb_features)
Maximum rank or projection dimensions
max_iter : int, optional (default=2500)
The maximum number of iterations.
gtol : float, optional (default=1e-5)
Gradient norm must be less than gtol before successful
termination of l-bfgs-b.
    beta : int, optional (default=2)
        Used inside phi: 1 / (1 + np.math.exp(-beta * x))
    C : array-like, shape = [2, 3], optional
        Weights for wrong classification, given as triples of the form
        (y_real, y_pred, weight). By default all weights are one, so you only
        need to specify the weights that differ from one.
display : boolean, optional (default=False)
Print information about the bfgs steps.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
w_ : array-like, shape = [n_prototypes, n_features]
Prototype vector, where n_prototypes in the number of prototypes and
n_features is the number of features
c_w_ : array-like, shape = [n_prototypes]
Prototype classes
classes_ : array-like, shape = [n_classes]
Array containing labels.
dim_ : int
Maximum rank or projection dimensions
omega_ : array-like, shape = [dim, n_features]
Relevance matrix
See also
--------
GlvqModel, GrlvqModel, LgmlvqModel
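    Examples
    --------
    A minimal usage sketch (assuming the scikit-learn estimator API; the data
    arrays are placeholders)::
        model = GmlvqModel(prototypes_per_class=1)
        model.fit(x_train, y_train)
        y_pred = model.predict(x_test)
        x_2d = model.project(x_test, dims=2)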
"""
def __init__(self, prototypes_per_class=1, initial_prototypes=None,
initial_matrix=None, regularization=0.0, dim=None,
max_iter=2500, gtol=1e-5, beta=2, C=None, display=False,
random_state=None):
super(GmlvqModel, self).__init__(prototypes_per_class,
initial_prototypes, max_iter,
gtol, beta, C, display, random_state)
self.regularization = regularization
self.initial_matrix = initial_matrix
self.initialdim = dim
def _optgrad(self, variables, training_data, label_equals_prototype,
random_state, lr_relevances=0, lr_prototypes=1):
n_data, n_dim = training_data.shape
variables = variables.reshape(variables.size // n_dim, n_dim)
nb_prototypes = self.c_w_.shape[0]
omega_t = variables[nb_prototypes:].conj().T
# dist = _squared_euclidean(training_data.dot(omega_t),
# variables[:nb_prototypes].dot(omega_t))
dist = self._compute_distance(training_data, variables[:nb_prototypes],
omega_t.T)
d_wrong = dist.copy()
d_wrong[label_equals_prototype] = np.inf
distwrong = d_wrong.min(1)
pidxwrong = d_wrong.argmin(1)
d_correct = dist
d_correct[np.invert(label_equals_prototype)] = np.inf
distcorrect = d_correct.min(1)
pidxcorrect = d_correct.argmin(1)
distcorrectpluswrong = distcorrect + distwrong
distcorectminuswrong = distcorrect - distwrong
mu = distcorectminuswrong / distcorrectpluswrong
mu = np.vectorize(self.phi_prime)(mu)
mu *= self.c_[label_equals_prototype.argmax(1), d_wrong.argmin(1)]
g = np.zeros(variables.shape)
distcorrectpluswrong = 4 / distcorrectpluswrong ** 2
if lr_relevances > 0:
gw = np.zeros(omega_t.T.shape)
for i in range(nb_prototypes):
idxc = i == pidxcorrect
idxw = i == pidxwrong
dcd = mu[idxw] * distcorrect[idxw] * distcorrectpluswrong[idxw]
dwd = mu[idxc] * distwrong[idxc] * distcorrectpluswrong[idxc]
if lr_relevances > 0:
difc = training_data[idxc] - variables[i]
difw = training_data[idxw] - variables[i]
gw -= np.dot(difw * dcd[np.newaxis].T, omega_t).T.dot(difw) - \
np.dot(difc * dwd[np.newaxis].T, omega_t).T.dot(difc)
if lr_prototypes > 0:
g[i] = dcd.dot(difw) - dwd.dot(difc)
elif lr_prototypes > 0:
g[i] = dcd.dot(training_data[idxw]) - \
dwd.dot(training_data[idxc]) + \
(dwd.sum(0) - dcd.sum(0)) * variables[i]
f3 = 0
if self.regularization:
f3 = np.linalg.pinv(omega_t.conj().T).conj().T
if lr_relevances > 0:
g[nb_prototypes:] = 2 / n_data \
* lr_relevances * gw - self.regularization * f3
if lr_prototypes > 0:
g[:nb_prototypes] = 1 / n_data * lr_prototypes \
* g[:nb_prototypes].dot(omega_t.dot(omega_t.T))
g = g * (1 + 0.0001 * random_state.rand(*g.shape) - 0.5)
return g.ravel()
def _optfun(self, variables, training_data, label_equals_prototype):
n_data, n_dim = training_data.shape
variables = variables.reshape(variables.size // n_dim, n_dim)
nb_prototypes = self.c_w_.shape[0]
omega_t = variables[nb_prototypes:] # .conj().T
# dist = _squared_euclidean(training_data.dot(omega_t),
# variables[:nb_prototypes].dot(omega_t))
dist = self._compute_distance(training_data, variables[:nb_prototypes],
omega_t)
d_wrong = dist.copy()
d_wrong[label_equals_prototype] = np.inf
distwrong = d_wrong.min(1)
d_correct = dist
d_correct[np.invert(label_equals_prototype)] = np.inf
distcorrect = d_correct.min(1)
distcorrectpluswrong = distcorrect + distwrong
distcorectminuswrong = distcorrect - distwrong
mu = distcorectminuswrong / distcorrectpluswrong
if self.regularization > 0:
reg_term = self.regularization * log(
np.linalg.det(omega_t.conj().T.dot(omega_t)))
return np.vectorize(self.phi)(mu).sum(0) - reg_term # f
return np.vectorize(self.phi)(mu).sum(0)
def _optimize(self, x, y, random_state):
if not isinstance(self.regularization,
float) or self.regularization < 0:
raise ValueError("regularization must be a positive float ")
nb_prototypes, nb_features = self.w_.shape
if self.initialdim is None:
self.dim_ = nb_features
elif not isinstance(self.initialdim, int) or self.initialdim <= 0:
raise ValueError("dim must be an positive int")
else:
self.dim_ = self.initialdim
if self.initial_matrix is None:
if self.dim_ == nb_features:
self.omega_ = np.eye(nb_features)
else:
self.omega_ = random_state.rand(self.dim_, nb_features) * 2 - 1
else:
self.omega_ = validation.check_array(self.initial_matrix)
if self.omega_.shape[1] != nb_features: # TODO: check dim
raise ValueError(
"initial matrix has wrong number of features\n"
"found=%d\n"
"expected=%d" % (self.omega_.shape[1], nb_features))
variables = np.append(self.w_, self.omega_, axis=0)
label_equals_prototype = y[np.newaxis].T == self.c_w_
method = 'l-bfgs-b'
res = minimize(
fun=lambda vs:
self._optfun(vs, x, label_equals_prototype=label_equals_prototype),
jac=lambda vs:
self._optgrad(vs, x, label_equals_prototype=label_equals_prototype,
random_state=random_state,
lr_prototypes=1, lr_relevances=0),
method=method, x0=variables,
options={'disp': self.display, 'gtol': self.gtol,
'maxiter': self.max_iter})
n_iter = res.nit
res = minimize(
fun=lambda vs:
self._optfun(vs, x, label_equals_prototype=label_equals_prototype),
jac=lambda vs:
self._optgrad(vs, x, label_equals_prototype=label_equals_prototype,
random_state=random_state,
lr_prototypes=0, lr_relevances=1),
method=method, x0=res.x,
options={'disp': self.display, 'gtol': self.gtol,
'maxiter': self.max_iter})
n_iter = max(n_iter, res.nit)
res = minimize(
fun=lambda vs:
self._optfun(vs, x, label_equals_prototype=label_equals_prototype),
jac=lambda vs:
self._optgrad(vs, x, label_equals_prototype=label_equals_prototype,
random_state=random_state,
lr_prototypes=1, lr_relevances=1),
method=method, x0=res.x,
options={'disp': self.display, 'gtol': self.gtol,
'maxiter': self.max_iter})
n_iter = max(n_iter, res.nit)
out = res.x.reshape(res.x.size // nb_features, nb_features)
self.w_ = out[:nb_prototypes]
self.omega_ = out[nb_prototypes:]
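        # Normalize omega so that the trace of omega_.T.dot(omega_) equals one.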
self.omega_ /= math.sqrt(
np.sum(np.diag(self.omega_.T.dot(self.omega_))))
self.n_iter_ = n_iter
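    # Squared distance in the space mapped by omega:
    # d(x, w_i) = ||(x - w_i).dot(omega.T)||^2, returned as an array of shape
    # [n_samples, n_prototypes].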
def _compute_distance(self, x, w=None, omega=None):
if w is None:
w = self.w_
if omega is None:
omega = self.omega_
nb_samples = x.shape[0]
nb_prototypes = w.shape[0]
distance = np.zeros([nb_prototypes, nb_samples])
for i in range(nb_prototypes):
distance[i] = np.sum((x - w[i]).dot(omega.T) ** 2, 1)
return distance.T
def project(self, x, dims, print_variance_covered=False):
"""Projects the data input data X using the relevance matrix of trained
model to dimension dim
Parameters
----------
x : array-like, shape = [n,n_features]
input data for project
dims : int
dimension to project to
print_variance_covered : boolean
flag to print the covered variance of the projection
Returns
--------
        C : array, shape = [n, dims]
            The input data projected onto the leading dims eigenvectors of
            omega_.T.dot(omega_), scaled by the square roots of the eigenvalues.
"""
v, u = np.linalg.eig(self.omega_.conj().T.dot(self.omega_))
idx = v.argsort()[::-1]
if print_variance_covered:
            print('variance covered by projection:',
v[idx][:dims].sum() / v.sum() * 100)
return x.dot(u[:, idx][:, :dims].dot(np.diag(np.sqrt(v[idx][:dims]))))
| MrNuggelz/sklearn-glvq | sklearn_lvq/gmlvq.py | Python | bsd-3-clause | 12,014 | 0.000083 |
""":mod:`libearth.sanitizer` --- Sanitize HTML tags
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import cgi
try:
import htmlentitydefs
import HTMLParser
except ImportError:
from html import entities as htmlentitydefs, parser as HTMLParser
import re
try:
import urlparse
except ImportError:
from urllib import parse as urlparse
from .compat import unichr, xrange
__all__ = 'HtmlSanitizer', 'MarkupTagCleaner', 'clean_html', 'sanitize_html'
def clean_html(html):
"""Strip *all* markup tags from ``html`` string.
That means, it simply makes the given ``html`` document a plain text.
:param html: html string to clean
:type html: :class:`str`
:returns: cleaned plain text
:rtype: :class:`str`
"""
parser = MarkupTagCleaner()
parser.feed(html)
return ''.join(parser.fed)
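# For example (illustrative): clean_html('<p>Hello <b>world</b>!</p>')
# returns 'Hello world!' -- all tags are stripped and only character data
# (plus decoded entities) is kept.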
def sanitize_html(html, base_uri=None):
"""Sanitize the given ``html`` string. It removes the following
    tags and attributes that are neither secure nor useful for RSS reader layout:
- ``<script>`` tags
- ``display: none;`` styles
- JavaScript event attributes e.g. ``onclick``, ``onload``
- ``href`` attributes that start with ``javascript:``, ``jscript:``,
``livescript:``, ``vbscript:``, ``data:``, ``about:``, or ``mocha:``.
Also, it rebases all links on the ``base_uri`` if it's given.
:param html: html string to sanitize
:type html: :class:`str`
:param base_uri: an optional base url to be used throughout the document
for relative url addresses
:type base_uri: :class:`str`
    :returns: the sanitized html string
:rtype: :class:`str`
.. versionadded:: 0.4.0
The ``base_uri`` parameter.
"""
parser = HtmlSanitizer(base_uri)
parser.feed(html)
return ''.join(parser.fed)
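# For example (illustrative):
# sanitize_html('<script>alert(1)</script><p onclick="x()">hi</p>') returns
# '<p>hi</p>' -- the <script> element is dropped entirely and the JavaScript
# event attribute is removed, while the harmless markup is preserved.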
class MarkupTagCleaner(HTMLParser.HTMLParser):
"""HTML parser that is internally used by :func:`clean_html()` function."""
entity_map = htmlentitydefs.name2codepoint
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
try:
codepoint = self.entity_map[name]
except KeyError:
pass
else:
self.fed.append(unichr(codepoint))
def handle_charref(self, name):
if name.startswith('x'):
codepoint = int(name[1:], 16)
else:
codepoint = int(name)
self.fed.append(unichr(codepoint))
class HtmlSanitizer(HTMLParser.HTMLParser):
"""HTML parser that is internally used by :func:`sanitize_html()`
function.
"""
#: (:class:`re.RegexObject`) The regular expression pattern that matches to
#: disallowed CSS properties.
DISALLOWED_STYLE_PATTERN = re.compile(
r'(^|;)\s*display\s*:\s*[a-z-]+\s*(?:;\s*|$)',
re.IGNORECASE
)
#: (:class:`collections.Set`) The set of disallowed URI schemes e.g.
#: ``javascript:``.
DISALLOWED_SCHEMES = frozenset([
'javascript', 'jscript', 'livescript', 'vbscript', 'data',
'about', 'mocha'
])
def __init__(self, base_uri):
HTMLParser.HTMLParser.__init__(self)
self.base_uri = base_uri
self.fed = []
self.ignore = False
def handle_starttag(self, tag, attrs):
if tag == 'script':
self.ignore = True
return
elif self.ignore:
return
remove_css = self.DISALLOWED_STYLE_PATTERN.sub
self.fed.extend(('<', tag))
disallowed_schemes = tuple(scheme + ':'
for scheme in self.DISALLOWED_SCHEMES)
if self.base_uri is not None and tag in ('a', 'link') and attrs:
for i in xrange(len(attrs)):
a, v = attrs[i]
if a == 'href':
attrs[i] = a, urlparse.urljoin(self.base_uri, v)
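        # Re-emit the remaining attributes: JavaScript event handlers (on*) are
        # dropped, href values using a disallowed scheme are blanked, and
        # disallowed CSS is stripped from style attributes.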
self.fed.extend(
chunk
for name, value in attrs
if not name.startswith('on')
for chunk in (
[' ', name]
if value is None else
[
' ', name, '="', cgi.escape(
('' if value.startswith(disallowed_schemes) else value)
if name == 'href' else
(remove_css('\\1', value) if name == 'style' else value)
), '"'
]
)
)
self.fed.append('>')
def handle_endtag(self, tag):
if tag == 'script':
self.ignore = False
return
self.fed.extend(('</', tag, '>'))
def handle_data(self, d):
if self.ignore:
return
self.fed.append(d)
def handle_entityref(self, name):
if self.ignore:
return
self.fed.extend(('&', name, ';'))
def handle_charref(self, name):
if self.ignore:
return
        self.fed.extend(('&#', name, ';'))
def handle_comment(self, data):
if self.ignore:
return
self.fed.extend(('<!-- ', data, ' -->'))
| earthreader/libearth | libearth/sanitizer.py | Python | gpl-2.0 | 5,176 | 0.000193 |
import couchdb
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
import json
import matplotlib.path as mplPath
import numpy as np
import requests
from textblob import TextBlob
import Queue
import time, socket, threading
import re
from pycorenlp import StanfordCoreNLP
from google.cloud import language
import random
with open ('polygon.json') as data_file:
polygon=json.load(data_file)
language_client = language.Client()
#Connect to couchdb server
couch = couchdb.Server('http://127.0.0.1:5984')
data_db = couch['twitter_data']
result_db = couch['suburb_data']
count = 0
num = 0
fitness=['fitness','gym','workout', 'push up', 'deadlift','bench press', 'squat','crunch','diets','weight loss','body building','yoga']
sports=['football','basketball','soccer','cricket','baseball','tennis','rugby','golf','badminton','table tennis']
outdoor=['outdoor', 'camping','trekking','swimming','surfing','running','cycling','climbing','hiking','fishing']
keywords={'fitness':fitness,'sports':sports,'outdoor':outdoor}
# Connect to NLP server
nlp = StanfordCoreNLP('http://localhost:9000')
print ' Connect to NLP server '
q1=Queue.Queue()
q2=Queue.Queue()
q3=Queue.Queue()
q4=Queue.Queue()
q5=Queue.Queue()
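# Five work queues, one per worker thread; the TCP handler below distributes
# incoming tweets across them at random.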
def dowork(q):
while True:
while not q.empty():
print "Read from queue"
#read from queue
try:
queue_data = q.get()
try:
json_data = json.loads(queue_data)
print " Load data"
except:
print " Fail load data"
continue
postcode = 0
text = json_data['text']
coordinates = json_data['coordinates']
print coordinates
_id = json_data['id']
lang = json_data['lang']
if lang!= "en":
print "Not english"
continue
place = json_data['place']
is_finance = json_data['is_finance']
created_at = json_data['created_at']
encodetext=text.encode("ascii","ignore")
plaintext = re.sub('http.*', '', encodetext) + '.'
# Get postcode
if coordinates!= 'null':
for a in polygon['features']:
bbPath = mplPath.Path(np.array(a['geometry']['coordinates'][0][0]))
#print ("%s in %s" %(bbPath.contains_point(coordinates),a['properties']['postcode']))
if bbPath.contains_point(coordinates):
print "Contains point"
postcode = str(a['properties']['postcode'].encode('ascii'))
print ("%s in %s" %(bbPath.contains_point(coordinates),a['properties']['postcode']))
break
# Search for keywords
for k in keywords:
for b in keywords.get(k):
if b in text.lower():
for suburbs in result_db:
doc = result_db.get(suburbs)
if postcode == doc['postcode']:
doc[k] += 1
result_db.save(doc)
searched_for_brunch = 'true'
print " Found one %s result in %s" %(k, postcode)
break
else:
searched_for_brunch = 'false'
print "Finish postcode and keywords"
# Stanford NLP
res = nlp.annotate(plaintext,
properties={
'annotators': 'sentiment',
'outputFormat': 'json',
'timeout': '1000' })
sentiment_value = 0
tweets = ""
count_tweet_sentence = 0
sentiment_desc=""
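                # Stanford CoreNLP sentimentValue is an integer from 0 (very negative)
                # to 4 (very positive); the per-sentence values are averaged below.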
for s in res["sentences"]:
sentiment_value += int(s['sentimentValue'].encode('ascii'))
tweets += " ".join([t["word"] for t in s["tokens"]])
count_tweet_sentence = s["index"]
if plaintext != '' and count_tweet_sentence == 0:
count_tweet_sentence = 1
if count_tweet_sentence != 0:
# Calculate sentiment value
average_sentiment_value= sentiment_value/count_tweet_sentence
if sentiment_value/count_tweet_sentence == 0:
sentiment_desc = "Very negative"
if sentiment_value/count_tweet_sentence ==1:
sentiment_desc = "Negative"
if sentiment_value/count_tweet_sentence ==2:
sentiment_desc = "Neutral"
if sentiment_value/count_tweet_sentence ==3:
sentiment_desc = "Positive"
if sentiment_value/count_tweet_sentence ==4:
sentiment_desc = "Very positive"
print "tweets: %s has sentiment value %d" % (tweets, sentiment_value/count_tweet_sentence)
google_score=0
magnitude = 0
                    # Google natural language API
document = language_client.document_from_text(plaintext)
sentiment = document.analyze_sentiment().sentiment
google_score = sentiment.score
magnitude = sentiment.magnitude
print "%s has google score of %s" % (plaintext, str(google_score))
# Textblob
b=TextBlob(plaintext)
polarity = b.sentiment[0]
subjectivity = b.sentiment[1]
print "Save textblob data"
tweet_data = {'id':_id, 'text':plaintext, 'coordinates':coordinates, 'postcode':postcode, 'lang':lang,'city':place, 'is_finance':is_finance, 'created_at':created_at,
'searched_for_brunch':searched_for_brunch, 'sentiment_value':average_sentiment_value, 'sentiment':sentiment_desc, 'sentiment_score_google':google_score,
'magnitude':magnitude, 'polarity':polarity, 'subjectivity':subjectivity}
try:
data_db[str(_id)] = tweet_data
print ' Analyzed and saved one tweet to database'
except:
print "Skip update duplicate"
except Exception as e:
print e
continue
print "None in queue"
def tcplink(sock, addr):
print 'Accept new connection from %s:%s...' % addr
sock.send('Welcome!')
while True:
data = sock.recv(100000)
if data == 'exit' :
break
if data:
# Distribute work to threads
x = random.randint(1,5)
if x == 1:
q1.put(data)
print "Put to queue 1"
if x == 2:
q2.put(data)
print "Put to queue 2"
if x == 3:
q3.put(data)
print "Put to queue 3"
if x == 4:
q4.put(data)
print "Put to queue 4"
if x == 5:
q5.put(data)
print "Put to queue 5"
print "Disconnected"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('0.0.0.0',9999))
s.listen(15)
# start 5 worker threads
a=threading.Thread(target=dowork,args=(q1,))
a.start()
print " Start process 1 analyzing message"
b=threading.Thread(target=dowork,args=(q2,))
b.start()
print " Start process 2 analyzing message"
c=threading.Thread(target=dowork,args=(q3,))
c.start()
print " Start process 3 analyzing message"
d=threading.Thread(target=dowork,args=(q4,))
d.start()
print " Start process 4 analyzing message"
e=threading.Thread(target=dowork,args=(q5,))
e.start()
print " Start process 5 analyzing message"
# Continuely listen for harvest program connection
while True:
sock, addr = s.accept()
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
| COMP90024CloudComputing/Submit_Cloud_Computing | analysis.py | Python | apache-2.0 | 8,425 | 0.01543 |
# encoding: UTF-8
'''
1. Contract selection
Write the futures products you want to subscribe to into Contracts_init.json. For example, to subscribe to pp and IF, write
{
    "pp": "0",
    "IF": "0"
}
2. Dominant contract detection
After starting the program, click the 'Contract Init' (合约初始化) button. The program fetches futures data from DataYes (通联), determines the dominant contract automatically and writes it back into Contracts_init.json.
Note: DataYes picks the dominant contract by open interest, while this program uses the previous day's trading volume; a message is printed when the two disagree.
3. Contract subscription
4. Tick storage
'''
import json
import os
import pymongo
import tushare as ts
# ts.set_token('575593eb7696aec7339224c0fac2313780d8645f68b77369dcb35f8bcb419a0b')
ts.set_token('ced15aa738976abf2136cc9e197fbcd34776e0f8183c7660b7fdcd626a715b3b') # paolo
import time
from uiBasicWidget import QtGui, QtCore, BasicCell
from eventEngine import *
from ctaAlgo.ctaBase import *
from vtConstant import *
from vtGateway import VtSubscribeReq
########################################################################
class DataRecorder(QtGui.QFrame):
"""
    Tool for recording historical data (based on the CTA strategy framework).
    It can be run standalone and records tick data.
"""
    # Basic strategy parameters
    name = u'期货合约Tick订阅@存储'           # strategy instance name
tickDbName = TICK_DB_NAME
# barDbName = MINUTE_DB_NAME
signal = QtCore.pyqtSignal(type(Event()))
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(DataRecorder, self).__init__(parent)
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.ctaEngine = self.mainEngine.ctaEngine
        self.ctpConnected = False        # whether CTP has been logged in
        self.contractsDict = {}          # dict mapping subscribed symbols to their dominant contracts
        self.initUi()
        self.registerEvent()
        # write a log entry
self.writeCtaLog(u'CTA引擎启动成功')
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'CTA@ Tick订阅&存储')
# 按钮
ctpButton = QtGui.QPushButton(u'登录CTP')
mongoButton = QtGui.QPushButton(u'连接数据库')
initButton = QtGui.QPushButton(u'合约初始化 (判断主力合约)')
startButton = QtGui.QPushButton(u'启动订阅')
stopButton = QtGui.QPushButton(u'停止订阅')
ctpButton.clicked.connect(self.ctpConnect)
mongoButton.clicked.connect(self.dbConnect)
        initButton.clicked.connect(self.contractsInit)       # contract initialization / dominant-contract detection
        startButton.clicked.connect(self.startAll)
        stopButton.clicked.connect(self.stopAll)
        # Subscribed-contract display (two monitors are used because many contracts are subscribed)
        self.symbolMonitor1 = SymbolMonitor()
        self.symbolMonitor2 = SymbolMonitor()
        # Log monitor for the CTA component
        self.ctaLogMonitor = QtGui.QTextEdit()
        self.ctaLogMonitor.setReadOnly(True)
        self.ctaLogMonitor.setMaximumHeight(200)
        # Layout
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(ctpButton)
hbox1.addWidget(mongoButton)
hbox1.addWidget(initButton)
hbox1.addStretch()
# hbox2 = QtGui.QHBoxLayout()
# hbox2.addWidget(initButton)
# hbox2.addStretch()
hbox3 = QtGui.QHBoxLayout()
hbox3.addWidget(startButton)
hbox3.addWidget(stopButton)
hbox3.addStretch()
hbox4 = QtGui.QHBoxLayout()
hbox4.addWidget(self.symbolMonitor1)
hbox4.addWidget(self.symbolMonitor2)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox1)
# vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
vbox.addLayout(hbox4)
vbox.addWidget(self.ctaLogMonitor)
self.setLayout(vbox)
#----------------------------------------------------------------------
def dbConnect(self):
"""连接MongoDB数据库"""
if not self.mainEngine.dbClient:
try:
self.mainEngine.dbConnect()
self.writeCtaLog(u'MongoDB连接成功')
except pymongo.errors.ConnectionFailure:
self.writeCtaLog(u'MongoDB连接失败')
#----------------------------------------------------------------------
def ctpConnect(self):
        # Log in to CTP
self.mainEngine.connect('CTP')
self.ctpConnected = True
self.writeCtaLog(u'CTP登录成功')
#----------------------------------------------------------------------
def contractsInit(self):
"""获取期货合约"""
# 载入json文件
fileName = 'Contracts_init.json'
try:
f = open(fileName, 'r')
except IOError:
self.writeCtaLog(u'读取合约初始化信息出错,请检查')
return
        # Parse the json file
        self.contractsDict = json.load(f)
        f.close()
        # Get the previous trading day (lastDate)
todayDate = time.strftime('%Y-%m-%d',time.localtime())
mt = ts.Master()
Cal = mt.TradeCal(exchangeCD='XSGE',beginDate=''.join(todayDate.split('-')),endDate=''.join(todayDate.split('-')),field='')
lastDate = Cal.at[0, 'prevTradeDate']
lastDate = ''.join(lastDate.split('-'))
        # Determine the dominant contract for each product
st = ts.Market()
for contract in self.contractsDict.keys():
data = st.MktMFutd(tradeDate=lastDate,contractObject=contract,field='ticker,mainCon,turnoverVol')
            # dominant contract by open interest (DataYes)
            ticker1 = data[data['mainCon'] == 1]['ticker'].values
            # dominant contract by previous day's trading volume
            ticker2 = data.at[data['turnoverVol'].argmax(), 'ticker']
            # default to the volume-based dominant contract
            self.contractsDict[contract] = unicode(ticker2)
            # report when the volume-based and open-interest-based choices differ
if ticker1 != ticker2:
self.writeCtaLog(u'期货 %s: 请确认主力合约(默认使用成交量):\n %s -通联持仓主力\n %s -昨日成交量主力' % (contract, ticker1, ticker2))
print u'期货 %s: 请确认主力合约(默认使用成交量):\n %s -通联持仓主力\n %s -昨日成交量主力' % (contract, ticker1, ticker2)
print data
        # Write the result back to file
        f = json.dumps(self.contractsDict)
        file = open(fileName, 'w')
        file.write(f)
        file.close()
        self.writeCtaLog(u'合约初始化成功')
        # (Two monitors are used to display the subscribed contracts because there are many of them.)
        # Only the dominant contract codes are shown; tick update times etc. are omitted here.
contractsDict1 = {}
contractsDict2 = {}
total = len(self.contractsDict)
self.writeCtaLog(u'订阅合约数量: %s' % total)
for i, (symbol, contract) in enumerate(self.contractsDict.items()):
if i < (total + 1) / 2:
contractsDict1[symbol] = contract
else:
contractsDict2[symbol] = contract
        # Fill the monitors
self.symbolMonitor1.contractsDict = contractsDict1
self.symbolMonitor1.updateTable()
self.symbolMonitor2.contractsDict = contractsDict2
self.symbolMonitor2.updateTable()
#----------------------------------------------------------------------
def startAll(self):
if self.ctpConnected is False:
self.writeCtaLog(u'未登录CTP, 期货Tick 订阅失败')
return
if self.mainEngine.dbClient is None:
self.writeCtaLog(u'未连接数据库, 期货Tick 订阅失败')
return
        # Subscribe to the contracts
        print self.contractsDict.values()  # print all subscribed contracts
for contract in self.contractsDict.values():
try:
# print contract
req = VtSubscribeReq()
req.symbol = contract
self.mainEngine.subscribe(req, 'CTP')
except:
self.writeCtaLog(u'期货Tick , 合约%s 订阅失败' %(contract))
self.eventEngine.register(EVENT_TICK, self.procecssTickEvent)
self.writeCtaLog(u'期货Tick 订阅成功')
#----------------------------------------------------------------------
def stopAll(self):
        # Unsubscribe
self.eventEngine.unregister(EVENT_TICK, self.procecssTickEvent)
self.writeCtaLog(u'期货Tick 取消订阅')
#----------------------------------------------------------------------
def insertTick(self, tick, symbol):
"""向数据库中插入tick数据"""
self.ctaEngine.insertData(self.tickDbName, symbol, tick)
#----------------------------------------------------------------------
def insertBar(self, bar, symbol):
"""向数据库中插入bar数据"""
self.ctaEngine.insertData(self.barDbName, symbol, bar)
#----------------------------------------------------------------------
def procecssTickEvent(self, event):
"""处理行情推送"""
tick = event.dict_['data']
self.onTick(tick)
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送"""
# 收到Tick后,首先插入到数据库里
# print tick.symbol, 'tick.symbol'
self.insertTick(tick, tick.symbol)
# # 计算K线
# tickMinute = tick.datetime.minute
#
# if tickMinute != self.barMinute: # 如果分钟变了,则把旧的K线插入数据库,并生成新的K线
# if self.bar:
# self.onBar(self.bar)
#
# bar = CtaBarData() # 创建新的K线,目的在于防止之前K线对象在插入Mongo中被再次修改,导致出错
# bar.vtSymbol = tick.vtSymbol
# bar.symbol = tick.symbol
# bar.exchange = tick.exchange
#
# bar.open = tick.lastPrice
# bar.high = tick.lastPrice
# bar.low = tick.lastPrice
# bar.close = tick.lastPrice
#
# bar.date = tick.date
# bar.time = tick.time
# bar.datetime = tick.datetime # K线的时间设为第一个Tick的时间
#
# bar.volume = tick.volume
# bar.openInterest = tick.openInterest
#
# self.bar = bar # 这种写法为了减少一层访问,加快速度
# self.barMinute = tickMinute # 更新当前的分钟
#
# else: # 否则继续累加新的K线
# bar = self.bar # 写法同样为了加快速度
#
# bar.high = max(bar.high, tick.lastPrice)
# bar.low = min(bar.low, tick.lastPrice)
# bar.close = tick.lastPrice
#
# bar.volume = bar.volume + tick.volume # 成交量是累加的
# bar.openInterest = tick.openInterest # 持仓量直接更新
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录CTA日志"""
# content = self.name + ':' + content
self.ctaEngine.writeCtaLog(content)
#----------------------------------------------------------------------
def updateCtaLog(self, event):
"""更新CTA相关日志"""
log = event.dict_['data']
content = '\t'.join([log.logTime, log.logContent])
self.ctaLogMonitor.append(content)
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.signal.connect(self.updateCtaLog)
self.eventEngine.register(EVENT_CTA_LOG, self.signal.emit)
########################################################################
class SymbolMonitor(QtGui.QTableWidget):
#----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(SymbolMonitor, self).__init__(parent)
self.contractsDict = {}
self.initTable()
#----------------------------------------------------------------------
def initTable(self):
"""初始化表格"""
# 设置表格的列数
self.setColumnCount(2)
# 设置列表头
labels = [u'品种', u'合约']
self.setHorizontalHeaderLabels(labels)
# 关闭左边的垂直表头
self.verticalHeader().setVisible(False)
# 设为不可编辑
self.setEditTriggers(self.NoEditTriggers)
# 设为行交替颜色
self.setAlternatingRowColors(True)
#----------------------------------------------------------------------
def updateTable(self):
for i in range(self.rowCount()):
self.removeRow(0)
for symbol, contract in self.contractsDict.items():
self.insertRow(0)
# print symbol, contract
self.setItem(0, 0, BasicCell(symbol))
self.setItem(0, 1, BasicCell(contract))
# self.horizontalHeader().resizeSections(QtGui.QHeaderView.ResizeToContents)
if __name__ == '__main__':
pass
# dr = DataRecorder()
# dr.contractsInit() | freeitaly/Trading-System | vn.trader/dataRecorderAlone/DataRecorder -Paolo版本/uiDataRecorder1.py | Python | mit | 13,588 | 0.003854 |
import _plotly_utils.basevalidators
class ArrayminussrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="arrayminussrc", parent_name="bar.error_x", **kwargs
):
super(ArrayminussrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/bar/error_x/_arrayminussrc.py | Python | mit | 429 | 0 |
from optparse import make_option
from optparse import OptionParser
import logging
#import os
#import sys
import contextlib
#import hashlib
import datetime
from django.core.management.base import BaseCommand
from django.conf import settings
#from django.db.models import Q
import dateutil
import netCDF4
from lizard_neerslagradar import netcdf
logger = logging.getLogger(__name__)
class Command(BaseCommand):
args = ""
help = "Create a geotiff per timestep from the radar.nc file."
option_list = BaseCommand.option_list + (
make_option(
"--from", action="store", type="string",
dest="from_", default="2011-01-07",
help="Generate geotiffs starting from this datetime. "
"Use a string in the format YYYY-MM-DD HH:MM "
"(fuzzy substrings are allowed)"),
make_option("--skip-existing", action="store_true",
dest="skip_existing", default=False,
help="Skip existing geotiffs"),
)
def handle(self, *args, **options):
parser = OptionParser(option_list=self.option_list)
(options, args) = parser.parse_args()
logger.warn("IGNORED from=%s", options.from_)
logger.warn("IGNORED skip_existing=%s", options.skip_existing)
time_from = dateutil.parser.parse('2011-01-07T00:00:00.000Z')
time_to = dateutil.parser.parse('2011-01-08T00:00:00.000Z')
times_list = [time_from]
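        # Build the list of timestamps to process: one entry every 5 minutes
        # from time_from up to and including time_to.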
if time_to:
interval = datetime.timedelta(minutes=5)
time = time_from
while time < time_to:
time += interval
times_list.append(time)
nc = netCDF4.Dataset(settings.RADAR_NC_PATH, 'r')
with contextlib.closing(nc):
for time in times_list:
try:
path = netcdf.time_2_path(time)
netcdf.mk_geotiff(nc, time, path)
logger.info('Created geotiff for {}'.format(time))
except:
logger.exception(
'While creating geotiff for {}'.format(time))
| lizardsystem/lizard-neerslagradar | lizard_neerslagradar/management/commands/create_geotiffs.py | Python | gpl-3.0 | 2,139 | 0.002338 |
from pythonforandroid.toolchain import shprint, current_directory
from pythonforandroid.recipe import Recipe
from multiprocessing import cpu_count
from os.path import exists
import sh
class LibSecp256k1Recipe(Recipe):
url = 'https://github.com/bitcoin-core/secp256k1/archive/master.zip'
def build_arch(self, arch):
super(LibSecp256k1Recipe, self).build_arch(arch)
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
if not exists('configure'):
shprint(sh.Command('./autogen.sh'), _env=env)
shprint(
sh.Command('./configure'),
'--host=' + arch.toolchain_prefix,
'--prefix=' + self.ctx.get_python_install_dir(),
'--enable-shared',
'--enable-module-recovery',
'--enable-experimental',
'--enable-module-ecdh',
_env=env)
shprint(sh.make, '-j' + str(cpu_count()), _env=env)
libs = ['.libs/libsecp256k1.so']
self.install_libs(arch, *libs)
recipe = LibSecp256k1Recipe()
| wexi/python-for-android | pythonforandroid/recipes/libsecp256k1/__init__.py | Python | mit | 1,139 | 0 |
import numpy as np
import scipy.interpolate as interp
import warnings
from astropy.io import fits
def concentration(radii, phot, eta_radius=0.2, eta_radius_factor=1.5, interp_kind='linear', add_zero=False):
"""
Calculates the concentration parameter
    C = 5 * log10(r_80 / r_20)
Inputs:
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
interp_kind -- kind of interpolation; passed to scipy.interpolate.interp1d.
Some options are linear, quadratic, and cubic.
add_zero -- add a 0 radius and zero flux point to their respective arrays
to help with interpolation at small radii; should only matter for quadratic or
cubic interpolation
"""
assert len(radii) == len(phot)
assert np.all(radii > 0)
assert np.all(phot > 0)
if add_zero:
radii = np.insert(radii, 0, 0)
phot = np.insert(phot, 0, 0)
eta_vals = eta(radii, phot)
if np.any(eta_vals < 0.2):
eta_interp = interp.interp1d(eta_vals, radii, kind=interp_kind)
eta_r = eta_radius_factor * eta_interp(eta_radius)
else:
warnings.warn("eta is never less than " + str(eta_radius) + ". Using lowest eta value as proxy")
eta_r = eta_radius_factor * radii[np.argmin(eta_vals)]
phot_interp = interp.interp1d(radii, phot, kind=interp_kind)
if eta_r < np.max(radii):
maxphot = phot_interp(eta_r)
else:
maxphot = np.max(phot)
norm_phot = phot / maxphot
radius_interp = interp.interp1d(norm_phot, radii, kind=interp_kind)
r20 = radius_interp(0.2)
r80 = radius_interp(0.8)
assert r20 < r80 < np.max(radii)
c = 5 * np.log10(r80 / r20)
return c
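# Example of the scaling (illustrative): if r80 is exactly twice r20,
# C = 5 * log10(2) ~= 1.5; more centrally concentrated light profiles give
# larger r80/r20 and hence larger C.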
def eta(radii, phot):
"""
eta = I(r) / \bar{I}(<r)
radii -- 1d array of aperture photometry radii
phot -- 1d array of aperture photometry fluxes
this is currently calculated quite naively, and probably could be done better
"""
phot_area = np.pi * radii**2
phot_area_diff = np.ediff1d(phot_area, to_begin=phot_area[0])
I_bar = phot / (phot_area)
I_delta_r = np.ediff1d(phot, to_begin=phot[0]) / phot_area_diff
I_r = (I_delta_r[:-1] + I_delta_r[1:]) / 2 #lost last array element here
I_r = np.append(I_r, I_delta_r[-1]) #added it back in here
eta = I_r / I_bar
return eta
def find_eta(eta_val, radii, phot):
eta_interp = interp.interp1d(eta(radii, phot), radii)
return eta_interp(eta_val)
def snr(name):
"""
    name : base name shared by the <name>_bs.fits image and the <name>.apphot
    aperture-photometry file
"""
#first calculate the image uncertainty using the MAD
hdulist = fits.open(name + '_bs.fits')
im_med = np.median(hdulist[0].data)
im_err = np.median(np.abs(hdulist[0].data - im_med))
#now get the total flux
apphot = np.loadtxt(name + ".apphot", usecols=[0,1])
radii = apphot[:,0]
phot = apphot[:,1]
try:
eta_rad = find_eta(0.2, radii, phot)
if eta_rad > np.max(radii)/1.5:
eta_rad = np.max(radii)/1.5
except ValueError:
eta_rad = 1.0
phot_interp = interp.interp1d(radii, phot)
total_phot = phot_interp(1.5*eta_rad)
return total_phot / np.sqrt(np.pi*(1.5*eta_rad)**2 * im_err**2)
| astronomeralex/morphology-software | morphology.py | Python | mit | 3,255 | 0.009831 |
#!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates out a Closure deps.js file given a list of JavaScript sources.
Paths can be specified as arguments or (more commonly) specifying trees
with the flags (call with --help for descriptions).
Usage: depswriter.py [path/to/js1.js [path/to/js2.js] ...]
"""
import logging
import optparse
import os
import posixpath
import shlex
import sys
import source
import treescan
def MakeDepsFile(source_map):
"""Make a generated deps file.
Args:
source_map: A dict map of the source path to source.Source object.
Returns:
str, A generated deps file source.
"""
# Write in path alphabetical order
paths = source_map.keys()
paths.sort()
lines = []
for path in paths:
js_source = source_map[path]
# We don't need to add entries that don't provide anything.
if js_source.provides:
lines.append(_GetDepsLine(path, js_source))
return ''.join(lines)
def _GetDepsLine(path, js_source):
"""Get a deps.js file string for a source."""
provides = list(js_source.provides)
provides.sort()
requires = list(js_source.requires)
requires.sort()
return 'goog.addDependency(\'%s\', %s, %s);\n' % (path, provides, requires)
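# A generated line looks like (illustrative path and namespaces):
#   goog.addDependency('a/b.js', ['goog.dom'], ['goog.array']);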
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
parser.add_option('--root',
dest='roots',
default=[],
action='append',
help='A root directory to scan for JS source files. '
'Paths of JS files in generated deps file will be '
'relative to this path. This flag may be specified '
'multiple times.')
parser.add_option('--root_with_prefix',
dest='roots_with_prefix',
default=[],
action='append',
help='A root directory to scan for JS source files, plus '
'a prefix (if either contains a space, surround with '
'quotes). Paths in generated deps file will be relative '
'to the root, but preceded by the prefix. This flag '
'may be specified multiple times.')
parser.add_option('--path_with_depspath',
dest='paths_with_depspath',
default=[],
action='append',
help='A path to a source file and an alternate path to '
'the file in the generated deps file (if either contains '
'a space, surround with whitespace). This flag may be '
'specified multiple times.')
return parser
def _NormalizePathSeparators(path):
"""Replaces OS-specific path separators with POSIX-style slashes.
Args:
path: str, A file path.
Returns:
str, The path with any OS-specific path separators (such as backslash on
Windows) replaced with URL-compatible forward slashes. A no-op on systems
that use POSIX paths.
"""
return path.replace(os.sep, posixpath.sep)
def _GetRelativePathToSourceDict(root, prefix=''):
"""Scans a top root directory for .js sources.
Args:
root: str, Root directory.
prefix: str, Prefix for returned paths.
Returns:
dict, A map of relative paths (with prefix, if given), to source.Source
objects.
"""
# Remember and restore the cwd when we're done. We work from the root so
# that paths are relative from the root.
start_wd = os.getcwd()
os.chdir(root)
path_to_source = {}
for path in treescan.ScanTreeForJsFiles('.'):
prefixed_path = _NormalizePathSeparators(os.path.join(prefix, path))
path_to_source[prefixed_path] = source.Source(source.GetFileContents(path))
os.chdir(start_wd)
return path_to_source
def _GetPair(s):
"""Return a string as a shell-parsed tuple. Two values expected."""
try:
# shlex uses '\' as an escape character, so they must be escaped.
s = s.replace('\\', '\\\\')
first, second = shlex.split(s)
return (first, second)
except:
raise Exception('Unable to parse input line as a pair: %s' % s)
def main():
"""CLI frontend to MakeDepsFile."""
logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
level=logging.INFO)
options, args = _GetOptionsParser().parse_args()
path_to_source = {}
# Roots without prefixes
for root in options.roots:
path_to_source.update(_GetRelativePathToSourceDict(root))
# Roots with prefixes
for root_and_prefix in options.roots_with_prefix:
root, prefix = _GetPair(root_and_prefix)
path_to_source.update(_GetRelativePathToSourceDict(root, prefix=prefix))
# Source paths
for path in args:
path_to_source[path] = source.Source(source.GetFileContents(path))
# Source paths with alternate deps paths
for path_with_depspath in options.paths_with_depspath:
srcpath, depspath = _GetPair(path_with_depspath)
path_to_source[depspath] = source.Source(source.GetFileContents(srcpath))
# Make our output pipe.
if options.output_file:
out = open(options.output_file, 'w')
else:
out = sys.stdout
out.write('// This file was autogenerated by %s.\n' % sys.argv[0])
out.write('// Please do not edit.\n')
out.write(MakeDepsFile(path_to_source))
if __name__ == '__main__':
main()
| SOCR/HTML5_WebSite | HTML5/BrainPainter/X/lib/closure-library/closure/bin/build/depswriter.py | Python | lgpl-3.0 | 6,203 | 0.009028 |
"""Implements basics of Capa, including class CapaModule."""
import cgi
import copy
import datetime
import hashlib
import json
import logging
import os
import traceback
import struct
import sys
import re
# We don't want to force a dependency on datadog, so make the import conditional
try:
import dogstats_wrapper as dog_stats_api
except ImportError:
dog_stats_api = None
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.responsetypes import StudentInputError, \
ResponseError, LoncapaProblemError
from capa.util import convert_files_to_filenames, get_inner_html_from_xpath
from .progress import Progress
from xmodule.exceptions import NotFoundError
from xblock.fields import Scope, String, Boolean, Dict, Integer, Float
from .fields import Timedelta, Date
from django.utils.timezone import UTC
from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER
from django.conf import settings
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
# Generate this many different variants of problems with rerandomize=per_student
NUM_RANDOMIZATION_BINS = 20
# Never produce more than this many different seeds, no matter what.
MAX_RANDOMIZATION_BINS = 1000
def randomization_bin(seed, problem_id):
"""
Pick a randomization bin for the problem given the user's seed and a problem id.
We do this because we only want e.g. 20 randomizations of a problem to make analytics
interesting. To avoid having sets of students that always get the same problems,
we'll combine the system's per-student seed with the problem id in picking the bin.
"""
r_hash = hashlib.sha1()
r_hash.update(str(seed))
r_hash.update(str(problem_id))
# get the first few digits of the hash, convert to an int, then mod.
return int(r_hash.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS
class Randomization(String):
"""
Define a field to store how to randomize a problem.
"""
def from_json(self, value):
if value in ("", "true"):
return RANDOMIZATION.ALWAYS
elif value == "false":
return RANDOMIZATION.PER_STUDENT
return value
to_json = from_json
class ComplexEncoder(json.JSONEncoder):
"""
Extend the JSON encoder to correctly handle complex numbers
"""
def default(self, obj):
"""
Print a nicely formatted complex number, or default to the JSON encoder
"""
if isinstance(obj, complex):
return u"{real:.7g}{imag:+.7g}*j".format(real=obj.real, imag=obj.imag)
return json.JSONEncoder.default(self, obj)
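    # For example (illustrative): json.dumps(1 + 2j, cls=ComplexEncoder)
    # yields '"1+2*j"' instead of raising a TypeError.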
class CapaFields(object):
"""
Define the possible fields for a Capa problem
"""
display_name = String(
display_name=_("Display Name"),
help=_("This name appears in the horizontal navigation at the top of the page."),
scope=Scope.settings,
# it'd be nice to have a useful default but it screws up other things; so,
# use display_name_with_default for those
default=_("Blank Advanced Problem")
)
attempts = Integer(
help=_("Number of attempts taken by the student on this problem"),
default=0,
scope=Scope.user_state)
max_attempts = Integer(
display_name=_("Maximum Attempts"),
help=_("Defines the number of times a student can try to answer this problem. "
"If the value is not set, infinite attempts are allowed."),
values={"min": 0}, scope=Scope.settings
)
due = Date(help=_("Date that this problem is due by"), scope=Scope.settings)
graceperiod = Timedelta(
help=_("Amount of time after the due date that submissions will be accepted"),
scope=Scope.settings
)
showanswer = String(
display_name=_("Show Answer"),
help=_("Defines when to show the answer to the problem. "
"A default value can be set in Advanced Settings."),
scope=Scope.settings,
default=SHOWANSWER.FINISHED,
values=[
{"display_name": _("Always"), "value": SHOWANSWER.ALWAYS},
{"display_name": _("Answered"), "value": SHOWANSWER.ANSWERED},
{"display_name": _("Attempted"), "value": SHOWANSWER.ATTEMPTED},
{"display_name": _("Closed"), "value": SHOWANSWER.CLOSED},
{"display_name": _("Finished"), "value": SHOWANSWER.FINISHED},
{"display_name": _("Correct or Past Due"), "value": SHOWANSWER.CORRECT_OR_PAST_DUE},
{"display_name": _("Past Due"), "value": SHOWANSWER.PAST_DUE},
{"display_name": _("Never"), "value": SHOWANSWER.NEVER}]
)
force_save_button = Boolean(
help=_("Whether to force the save button to appear on the page"),
scope=Scope.settings,
default=False
)
reset_key = "DEFAULT_SHOW_RESET_BUTTON"
default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False
show_reset_button = Boolean(
display_name=_("Show Reset Button"),
help=_("Determines whether a 'Reset' button is shown so the user may reset their answer. "
"A default value can be set in Advanced Settings."),
scope=Scope.settings,
default=default_reset_button
)
rerandomize = Randomization(
display_name=_("Randomization"),
help=_(
'Defines when to randomize the variables specified in the associated Python script. '
'For problems that do not randomize values, specify \"Never\". '
),
default=RANDOMIZATION.NEVER,
scope=Scope.settings,
values=[
{"display_name": _("Always"), "value": RANDOMIZATION.ALWAYS},
{"display_name": _("On Reset"), "value": RANDOMIZATION.ONRESET},
{"display_name": _("Never"), "value": RANDOMIZATION.NEVER},
{"display_name": _("Per Student"), "value": RANDOMIZATION.PER_STUDENT}
]
)
data = String(help=_("XML data for the problem"), scope=Scope.content, default="<problem></problem>")
correct_map = Dict(help=_("Dictionary with the correctness of current student answers"),
scope=Scope.user_state, default={})
input_state = Dict(help=_("Dictionary for maintaining the state of inputtypes"), scope=Scope.user_state)
student_answers = Dict(help=_("Dictionary with the current student responses"), scope=Scope.user_state)
done = Boolean(help=_("Whether the student has answered the problem"), scope=Scope.user_state)
seed = Integer(help=_("Random seed for this student"), scope=Scope.user_state)
last_submission_time = Date(help=_("Last submission time"), scope=Scope.user_state)
submission_wait_seconds = Integer(
display_name=_("Timer Between Attempts"),
help=_("Seconds a student must wait between submissions for a problem with multiple attempts."),
scope=Scope.settings,
default=0)
weight = Float(
display_name=_("Problem Weight"),
help=_("Defines the number of points each problem is worth. "
"If the value is not set, each response field in the problem is worth one point."),
values={"min": 0, "step": .1},
scope=Scope.settings
)
markdown = String(help=_("Markdown source of this module"), default=None, scope=Scope.settings)
source_code = String(
help=_("Source code for LaTeX and Word problems. This feature is not well-supported."),
scope=Scope.settings
)
text_customization = Dict(
help=_("String customization substitutions for particular locations"),
scope=Scope.settings
# TODO: someday it should be possible to not duplicate this definition here
# and in inheritance.py
)
use_latex_compiler = Boolean(
help=_("Enable LaTeX templates?"),
default=False,
scope=Scope.settings
)
matlab_api_key = String(
display_name=_("Matlab API key"),
help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
"This key is granted for exclusive use by this course for the specified duration. "
"Please do not share the API key with other courses and notify MathWorks immediately "
"if you believe the key is exposed or compromised. To obtain a key for your course, "
"or to report an issue, please contact moocsupport@mathworks.com"),
scope=Scope.settings
)
class CapaMixin(CapaFields):
"""
Core logic for Capa Problem, which can be used by XModules or XBlocks.
"""
def __init__(self, *args, **kwargs):
super(CapaMixin, self).__init__(*args, **kwargs)
due_date = self.due
if self.graceperiod is not None and due_date:
self.close_date = due_date + self.graceperiod
else:
self.close_date = due_date
if self.seed is None:
self.choose_new_seed()
# Need the problem location in openendedresponse to send out. Adding
# it to the system here seems like the least clunky way to get it
# there.
self.runtime.set('location', self.location.to_deprecated_string())
try:
# TODO (vshnayder): move as much as possible of this work and error
# checking to descriptor load time
self.lcp = self.new_lcp(self.get_state_for_lcp())
# At this point, we need to persist the randomization seed
# so that when the problem is re-loaded (to check/view/save)
# it stays the same.
# However, we do not want to write to the database
# every time the module is loaded.
# So we set the seed ONLY when there is not one set already
if self.seed is None:
self.seed = self.lcp.seed
except Exception as err: # pylint: disable=broad-except
msg = u'cannot create LoncapaProblem {loc}: {err}'.format(
loc=self.location.to_deprecated_string(), err=err)
# TODO (vshnayder): do modules need error handlers too?
# We shouldn't be switching on DEBUG.
if self.runtime.DEBUG:
log.warning(msg)
# TODO (vshnayder): This logic should be general, not here--and may
# want to preserve the data instead of replacing it.
# e.g. in the CMS
msg = u'<p>{msg}</p>'.format(msg=cgi.escape(msg))
msg += u'<p><pre>{tb}</pre></p>'.format(
# just the traceback, no message - it is already present above
tb=cgi.escape(
u''.join(
['Traceback (most recent call last):\n'] +
traceback.format_tb(sys.exc_info()[2])
)
)
)
# create a dummy problem with error message instead of failing
problem_text = (u'<problem><text><span class="inline-error">'
u'Problem {url} has an error:</span>{msg}</text></problem>'.format(
url=self.location.to_deprecated_string(),
msg=msg)
)
self.lcp = self.new_lcp(self.get_state_for_lcp(), text=problem_text)
else:
# add extra info and raise
raise Exception(msg), None, sys.exc_info()[2]
self.set_state_from_lcp()
assert self.seed is not None
def choose_new_seed(self):
"""
Choose a new seed.
"""
if self.rerandomize == RANDOMIZATION.NEVER:
self.seed = 1
elif self.rerandomize == RANDOMIZATION.PER_STUDENT and hasattr(self.runtime, 'seed'):
# see comment on randomization_bin
self.seed = randomization_bin(self.runtime.seed, unicode(self.location).encode('utf-8'))
else:
self.seed = struct.unpack('i', os.urandom(4))[0]
# So that sandboxed code execution can be cached, but still have an interesting
# number of possibilities, cap the number of different random seeds.
self.seed %= MAX_RANDOMIZATION_BINS
def new_lcp(self, state, text=None):
"""
Generate a new Loncapa Problem
"""
if text is None:
text = self.data
capa_system = LoncapaSystem(
ajax_url=self.runtime.ajax_url,
anonymous_student_id=self.runtime.anonymous_student_id,
cache=self.runtime.cache,
can_execute_unsafe_code=self.runtime.can_execute_unsafe_code,
get_python_lib_zip=self.runtime.get_python_lib_zip,
DEBUG=self.runtime.DEBUG,
filestore=self.runtime.filestore,
i18n=self.runtime.service(self, "i18n"),
node_path=self.runtime.node_path,
render_template=self.runtime.render_template,
seed=self.runtime.seed, # Why do we do this if we have self.seed?
STATIC_URL=self.runtime.STATIC_URL,
xqueue=self.runtime.xqueue,
matlab_api_key=self.matlab_api_key
)
return LoncapaProblem(
problem_text=text,
id=self.location.html_id(),
state=state,
seed=self.seed,
capa_system=capa_system,
capa_module=self, # njp
)
def get_state_for_lcp(self):
"""
Give a dictionary holding the state of the module
"""
return {
'done': self.done,
'correct_map': self.correct_map,
'student_answers': self.student_answers,
'input_state': self.input_state,
'seed': self.seed,
}
def set_state_from_lcp(self):
"""
Set the module's state from the settings in `self.lcp`
"""
lcp_state = self.lcp.get_state()
self.done = lcp_state['done']
self.correct_map = lcp_state['correct_map']
self.input_state = lcp_state['input_state']
self.student_answers = lcp_state['student_answers']
self.seed = lcp_state['seed']
def set_last_submission_time(self):
"""
Set the module's last submission time (when the problem was checked)
"""
self.last_submission_time = datetime.datetime.now(UTC())
def get_score(self):
"""
Access the problem's score
"""
return self.lcp.get_score()
def max_score(self):
"""
Access the problem's max score
"""
return self.lcp.get_max_score()
def get_progress(self):
"""
For now, just return score / max_score
"""
score_dict = self.get_score()
score = score_dict['score']
total = score_dict['total']
if total > 0:
if self.weight is not None:
# Progress objects expect total > 0
if self.weight == 0:
return None
# scale score and total by weight/total:
score = score * self.weight / total
total = self.weight
try:
return Progress(score, total)
except (TypeError, ValueError):
log.exception("Got bad progress")
return None
return None
def get_html(self):
"""
Return some html with data about the module
"""
progress = self.get_progress()
return self.runtime.render_template('problem_ajax.html', {
'element_id': self.location.html_id(),
'id': self.location.to_deprecated_string(),
'ajax_url': self.runtime.ajax_url,
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
'content': self.get_problem_html(encapsulate=False),
})
def check_button_name(self):
"""
Determine the name for the "check" button.
Usually it is just "Check", but if this is the student's
final attempt, change the name to "Final Check".
The text can be customized by the text_customization setting.
"""
# The logic flow is a little odd so that _('xxx') strings can be found for
# translation while also running _() just once for each string.
_ = self.runtime.service(self, "i18n").ugettext
check = _('Check')
final_check = _('Final Check')
# Apply customizations if present
if 'custom_check' in self.text_customization:
check = _(self.text_customization.get('custom_check')) # pylint: disable=translation-of-non-string
if 'custom_final_check' in self.text_customization:
final_check = _(self.text_customization.get('custom_final_check')) # pylint: disable=translation-of-non-string
# TODO: need a way to get the customized words into the list of
# words to be translated
if self.max_attempts is not None and self.attempts >= self.max_attempts - 1:
return final_check
else:
return check
def check_button_checking_name(self):
"""
Return the "checking..." text for the "check" button.
After the user presses the "check" button, the button will briefly
display the value returned by this function until a response is
received by the server.
The text can be customized by the text_customization setting.
"""
# Apply customizations if present
if 'custom_checking' in self.text_customization:
return self.text_customization.get('custom_checking')
_ = self.runtime.service(self, "i18n").ugettext
return _('Checking...')
def should_show_check_button(self):
"""
Return True/False to indicate whether to show the "Check" button.
"""
submitted_without_reset = (self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS)
# If the problem is closed (past due / too many attempts)
# then we do NOT show the "check" button
# Also, do not show the "check" button if we're waiting
# for the user to reset a randomized problem
if self.closed() or submitted_without_reset:
return False
else:
return True
def should_show_reset_button(self):
"""
Return True/False to indicate whether to show the "Reset" button.
"""
is_survey_question = (self.max_attempts == 0)
# If the problem is closed (and not a survey question with max_attempts==0),
# then do NOT show the reset button.
if self.closed() and not is_survey_question:
return False
# Button only shows up for randomized problems if the question has been submitted
if self.rerandomize in [RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET] and self.is_submitted():
return True
else:
# Do NOT show the button if the problem is correct
if self.is_correct():
return False
else:
return self.show_reset_button
def should_show_save_button(self):
"""
Return True/False to indicate whether to show the "Save" button.
"""
# If the user has forced the save button to display,
# then show it as long as the problem is not closed
# (past due / too many attempts)
if self.force_save_button:
return not self.closed()
else:
is_survey_question = (self.max_attempts == 0)
needs_reset = self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS
# If the student has unlimited attempts, and their answers
# are not randomized, then we do not need a save button
# because they can use the "Check" button without consequences.
#
# The consequences we want to avoid are:
# * Using up an attempt (if max_attempts is set)
# * Changing the current problem, and no longer being
# able to view it (if rerandomize is "always")
#
# In those cases. the if statement below is false,
# and the save button can still be displayed.
#
if self.max_attempts is None and self.rerandomize != RANDOMIZATION.ALWAYS:
return False
# If the problem is closed (and not a survey question with max_attempts==0),
# then do NOT show the save button
# If we're waiting for the user to reset a randomized problem
# then do NOT show the save button
elif (self.closed() and not is_survey_question) or needs_reset:
return False
else:
return True
def handle_problem_html_error(self, err):
"""
Create a dummy problem to represent any errors.
Change our problem to a dummy problem containing a warning message to
display to users. Returns the HTML to show to users
`err` is the Exception encountered while rendering the problem HTML.
"""
log.exception(err.message)
# TODO (vshnayder): another switch on DEBUG.
if self.runtime.DEBUG:
msg = (
u'[courseware.capa.capa_module] <font size="+1" color="red">'
u'Failed to generate HTML for problem {url}</font>'.format(
url=cgi.escape(self.location.to_deprecated_string()))
)
msg += u'<p>Error:</p><p><pre>{msg}</pre></p>'.format(msg=cgi.escape(err.message))
msg += u'<p><pre>{tb}</pre></p>'.format(tb=cgi.escape(traceback.format_exc()))
html = msg
else:
# We're in non-debug mode, and possibly even in production. We want
# to avoid bricking of problem as much as possible
# Presumably, student submission has corrupted LoncapaProblem HTML.
# First, pull down all student answers
student_answers = self.lcp.student_answers
answer_ids = student_answers.keys()
# Some inputtypes, such as dynamath, have additional "hidden" state that
# is not exposed to the student. Keep those hidden
# TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id
hidden_state_keywords = ['dynamath']
for answer_id in answer_ids:
for hidden_state_keyword in hidden_state_keywords:
if answer_id.find(hidden_state_keyword) >= 0:
student_answers.pop(answer_id)
# Next, generate a fresh LoncapaProblem
self.lcp = self.new_lcp(None)
self.set_state_from_lcp()
# Prepend a scary warning to the student
_ = self.runtime.service(self, "i18n").ugettext
warning_msg = _("Warning: The problem has been reset to its initial state!")
warning = '<div class="capa_reset"> <h2> ' + warning_msg + '</h2>'
# Translators: Following this message, there will be a bulleted list of items.
warning_msg = _("The problem's state was corrupted by an invalid submission. The submission consisted of:")
warning += warning_msg + '<ul>'
for student_answer in student_answers.values():
if student_answer != '':
warning += '<li>' + cgi.escape(student_answer) + '</li>'
warning_msg = _('If this error persists, please contact the course staff.')
warning += '</ul>' + warning_msg + '</div>'
html = warning
try:
html += self.lcp.get_html()
except Exception:
# Couldn't do it. Give up.
log.exception("Unable to generate html from LoncapaProblem")
raise
return html
def get_demand_hint(self, hint_index):
"""
        Return the demand hint identified by hint_index as a dict for the client.
        The dict contains 'success', the rendered hint 'contents' (prefixed with
        "Hint" or "Hint n of m" when several hints exist), and the 'hint_index'
        that was displayed.
        hint_index: index of the demand hint to show; it is interpreted modulo
        the number of available hints.
"""
demand_hints = self.lcp.tree.xpath("//problem/demandhint/hint")
hint_index = hint_index % len(demand_hints)
_ = self.runtime.service(self, "i18n").ugettext
hint_element = demand_hints[hint_index]
hint_text = get_inner_html_from_xpath(hint_element)
if len(demand_hints) == 1:
prefix = _('Hint: ')
else:
# Translators: e.g. "Hint 1 of 3" meaning we are showing the first of three hints.
prefix = _('Hint ({hint_num} of {hints_count}): ').format(hint_num=hint_index + 1,
hints_count=len(demand_hints))
# Log this demand-hint request
event_info = dict()
event_info['module_id'] = self.location.to_deprecated_string()
event_info['hint_index'] = hint_index
event_info['hint_len'] = len(demand_hints)
event_info['hint_text'] = hint_text
self.runtime.publish(self, 'edx.problem.hint.demandhint_displayed', event_info)
# We report the index of this hint, the client works out what index to use to get the next hint
return {
'success': True,
'contents': prefix + hint_text,
'hint_index': hint_index
}
def get_problem_html(self, encapsulate=True):
"""
Return html for the problem.
Adds check, reset, save, and hint buttons as necessary based on the problem config
and state.
encapsulate: if True (the default) embed the html in a problem <div>
"""
try:
html = self.lcp.get_html()
# If we cannot construct the problem HTML,
# then generate an error message instead.
except Exception as err: # pylint: disable=broad-except
html = self.handle_problem_html_error(err)
html = self.remove_tags_from_html(html)
# The convention is to pass the name of the check button if we want
# to show a check button, and False otherwise This works because
# non-empty strings evaluate to True. We use the same convention
# for the "checking" state text.
if self.should_show_check_button():
check_button = self.check_button_name()
check_button_checking = self.check_button_checking_name()
else:
check_button = False
check_button_checking = False
content = {
'name': self.display_name_with_default,
'html': html,
'weight': self.weight,
}
# If demand hints are available, emit hint button and div.
demand_hints = self.lcp.tree.xpath("//problem/demandhint/hint")
demand_hint_possible = len(demand_hints) > 0
context = {
'problem': content,
'id': self.location.to_deprecated_string(),
'check_button': check_button,
'check_button_checking': check_button_checking,
'reset_button': self.should_show_reset_button(),
'save_button': self.should_show_save_button(),
'answer_available': self.answer_available(),
'attempts_used': self.attempts,
'attempts_allowed': self.max_attempts,
'demand_hint_possible': demand_hint_possible
}
html = self.runtime.render_template('problem.html', context)
if encapsulate:
html = u'<div id="problem_{id}" class="problem" data-url="{ajax_url}">'.format(
id=self.location.html_id(), ajax_url=self.runtime.ajax_url
) + html + "</div>"
# Now do all the substitutions which the LMS module_render normally does, but
# we need to do here explicitly since we can get called for our HTML via AJAX
html = self.runtime.replace_urls(html)
if self.runtime.replace_course_urls:
html = self.runtime.replace_course_urls(html)
if self.runtime.replace_jump_to_id_urls:
html = self.runtime.replace_jump_to_id_urls(html)
return html
def remove_tags_from_html(self, html):
"""
The capa xml includes many tags such as <additional_answer> or <demandhint> which are not
meant to be part of the client html. We strip them all and return the resulting html.
"""
        tags = ['demandhint', 'choicehint', 'optionhint', 'stringhint', 'numerichint',
                'correcthint', 'regexphint', 'additional_answer', 'stringequalhint', 'compoundhint']
for tag in tags:
html = re.sub(r'<%s.*?>.*?</%s>' % (tag, tag), '', html, flags=re.DOTALL)
# Some of these tags span multiple lines
# Note: could probably speed this up by calling sub() once with a big regex
# vs. simply calling sub() many times as we have here.
return html
def hint_button(self, data):
"""
Hint button handler, returns new html using hint_index from the client.
"""
hint_index = int(data['hint_index'])
return self.get_demand_hint(hint_index)
def is_past_due(self):
"""
Is it now past this problem's due date, including grace period?
"""
return (self.close_date is not None and
datetime.datetime.now(UTC()) > self.close_date)
def closed(self):
"""
Is the student still allowed to submit answers?
"""
if self.max_attempts is not None and self.attempts >= self.max_attempts:
return True
if self.is_past_due():
return True
return False
def is_submitted(self):
"""
        Used to decide whether to show or hide the RESET and CHECK buttons.
        Means only that the student has submitted the problem; the answers
        may be completely wrong.
        Pressing the RESET button makes this function return False.
"""
# used by conditional module
return self.lcp.done
def is_attempted(self):
"""
Has the problem been attempted?
used by conditional module
"""
return self.attempts > 0
def is_correct(self):
"""
True iff full points
"""
score_dict = self.get_score()
return score_dict['score'] == score_dict['total']
def answer_available(self):
"""
Is the user allowed to see an answer?
"""
if self.showanswer == '':
return False
elif self.showanswer == SHOWANSWER.NEVER:
return False
elif self.runtime.user_is_staff:
# This is after the 'never' check because admins can see the answer
# unless the problem explicitly prevents it
return True
elif self.showanswer == SHOWANSWER.ATTEMPTED:
return self.attempts > 0
elif self.showanswer == SHOWANSWER.ANSWERED:
# NOTE: this is slightly different from 'attempted' -- resetting the problems
# makes lcp.done False, but leaves attempts unchanged.
return self.lcp.done
elif self.showanswer == SHOWANSWER.CLOSED:
return self.closed()
elif self.showanswer == SHOWANSWER.FINISHED:
return self.closed() or self.is_correct()
elif self.showanswer == SHOWANSWER.CORRECT_OR_PAST_DUE:
return self.is_correct() or self.is_past_due()
elif self.showanswer == SHOWANSWER.PAST_DUE:
return self.is_past_due()
elif self.showanswer == SHOWANSWER.ALWAYS:
return True
return False
def update_score(self, data):
"""
Delivers grading response (e.g. from asynchronous code checking) to
the capa problem, so its score can be updated
        'data' must have the keys 'queuekey' and 'xqueue_body'; 'xqueue_body' is
        the string containing the grader's response
No ajax return is needed. Return empty dict.
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
self.lcp.update_score(score_msg, queuekey)
self.set_state_from_lcp()
self.publish_grade()
return dict() # No AJAX return is needed
def handle_ungraded_response(self, data):
"""
Delivers a response from the XQueue to the capa problem
The score of the problem will not be updated
Args:
- data (dict) must contain keys:
queuekey - a key specific to this response
xqueue_body - the body of the response
Returns:
empty dictionary
No ajax return is needed, so an empty dict is returned
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
# pass along the xqueue message to the problem
self.lcp.ungraded_response(score_msg, queuekey)
self.set_state_from_lcp()
return dict()
def handle_input_ajax(self, data):
"""
Handle ajax calls meant for a particular input in the problem
Args:
- data (dict) - data that should be passed to the input
Returns:
- dict containing the response from the input
"""
response = self.lcp.handle_input_ajax(data)
# save any state changes that may occur
self.set_state_from_lcp()
return response
def get_answer(self, _data):
"""
For the "show answer" button.
Returns the answers: {'answers' : answers}
"""
event_info = dict()
event_info['problem_id'] = self.location.to_deprecated_string()
self.track_function_unmask('showanswer', event_info)
if not self.answer_available():
raise NotFoundError('Answer is not available')
else:
answers = self.lcp.get_question_answers()
self.set_state_from_lcp()
# answers (eg <solution>) may have embedded images
# but be careful, some problems are using non-string answer dicts
new_answers = dict()
for answer_id in answers:
try:
answer_content = self.runtime.replace_urls(answers[answer_id])
if self.runtime.replace_jump_to_id_urls:
answer_content = self.runtime.replace_jump_to_id_urls(answer_content)
new_answer = {answer_id: answer_content}
except TypeError:
log.debug(u'Unable to perform URL substitution on answers[%s]: %s',
answer_id, answers[answer_id])
new_answer = {answer_id: answers[answer_id]}
new_answers.update(new_answer)
return {'answers': new_answers}
# Figure out if we should move these to capa_problem?
def get_problem(self, _data):
"""
Return results of get_problem_html, as a simple dict for json-ing.
{ 'html': <the-html> }
Used if we want to reconfirm we have the right thing e.g. after
several AJAX calls.
"""
return {'html': self.get_problem_html(encapsulate=False)}
@staticmethod
def make_dict_of_responses(data):
"""
Make dictionary of student responses (aka "answers")
`data` is POST dictionary (webob.multidict.MultiDict).
The `data` dict has keys of the form 'x_y', which are mapped
to key 'y' in the returned dict. For example,
'input_1_2_3' would be mapped to '1_2_3' in the returned dict.
Some inputs always expect a list in the returned dict
(e.g. checkbox inputs). The convention is that
keys in the `data` dict that end with '[]' will always
have list values in the returned dict.
For example, if the `data` dict contains {'input_1[]': 'test' }
then the output dict would contain {'1': ['test'] }
(the value is a list).
Some other inputs such as ChoiceTextInput expect a dict of values in the returned
        dict. If the key ends with '{}' then we will assume that the value is a json
encoded dict and deserialize it.
For example, if the `data` dict contains {'input_1{}': '{"1_2_1": 1}'}
then the output dict would contain {'1': {"1_2_1": 1} }
(the value is a dictionary)
Raises an exception if:
-A key in the `data` dictionary does not contain at least one underscore
(e.g. "input" is invalid, but "input_1" is valid)
-Two keys end up with the same name in the returned dict.
(e.g. 'input_1' and 'input_1[]', which both get mapped to 'input_1'
in the returned dict)
"""
answers = dict()
# webob.multidict.MultiDict is a view of a list of tuples,
# so it will return a multi-value key once for each value.
# We only want to consider each key a single time, so we use set(data.keys())
for key in set(data.keys()):
# e.g. input_resistor_1 ==> resistor_1
_, _, name = key.partition('_')
# If key has no underscores, then partition
# will return (key, '', '')
# We detect this and raise an error
if not name:
raise ValueError(u"{key} must contain at least one underscore".format(key=key))
else:
# This allows for answers which require more than one value for
# the same form input (e.g. checkbox inputs). The convention is that
# if the name ends with '[]' (which looks like an array), then the
# answer will be an array.
# if the name ends with '{}' (Which looks like a dict),
# then the answer will be a dict
is_list_key = name.endswith('[]')
is_dict_key = name.endswith('{}')
name = name[:-2] if is_list_key or is_dict_key else name
if is_list_key:
val = data.getall(key)
elif is_dict_key:
try:
val = json.loads(data[key])
# If the submission wasn't deserializable, raise an error.
except(KeyError, ValueError):
raise ValueError(
u"Invalid submission: {val} for {key}".format(val=data[key], key=key)
)
else:
val = data[key]
# If the name already exists, then we don't want
# to override it. Raise an error instead
if name in answers:
raise ValueError(u"Key {name} already exists in answers dict".format(name=name))
else:
answers[name] = val
return answers
def publish_grade(self):
"""
Publishes the student's current grade to the system as an event
"""
score = self.lcp.get_score()
self.runtime.publish(
self,
'grade',
{
'value': score['score'],
'max_value': score['total'],
}
)
return {'grade': score['score'], 'max_grade': score['total']}
# pylint: disable=too-many-statements
def check_problem(self, data, override_time=False):
"""
Checks whether answers to a problem are correct
Returns a map of correct/incorrect answers:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string,
'contents' : html}
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
answers = self.make_dict_of_responses(data)
answers_without_files = convert_files_to_filenames(answers)
event_info['answers'] = answers_without_files
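        # Bound str.format: metric_name('checks') yields u'capa.check_problem.checks'.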
metric_name = u'capa.check_problem.{}'.format
# Can override current time
current_time = datetime.datetime.now(UTC())
if override_time is not False:
current_time = override_time
_ = self.runtime.service(self, "i18n").ugettext
# Too late. Cannot submit
if self.closed():
event_info['failure'] = 'closed'
self.track_function_unmask('problem_check_fail', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:closed'])
raise NotFoundError(_("Problem is closed."))
# Problem submitted. Student should reset before checking again
if self.done and self.rerandomize == RANDOMIZATION.ALWAYS:
event_info['failure'] = 'unreset'
self.track_function_unmask('problem_check_fail', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:unreset'])
raise NotFoundError(_("Problem must be reset before it can be checked again."))
# Problem queued. Students must wait a specified waittime before they are allowed to submit
# IDEA: consider stealing code from below: pretty-print of seconds, cueing of time remaining
if self.lcp.is_queued():
prev_submit_time = self.lcp.get_recentmost_queuetime()
waittime_between_requests = self.runtime.xqueue['waittime']
if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
msg = _(u"You must wait at least {wait} seconds between submissions.").format(
wait=waittime_between_requests)
return {'success': msg, 'html': ''}
# Wait time between resets: check if is too soon for submission.
if self.last_submission_time is not None and self.submission_wait_seconds != 0:
if (current_time - self.last_submission_time).total_seconds() < self.submission_wait_seconds:
remaining_secs = int(self.submission_wait_seconds - (current_time - self.last_submission_time).total_seconds())
msg = _(u'You must wait at least {wait_secs} between submissions. {remaining_secs} remaining.').format(
wait_secs=self.pretty_print_seconds(self.submission_wait_seconds),
remaining_secs=self.pretty_print_seconds(remaining_secs))
return {
'success': msg,
'html': ''
}
try:
correct_map = self.lcp.grade_answers(answers)
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
self.set_last_submission_time()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
if self.runtime.DEBUG:
log.warning(
"StudentInputError in capa_module:problem_check",
exc_info=True
)
# Save the user's state before failing
self.set_state_from_lcp()
# If the user is a staff member, include
# the full exception, including traceback,
# in the response
if self.runtime.user_is_staff:
msg = u"Staff debug info: {tb}".format(tb=cgi.escape(traceback.format_exc()))
# Otherwise, display just an error message,
# without a stack trace
else:
# Translators: {msg} will be replaced with a problem's error message.
msg = _(u"Error: {msg}").format(msg=inst.message)
return {'success': msg}
except Exception as err:
# Save the user's state before failing
self.set_state_from_lcp()
if self.runtime.DEBUG:
msg = u"Error checking problem: {}".format(err.message)
msg += u'\nTraceback:\n{}'.format(traceback.format_exc())
return {'success': msg}
raise
published_grade = self.publish_grade()
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['grade'] = published_grade['grade']
event_info['max_grade'] = published_grade['max_grade']
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map)
self.track_function_unmask('problem_check', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:success'])
if published_grade['max_grade'] != 0:
dog_stats_api.histogram(
metric_name('correct_pct'),
float(published_grade['grade']) / published_grade['max_grade'],
)
dog_stats_api.histogram(
metric_name('attempts'),
self.attempts,
)
# render problem into HTML
html = self.get_problem_html(encapsulate=False)
return {
'success': success,
'contents': html
}
# pylint: enable=too-many-statements
def track_function_unmask(self, title, event_info):
"""
All calls to runtime.track_function route through here so that the
choice names can be unmasked.
"""
# Do the unmask translates on a copy of event_info,
# avoiding problems where an event_info is unmasked twice.
event_unmasked = copy.deepcopy(event_info)
self.unmask_event(event_unmasked)
self.runtime.publish(self, title, event_unmasked)
def unmask_event(self, event_info):
"""
Translates in-place the event_info to account for masking
and adds information about permutation options in force.
"""
# answers is like: {u'i4x-Stanford-CS99-problem-dada976e76f34c24bc8415039dee1300_2_1': u'mask_0'}
        # Each responder has an answer_id which matches a key in answers.
for response in self.lcp.responders.values():
# Un-mask choice names in event_info for masked responses.
if response.has_mask():
# We don't assume much about the structure of event_info,
# but check for the existence of the things we need to un-mask.
# Look for answers/id
answer = event_info.get('answers', {}).get(response.answer_id)
if answer is not None:
event_info['answers'][response.answer_id] = response.unmask_name(answer)
# Look for state/student_answers/id
answer = event_info.get('state', {}).get('student_answers', {}).get(response.answer_id)
if answer is not None:
event_info['state']['student_answers'][response.answer_id] = response.unmask_name(answer)
# Look for old_state/student_answers/id -- parallel to the above case, happens on reset
answer = event_info.get('old_state', {}).get('student_answers', {}).get(response.answer_id)
if answer is not None:
event_info['old_state']['student_answers'][response.answer_id] = response.unmask_name(answer)
# Add 'permutation' to event_info for permuted responses.
permutation_option = None
if response.has_shuffle():
permutation_option = 'shuffle'
elif response.has_answerpool():
permutation_option = 'answerpool'
if permutation_option is not None:
# Add permutation record tuple: (one of:'shuffle'/'answerpool', [as-displayed list])
if 'permutation' not in event_info:
event_info['permutation'] = {}
event_info['permutation'][response.answer_id] = (permutation_option, response.unmask_order())
def pretty_print_seconds(self, num_seconds):
"""
        Returns time duration nicely formatted, e.g. "3 minutes 4 seconds"
"""
        # ungettext is the plural-aware translation function; it takes (singular, plural, count)
ungettext = self.runtime.service(self, "i18n").ungettext
hours = num_seconds // 3600
sub_hour = num_seconds % 3600
minutes = sub_hour // 60
seconds = sub_hour % 60
display = ""
if hours > 0:
display += ungettext("{num_hour} hour", "{num_hour} hours", hours).format(num_hour=hours)
if minutes > 0:
if display != "":
display += " "
# translators: "minute" refers to a minute of time
display += ungettext("{num_minute} minute", "{num_minute} minutes", minutes).format(num_minute=minutes)
# Taking care to make "0 seconds" instead of "" for 0 time
if seconds > 0 or (hours == 0 and minutes == 0):
if display != "":
display += " "
# translators: "second" refers to a second of time
display += ungettext("{num_second} second", "{num_second} seconds", seconds).format(num_second=seconds)
return display
def get_submission_metadata_safe(self, answers, correct_map):
"""
Ensures that no exceptions are thrown while generating input metadata summaries. Returns the
summary if it is successfully created, otherwise an empty dictionary.
"""
try:
return self.get_submission_metadata(answers, correct_map)
except Exception: # pylint: disable=broad-except
# NOTE: The above process requires deep inspection of capa structures that may break for some
# uncommon problem types. Ensure that it does not prevent answer submission in those
# cases. Any occurrences of errors in this block should be investigated and resolved.
log.exception('Unable to gather submission metadata, it will not be included in the event.')
return {}
def get_submission_metadata(self, answers, correct_map):
"""
Return a map of inputs to their corresponding summarized metadata.
Returns:
A map whose keys are a unique identifier for the input (in this case a capa input_id) and
whose values are:
question (str): Is the prompt that was presented to the student. It corresponds to the
label of the input.
answer (mixed): Is the answer the student provided. This may be a rich structure,
however it must be json serializable.
response_type (str): The XML tag of the capa response type.
input_type (str): The XML tag of the capa input type.
correct (bool): Whether or not the provided answer is correct. Will be an empty
string if correctness could not be determined.
variant (str): In some cases the same question can have several different variants.
This string should uniquely identify the variant of the question that was answered.
In the capa context this corresponds to the `seed`.
This function attempts to be very conservative and make very few assumptions about the structure
of the problem. If problem related metadata cannot be located it should be replaced with empty
strings ''.
"""
input_metadata = {}
for input_id, internal_answer in answers.iteritems():
answer_input = self.lcp.inputs.get(input_id)
if answer_input is None:
log.warning('Input id %s is not mapped to an input type.', input_id)
answer_response = None
for response, responder in self.lcp.responders.iteritems():
if input_id in responder.answer_ids:
answer_response = responder
if answer_response is None:
log.warning('Answer responder could not be found for input_id %s.', input_id)
user_visible_answer = internal_answer
if hasattr(answer_input, 'get_user_visible_answer'):
user_visible_answer = answer_input.get_user_visible_answer(internal_answer)
# If this problem has rerandomize enabled, then it will generate N variants of the
# question, one per unique seed value. In this case we would like to know which
# variant was selected. Ideally it would be nice to have the exact question that
# was presented to the user, with values interpolated etc, but that can be done
# later if necessary.
variant = ''
if self.rerandomize != RANDOMIZATION.NEVER:
variant = self.seed
is_correct = correct_map.is_correct(input_id)
if is_correct is None:
is_correct = ''
input_metadata[input_id] = {
'question': answer_input.response_data.get('label', ''),
'answer': user_visible_answer,
'response_type': getattr(getattr(answer_response, 'xml', None), 'tag', ''),
'input_type': getattr(answer_input, 'tag', ''),
'correct': is_correct,
'variant': variant,
}
# Add group_label in event data only if the responsetype contains multiple inputtypes
if answer_input.response_data.get('group_label'):
input_metadata[input_id]['group_label'] = answer_input.response_data.get('group_label')
return input_metadata
def rescore_problem(self):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns the error messages for exceptions occurring while performing
the rescoring, rather than throwing them.
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.to_deprecated_string()}
_ = self.runtime.service(self, "i18n").ugettext
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.track_function_unmask('problem_rescore_fail', event_info)
# Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
raise NotImplementedError(_("Problem's definition does not support rescoring."))
if not self.done:
event_info['failure'] = 'unanswered'
self.track_function_unmask('problem_rescore_fail', event_info)
raise NotFoundError(_("Problem must be answered before it can be graded again."))
# get old score, for comparison:
orig_score = self.lcp.get_score()
event_info['orig_score'] = orig_score['score']
event_info['orig_total'] = orig_score['total']
try:
correct_map = self.lcp.rescore_existing_answers()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("Input error in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'input_error'
self.track_function_unmask('problem_rescore_fail', event_info)
return {'success': u"Error: {0}".format(inst.message)}
except Exception as err:
event_info['failure'] = 'unexpected'
self.track_function_unmask('problem_rescore_fail', event_info)
if self.runtime.DEBUG:
msg = u"Error checking problem: {0}".format(err.message)
msg += u'\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.publish_grade()
new_score = self.lcp.get_score()
event_info['new_score'] = new_score['score']
event_info['new_total'] = new_score['total']
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.track_function_unmask('problem_rescore', event_info)
return {'success': success}
def save_problem(self, data):
"""
Save the passed in answers.
Returns a dict { 'success' : bool, 'msg' : message }
The message is informative on success, and an error message on failure.
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
answers = self.make_dict_of_responses(data)
event_info['answers'] = answers
_ = self.runtime.service(self, "i18n").ugettext
# Too late. Cannot submit
if self.closed() and not self.max_attempts == 0:
event_info['failure'] = 'closed'
self.track_function_unmask('save_problem_fail', event_info)
return {
'success': False,
# Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
'msg': _("Problem is closed.")
}
# Problem submitted. Student should reset before saving
# again.
if self.done and self.rerandomize == RANDOMIZATION.ALWAYS:
event_info['failure'] = 'done'
self.track_function_unmask('save_problem_fail', event_info)
return {
'success': False,
'msg': _("Problem needs to be reset prior to save.")
}
self.lcp.student_answers = answers
self.set_state_from_lcp()
self.track_function_unmask('save_problem_success', event_info)
msg = _("Your answers have been saved.")
if not self.max_attempts == 0:
msg = _(
"Your answers have been saved but not graded. Click '{button_name}' to grade them."
).format(button_name=self.check_button_name())
return {
'success': True,
'msg': msg,
'html': self.get_problem_html(encapsulate=False),
}
def reset_problem(self, _data):
"""
Changes problem state to unfinished -- removes student answers,
Causes problem to rerender itself if randomization is enabled.
Returns a dictionary of the form:
{'success': True/False,
'html': Problem HTML string }
If an error occurs, the dictionary will also have an
`error` key containing an error message.
"""
event_info = dict()
event_info['old_state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
_ = self.runtime.service(self, "i18n").ugettext
if self.closed():
event_info['failure'] = 'closed'
self.track_function_unmask('reset_problem_fail', event_info)
return {
'success': False,
# Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
'error': _("Problem is closed."),
}
if not self.is_submitted():
event_info['failure'] = 'not_done'
self.track_function_unmask('reset_problem_fail', event_info)
return {
'success': False,
# Translators: A student must "make an attempt" to solve the problem on the page before they can reset it.
'error': _("Refresh the page and make an attempt before resetting."),
}
if self.is_submitted() and self.rerandomize in [RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET]:
# Reset random number generator seed.
self.choose_new_seed()
# Generate a new problem with either the previous seed or a new seed
self.lcp = self.new_lcp(None)
# Pull in the new problem seed
self.set_state_from_lcp()
# Grade may have changed, so publish new value
self.publish_grade()
event_info['new_state'] = self.lcp.get_state()
self.track_function_unmask('reset_problem', event_info)
return {
'success': True,
'html': self.get_problem_html(encapsulate=False),
}
| chrisndodge/edx-platform | common/lib/xmodule/xmodule/capa_base.py | Python | agpl-3.0 | 62,222 | 0.002588 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2011 Nick Hall
# Copyright (C) 2011 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Module that implements the gramplet bar functionality.
"""
#-------------------------------------------------------------------------
#
# Set up logging
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger('.grampletbar')
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import time
import os
import configparser
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.const import URL_MANUAL_PAGE, URL_WIKISTRING, VERSION_DIR
from gramps.gen.config import config
from gramps.gen.constfunc import win
from ..managedwindow import ManagedWindow
from ..display import display_help, display_url
from .grampletpane import (AVAILABLE_GRAMPLETS,
GET_AVAILABLE_GRAMPLETS,
GET_GRAMPLET_LIST,
get_gramplet_opts,
get_gramplet_options_by_name,
make_requested_gramplet,
GuiGramplet)
from .undoablebuffer import UndoableBuffer
from ..utils import is_right_click
from ..dialog import QuestionDialog
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = URL_WIKISTRING + URL_MANUAL_PAGE + '_-_Gramplets'
WIKI_HELP_GRAMPLETBAR = URL_WIKISTRING + URL_MANUAL_PAGE + '_-_Main_Window#Gramplet_Bar_Menu'
WIKI_HELP_ABOUT_GRAMPLETS = URL_WIKISTRING + URL_MANUAL_PAGE + '_-_Gramplets#What_is_a_Gramplet'
NL = "\n"
#-------------------------------------------------------------------------
#
# GrampletBar class
#
#-------------------------------------------------------------------------
class GrampletBar(Gtk.Notebook):
"""
A class which defines the graphical representation of the GrampletBar.
"""
def __init__(self, dbstate, uistate, pageview, configfile, defaults):
Gtk.Notebook.__init__(self)
self.dbstate = dbstate
self.uistate = uistate
self.pageview = pageview
self.configfile = os.path.join(VERSION_DIR, "%s.ini" % configfile)
self.defaults = defaults
self.detached_gramplets = []
self.empty = False
self.close_buttons = []
self.set_group_name("grampletbar")
self.set_show_border(False)
self.set_scrollable(True)
book_button = Gtk.Button()
# Arrow is too small unless in a box
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
arrow = Gtk.Arrow(arrow_type=Gtk.ArrowType.DOWN,
shadow_type=Gtk.ShadowType.NONE)
arrow.show()
box.add(arrow)
box.show()
book_button.add(box)
book_button.set_relief(Gtk.ReliefStyle.NONE)
book_button.connect('clicked', self.__button_clicked)
book_button.set_property("tooltip-text", _("Gramplet Bar Menu"))
book_button.show()
self.set_action_widget(book_button, Gtk.PackType.END)
self.connect('page-added', self.__page_added)
self.connect('page-removed', self.__page_removed)
self.connect('create-window', self.__create_window)
config_settings, opts_list = self.__load(defaults)
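        # Restore gramplets in their saved tab order.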
opts_list.sort(key=lambda opt: opt["page"])
for opts in opts_list:
if opts["name"] in AVAILABLE_GRAMPLETS():
all_opts = get_gramplet_opts(opts["name"], opts)
gramplet = make_requested_gramplet(TabGramplet, self, all_opts,
self.dbstate, self.uistate)
if gramplet:
self.__add_tab(gramplet)
if len(opts_list) == 0:
self.empty = True
self.__create_empty_tab()
if config_settings[0]:
self.show()
self.set_current_page(config_settings[1])
uistate.connect('grampletbar-close-changed', self.cb_close_changed)
# Connect after gramplets added to prevent making them active
self.connect('switch-page', self.__switch_page)
def _get_config_setting(self, configparser, section, setting, fn=None):
"""
Get a section.setting value from the config parser.
Takes a configparser instance, a section, a setting, and
optionally a post-processing function (typically int).
Always returns a value of the appropriate type.
"""
value = ""
try:
value = configparser.get(section, setting)
value = value.strip()
if fn:
value = fn(value)
except:
if fn:
value = fn()
else:
value = ""
return value
def __load(self, defaults):
"""
Load the gramplets from the configuration file.
"""
retval = []
visible = True
default_page = 0
filename = self.configfile
if filename and os.path.exists(filename):
cp = configparser.ConfigParser()
try:
cp.read(filename, encoding='utf-8')
except:
pass
for sec in cp.sections():
if sec == "Bar Options":
if "visible" in cp.options(sec):
visible = self._get_config_setting(cp, sec, "visible") == "True"
if "page" in cp.options(sec):
default_page = self._get_config_setting(cp, sec, "page", int)
else:
data = {}
for opt in cp.options(sec):
if opt.startswith("data["):
temp = data.get("data", {})
#temp.append(self._get_config_setting(cp, sec, opt))
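                            # Option names look like "data[3]"; strip the prefix and trailing ']' to get the index.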
pos = int(opt[5:-1])
temp[pos] = self._get_config_setting(cp, sec, opt)
data["data"] = temp
else:
data[opt] = self._get_config_setting(cp, sec, opt)
if "data" in data:
data["data"] = [data["data"][key]
for key in sorted(data["data"].keys())]
if "name" not in data:
data["name"] = "Unnamed Gramplet"
data["tname"] = _("Unnamed Gramplet")
retval.append(data)
else:
# give defaults as currently known
for name in defaults:
if name in AVAILABLE_GRAMPLETS():
retval.append(GET_AVAILABLE_GRAMPLETS(name))
return ((visible, default_page), retval)
def __save(self):
"""
Save the gramplet configuration.
"""
filename = self.configfile
try:
with open(filename, "w", encoding='utf-8') as fp:
fp.write(";; Gramplet bar configuration file" + NL)
fp.write((";; Automatically created at %s" %
time.strftime("%Y/%m/%d %H:%M:%S")) + NL + NL)
fp.write("[Bar Options]" + NL)
fp.write(("visible=%s" + NL) % self.get_property('visible'))
fp.write(("page=%d" + NL) % self.get_current_page())
fp.write(NL)
if self.empty:
gramplet_list = []
else:
gramplet_list = [self.get_nth_page(page_num)
for page_num in range(self.get_n_pages())]
for page_num, gramplet in enumerate(gramplet_list):
opts = get_gramplet_options_by_name(gramplet.gname)
if opts is not None:
base_opts = opts.copy()
for key in base_opts:
if key in gramplet.__dict__:
base_opts[key] = gramplet.__dict__[key]
fp.write(("[%s]" + NL) % gramplet.gname)
for key in base_opts:
if key in ["content", "title", "tname", "row", "column",
"page", "version", "gramps"]: # don't save
continue
elif key == "data":
if not isinstance(base_opts["data"], (list, tuple)):
fp.write(("data[0]=%s" + NL) % base_opts["data"])
else:
cnt = 0
for item in base_opts["data"]:
fp.write(("data[%d]=%s" + NL) % (cnt, item))
cnt += 1
else:
fp.write(("%s=%s" + NL)% (key, base_opts[key]))
fp.write(("page=%d" + NL) % page_num)
fp.write(NL)
except IOError:
LOG.warning("Failed writing '%s'; gramplets not saved" % filename)
return
def set_active(self):
"""
        Called when the view is set as active.
"""
if not self.empty:
gramplet = self.get_nth_page(self.get_current_page())
if gramplet and gramplet.pui:
gramplet.pui.active = True
if gramplet.pui.dirty:
gramplet.pui.update()
def set_inactive(self):
"""
        Called when the view is set as inactive.
"""
if not self.empty:
gramplet = self.get_nth_page(self.get_current_page())
if gramplet and gramplet.pui:
gramplet.pui.active = False
def on_delete(self):
"""
Called when the view is closed.
"""
list(map(self.__dock_gramplet, self.detached_gramplets))
if not self.empty:
for page_num in range(self.get_n_pages()):
gramplet = self.get_nth_page(page_num)
# this is the only place where the gui runs user code directly
if gramplet.pui:
gramplet.pui.on_save()
self.__save()
def add_gramplet(self, gname):
"""
Add a gramplet by name.
"""
if self.has_gramplet(gname):
return
all_opts = get_gramplet_options_by_name(gname)
gramplet = make_requested_gramplet(TabGramplet, self, all_opts,
self.dbstate, self.uistate)
if not gramplet:
LOG.warning("Problem creating '%s'", gname)
return
page_num = self.__add_tab(gramplet)
self.set_current_page(page_num)
def remove_gramplet(self, gname):
"""
Remove a gramplet by name.
"""
for gramplet in self.detached_gramplets:
if gramplet.gname == gname:
self.__dock_gramplet(gramplet)
self.remove_page(self.page_num(gramplet))
return
for page_num in range(self.get_n_pages()):
gramplet = self.get_nth_page(page_num)
if gramplet.gname == gname:
self.remove_page(page_num)
return
def has_gramplet(self, gname):
"""
Return True if the GrampletBar contains the gramplet, else False.
"""
return gname in self.all_gramplets()
def all_gramplets(self):
"""
Return a list of names of all the gramplets in the GrampletBar.
"""
if self.empty:
return self.detached_gramplets
else:
return [gramplet.gname for gramplet in self.get_children() +
self.detached_gramplets]
def restore(self):
"""
Restore the GrampletBar to its default gramplets.
"""
list(map(self.remove_gramplet, self.all_gramplets()))
list(map(self.add_gramplet, self.defaults))
self.set_current_page(0)
def __create_empty_tab(self):
"""
Create an empty tab to be displayed when the GrampletBar is empty.
"""
tab_label = Gtk.Label(label=_('Gramplet Bar'))
tab_label.show()
msg = _('Select the down arrow on the right corner for adding, removing or restoring gramplets.')
content = Gtk.Label(label=msg)
content.set_halign(Gtk.Align.START)
content.set_line_wrap(True)
content.set_size_request(150, -1)
content.show()
self.append_page(content, tab_label)
return content
def __add_tab(self, gramplet):
"""
Add a tab to the notebook for the given gramplet.
"""
label = self.__create_tab_label(gramplet)
page_num = self.append_page(gramplet, label)
return page_num
def __create_tab_label(self, gramplet):
"""
Create a tab label consisting of a label and a close button.
"""
tablabel = TabLabel(gramplet, self.__delete_clicked)
if hasattr(gramplet.pui, "has_data"):
tablabel.set_has_data(gramplet.pui.has_data)
        else:  # plain function gramplets have no has_data(); treat them as having data
tablabel.set_has_data(True)
if config.get('interface.grampletbar-close'):
tablabel.use_close(True)
else:
tablabel.use_close(False)
return tablabel
def cb_close_changed(self):
"""
Close button preference changed.
"""
for gramplet in self.get_children():
tablabel = self.get_tab_label(gramplet)
if not isinstance(tablabel, Gtk.Label):
tablabel.use_close(config.get('interface.grampletbar-close'))
def __delete_clicked(self, button, gramplet):
"""
Called when the delete button is clicked.
"""
page_num = self.page_num(gramplet)
self.remove_page(page_num)
def __switch_page(self, notebook, unused, new_page):
"""
Called when the user has switched to a new GrampletBar page.
"""
old_page = notebook.get_current_page()
if old_page >= 0:
gramplet = self.get_nth_page(old_page)
if gramplet and gramplet.pui:
gramplet.pui.active = False
gramplet = self.get_nth_page(new_page)
if not self.empty:
if gramplet and gramplet.pui:
gramplet.pui.active = True
if gramplet.pui.dirty:
gramplet.pui.update()
def __page_added(self, notebook, unused, new_page):
"""
Called when a new page is added to the GrampletBar.
"""
gramplet = self.get_nth_page(new_page)
if self.empty:
if isinstance(gramplet, TabGramplet):
self.empty = False
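                # Remove the placeholder tab; it now sits at whichever index the new gramplet did not take.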
if new_page == 0:
self.remove_page(1)
else:
self.remove_page(0)
else:
return
gramplet.pane = self
label = self.__create_tab_label(gramplet)
self.set_tab_label(gramplet, label)
self.set_tab_reorderable(gramplet, True)
self.set_tab_detachable(gramplet, True)
if gramplet in self.detached_gramplets:
self.detached_gramplets.remove(gramplet)
self.reorder_child(gramplet, gramplet.page)
def __page_removed(self, notebook, unused, page_num):
"""
        Called when a page is removed from the GrampletBar.
"""
if self.get_n_pages() == 0:
self.empty = True
self.__create_empty_tab()
def __create_window(self, grampletbar, gramplet, x_pos, y_pos):
"""
        Called when a gramplet tab is dragged out of the GrampletBar to create a detached window.
"""
gramplet.page = self.page_num(gramplet)
self.detached_gramplets.append(gramplet)
win = DetachedWindow(grampletbar, gramplet, x_pos, y_pos)
gramplet.detached_window = win
return win.get_notebook()
def __dock_gramplet(self, gramplet):
"""
Dock a detached gramplet.
"""
gramplet.detached_window.close()
gramplet.detached_window = None
def __button_clicked(self, button):
"""
Called when the drop-down button is clicked.
"""
self.menu = Gtk.Menu()
menu = self.menu
ag_menu = Gtk.MenuItem(label=_('Add a gramplet'))
nav_type = self.pageview.navigation_type()
skip = self.all_gramplets()
gramplet_list = GET_GRAMPLET_LIST(nav_type, skip)
gramplet_list.sort()
self.__create_submenu(ag_menu, gramplet_list, self.__add_clicked)
ag_menu.show()
menu.append(ag_menu)
if not (self.empty or config.get('interface.grampletbar-close')):
rg_menu = Gtk.MenuItem(label=_('Remove a gramplet'))
gramplet_list = [(gramplet.title, gramplet.gname)
for gramplet in self.get_children() +
self.detached_gramplets]
gramplet_list.sort()
self.__create_submenu(rg_menu, gramplet_list,
self.__remove_clicked)
rg_menu.show()
menu.append(rg_menu)
rd_menu = Gtk.MenuItem(label=_('Restore default gramplets'))
rd_menu.connect("activate", self.__restore_clicked)
rd_menu.show()
menu.append(rd_menu)
# Separator.
rs_menu = Gtk.SeparatorMenuItem()
rs_menu.show()
menu.append(rs_menu)
rh_menu = Gtk.MenuItem(label=_('Gramplet Bar Help'))
rh_menu.connect("activate", self.on_help_grampletbar_clicked)
rh_menu.show()
menu.append(rh_menu)
rg_menu = Gtk.MenuItem(label=_('About Gramplets'))
rg_menu.connect("activate", self.on_help_gramplets_clicked)
rg_menu.show()
menu.append(rg_menu)
menu.show_all()
menu.popup(None, None, cb_menu_position, button, 0, 0)
def __create_submenu(self, main_menu, gramplet_list, callback_func):
"""
Create a submenu of the context menu.
"""
if main_menu:
submenu = main_menu.get_submenu()
submenu = Gtk.Menu()
for entry in gramplet_list:
item = Gtk.MenuItem(label=entry[0])
item.connect("activate", callback_func, entry[1])
item.show()
submenu.append(item)
main_menu.set_submenu(submenu)
def __add_clicked(self, menu, gname):
"""
Called when a gramplet is added from the context menu.
"""
self.add_gramplet(gname)
def __remove_clicked(self, menu, gname):
"""
Called when a gramplet is removed from the context menu.
"""
self.remove_gramplet(gname)
def __restore_clicked(self, menu):
"""
Called when restore defaults is clicked from the context menu.
"""
QuestionDialog(
_("Restore to defaults?"),
_("The gramplet bar will be restored to contain its default "
"gramplets. This action cannot be undone."),
_("OK"),
self.restore,
parent=self.uistate.window)
def get_config_funcs(self):
"""
Return a list of configuration functions.
"""
funcs = []
if self.empty:
gramplets = []
else:
gramplets = self.get_children()
for gramplet in gramplets + self.detached_gramplets:
gui_options = gramplet.make_gui_options()
if gui_options:
funcs.append(self.__build_panel(gramplet.title, gui_options))
return funcs
def __build_panel(self, title, gui_options):
"""
Return a configuration function that returns the title of a page in
the Configure View dialog and a gtk container defining the page.
"""
def gramplet_panel(configdialog):
return title, gui_options
return gramplet_panel
def on_help_grampletbar_clicked(self, dummy):
""" Button: Display the relevant portion of Gramps manual"""
display_url(WIKI_HELP_GRAMPLETBAR)
def on_help_gramplets_clicked(self, dummy):
""" Button: Display the relevant portion of Gramps manual"""
display_url(WIKI_HELP_ABOUT_GRAMPLETS)
#-------------------------------------------------------------------------
#
# TabGramplet class
#
#-------------------------------------------------------------------------
class TabGramplet(Gtk.ScrolledWindow, GuiGramplet):
"""
Class that handles the plugin interfaces for the GrampletBar.
"""
def __init__(self, pane, dbstate, uistate, title, **kwargs):
"""
Internal constructor for GUI portion of a gramplet.
"""
Gtk.ScrolledWindow.__init__(self)
GuiGramplet.__init__(self, pane, dbstate, uistate, title, **kwargs)
self.scrolledwindow = self
self.textview = Gtk.TextView()
self.textview.set_editable(False)
self.textview.set_wrap_mode(Gtk.WrapMode.WORD)
self.buffer = UndoableBuffer()
self.text_length = 0
self.textview.set_buffer(self.buffer)
self.textview.connect("key-press-event", self.on_key_press_event)
self.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.add(self.textview)
self.show_all()
self.track = []
def get_title(self):
return self.title
def get_container_widget(self):
"""
Return the top level container widget.
"""
return self
#-------------------------------------------------------------------------
#
# DetachedWindow class
#
#-------------------------------------------------------------------------
class DetachedWindow(ManagedWindow):
"""
Class for showing a detached gramplet.
"""
def __init__(self, grampletbar, gramplet, x_pos, y_pos):
"""
Construct the window.
"""
self.title = gramplet.title + " " + _("Gramplet")
self.grampletbar = grampletbar
self.gramplet = gramplet
ManagedWindow.__init__(self, gramplet.uistate, [], self.title)
dlg = Gtk.Dialog(transient_for=gramplet.uistate.window,
destroy_with_parent = True)
dlg.add_button(_('_Close'), Gtk.ResponseType.CLOSE)
self.set_window(dlg, None, self.title)
self.window.move(x_pos, y_pos)
self.window.set_default_size(gramplet.detached_width,
gramplet.detached_height)
self.window.add_button(_('_Help'), Gtk.ResponseType.HELP)
self.window.connect('response', self.handle_response)
self.notebook = Gtk.Notebook()
self.notebook.set_show_tabs(False)
self.notebook.set_show_border(False)
self.notebook.connect('page-added', self.page_added)
self.notebook.show()
self.window.vbox.pack_start(self.notebook, True, True, 0)
self.show()
def page_added(self, notebook, gramplet, page_num):
"""
Called when the gramplet is added to the notebook. This takes the
focus from the help button (bug #6306).
"""
gramplet.grab_focus()
def handle_response(self, object, response):
"""
Callback for taking care of button clicks.
"""
if response == Gtk.ResponseType.CLOSE:
self.close()
elif response == Gtk.ResponseType.HELP:
# translated name:
if self.gramplet.help_url:
if self.gramplet.help_url.startswith("http://"):
display_url(self.gramplet.help_url)
else:
display_help(self.gramplet.help_url)
else:
display_help(WIKI_HELP_PAGE,
self.gramplet.tname.replace(" ", "_"))
def get_notebook(self):
"""
Return the notebook.
"""
return self.notebook
def build_menu_names(self, obj):
"""
Part of the Gramps window interface.
"""
return (self.title, 'Gramplet')
def get_title(self):
"""
Returns the window title.
"""
return self.title
def close(self, *args):
"""
Dock the detached gramplet back in the GrampletBar from where it came.
"""
size = self.window.get_size()
self.gramplet.detached_width = size[0]
self.gramplet.detached_height = size[1]
self.gramplet.detached_window = None
self.notebook.remove(self.gramplet)
self.grampletbar.add(self.gramplet)
ManagedWindow.close(self, *args)
#-------------------------------------------------------------------------
#
# TabLabel class
#
#-------------------------------------------------------------------------
class TabLabel(Gtk.Box):
"""
Create a tab label consisting of a label and a close button.
"""
def __init__(self, gramplet, callback):
Gtk.Box.__init__(self)
self.text = gramplet.title
self.set_spacing(4)
self.label = Gtk.Label()
self.label.set_tooltip_text(gramplet.tname)
self.label.show()
self.closebtn = Gtk.Button()
image = Gtk.Image()
image.set_from_icon_name('window-close', Gtk.IconSize.MENU)
self.closebtn.connect("clicked", callback, gramplet)
self.closebtn.set_image(image)
self.closebtn.set_relief(Gtk.ReliefStyle.NONE)
self.pack_start(self.label, True, True, 0)
self.pack_end(self.closebtn, False, False, 0)
def set_has_data(self, has_data):
"""
Set the label to indicate if the gramplet has data.
"""
if has_data:
self.label.set_text("<b>%s</b>" % self.text)
self.label.set_use_markup(True)
else:
self.label.set_text(self.text)
def use_close(self, use_close):
"""
        Display the close button according to user preference.
"""
if use_close:
self.closebtn.show()
else:
self.closebtn.hide()
def cb_menu_position(*args):
"""
Determine the position of the popup menu.
"""
# takes two argument: menu, button
if len(args) == 2:
menu = args[0]
button = args[1]
    # broken introspection can't handle MenuPositionFunc annotations correctly
else:
menu = args[0]
button = args[3]
ret_val, x_pos, y_pos = button.get_window().get_origin()
x_pos += button.get_allocation().x
y_pos += button.get_allocation().y + button.get_allocation().height
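    # Pop the menu up just below the button; the final False means "do not push_in".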
return (x_pos, y_pos, False)
| gramps-project/gramps | gramps/gui/widgets/grampletbar.py | Python | gpl-2.0 | 28,336 | 0.002188 |
#!/usr/bin/env python
import re
from ciscoconfparse import CiscoConfParse
def main():
'''
using the ciscoconfparse to find the crypto maps that are not using AES
'''
cisco_file = 'cisco_ipsec.txt'
cisco_cfg = CiscoConfParse(cisco_file)
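    # Parent/child search: find 'crypto map CRYPTO' entries with no child line mentioning AES.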
crypto_maps = cisco_cfg.find_objects_wo_child(parentspec=r"^crypto map CRYPTO", childspec=r"AES")
print "\n Crypto Maps not using AES:"
for entry in crypto_maps:
for child in entry.children:
if 'transform' in child.text:
match = re.search(r"set transform-set (.*)$", child.text)
encryption = match.group(1)
print " {0} >>> {1}".format(entry.text.strip(), encryption)
print
if __name__ == "__main__":
main()
| hbenaouich/Learning-Python | class-1/ex10_confParse.py | Python | apache-2.0 | 760 | 0.009211 |
# -*- coding: utf-8 -*-
"""
flaskbb.management.models
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains all management related models.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from wtforms import (TextField, IntegerField, FloatField, BooleanField,
SelectField, SelectMultipleField, validators)
from flask_wtf import Form
from openspending.forum._compat import max_integer, text_type, iteritems
from openspending.core import db, cache
from openspending.forum.utils.database import CRUDMixin
class SettingsGroup(db.Model, CRUDMixin):
__tablename__ = "forum_settingsgroup"
key = db.Column(db.String(255), primary_key=True)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=False)
settings = db.relationship("Setting", lazy="dynamic", backref="group",
cascade="all, delete-orphan")
class Setting(db.Model, CRUDMixin):
__tablename__ = "forum_settings"
key = db.Column(db.String(255), primary_key=True)
value = db.Column(db.PickleType, nullable=False)
settingsgroup = db.Column(db.String,
db.ForeignKey('forum_settingsgroup.key',
use_alter=True,
name="fk_settingsgroup"),
nullable=False)
# The name (displayed in the form)
name = db.Column(db.String(200), nullable=False)
# The description (displayed in the form)
description = db.Column(db.Text, nullable=False)
# Available types: string, integer, float, boolean, select, selectmultiple
value_type = db.Column(db.String(20), nullable=False)
# Extra attributes like, validation things (min, max length...)
# For Select*Fields required: choices
extra = db.Column(db.PickleType)
@classmethod
def get_form(cls, group):
"""Returns a Form for all settings found in :class:`SettingsGroup`.
:param group: The settingsgroup name. It is used to get the settings
which are in the specified group.
"""
class SettingsForm(Form):
pass
# now parse the settings in this group
for setting in group.settings:
field_validators = []
if setting.value_type in ("integer", "float"):
validator_class = validators.NumberRange
elif setting.value_type == "string":
validator_class = validators.Length
# generate the validators
if "min" in setting.extra:
# Min number validator
field_validators.append(
validator_class(min=setting.extra["min"])
)
if "max" in setting.extra:
# Max number validator
field_validators.append(
validator_class(max=setting.extra["max"])
)
# Generate the fields based on value_type
# IntegerField
if setting.value_type == "integer":
setattr(
SettingsForm, setting.key,
IntegerField(setting.name, validators=field_validators,
description=setting.description)
)
# FloatField
elif setting.value_type == "float":
setattr(
SettingsForm, setting.key,
FloatField(setting.name, validators=field_validators,
description=setting.description)
)
# TextField
elif setting.value_type == "string":
setattr(
SettingsForm, setting.key,
TextField(setting.name, validators=field_validators,
description=setting.description)
)
# SelectMultipleField
elif setting.value_type == "selectmultiple":
# if no coerce is found, it will fallback to unicode
if "coerce" in setting.extra:
coerce_to = setting.extra['coerce']
else:
coerce_to = text_type
setattr(
SettingsForm, setting.key,
SelectMultipleField(
setting.name,
choices=setting.extra['choices'](),
coerce=coerce_to,
description=setting.description
)
)
# SelectField
elif setting.value_type == "select":
# if no coerce is found, it will fallback to unicode
if "coerce" in setting.extra:
coerce_to = setting.extra['coerce']
else:
coerce_to = text_type
setattr(
SettingsForm, setting.key,
SelectField(
setting.name,
coerce=coerce_to,
choices=setting.extra['choices'](),
description=setting.description)
)
# BooleanField
elif setting.value_type == "boolean":
setattr(
SettingsForm, setting.key,
BooleanField(setting.name, description=setting.description)
)
return SettingsForm
@classmethod
def get_all(cls):
return cls.query.all()
@classmethod
def update(cls, settings, app=None):
"""Updates the cache and stores the changes in the
database.
:param settings: A dictionary with setting items.
"""
# update the database
for key, value in iteritems(settings):
setting = cls.query.filter(Setting.key == key.lower()).first()
setting.value = value
db.session.add(setting)
db.session.commit()
cls.invalidate_cache()
@classmethod
def get_settings(cls, from_group=None):
"""This will return all settings with the key as the key for the dict
and the values are packed again in a dict which contains
the remaining attributes.
        :param from_group: Optional. If given, return only the settings from this group.
"""
result = None
if from_group is not None:
result = from_group.settings
else:
result = cls.query.all()
settings = {}
for setting in result:
settings[setting.key] = {
'name': setting.name,
'description': setting.description,
'value': setting.value,
'value_type': setting.value_type,
'extra': setting.extra
}
return settings
@classmethod
@cache.memoize(timeout=max_integer)
def as_dict(cls, from_group=None, upper=True):
"""Returns all settings as a dict. This method is cached. If you want
to invalidate the cache, simply execute ``self.invalidate_cache()``.
:param from_group: Returns only the settings from the group as a dict.
:param upper: If upper is ``True``, the key will use upper-case
                      letters. Defaults to ``True``.
"""
settings = {}
result = None
if from_group is not None:
result = SettingsGroup.query.filter_by(key=from_group).\
first_or_404()
result = result.settings
else:
result = cls.query.all()
for setting in result:
if upper:
setting_key = setting.key.upper()
else:
setting_key = setting.key
settings[setting_key] = setting.value
return settings
@classmethod
def invalidate_cache(cls):
"""Invalidates this objects cached metadata."""
cache.delete_memoized(cls.as_dict, cls)
| nathanhilbert/FPA_Core | openspending/forum/management/models.py | Python | agpl-3.0 | 8,135 | 0 |
# -*- coding: utf-8 -*-
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ludacity.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| branchard/ludacity | manage.py | Python | gpl-2.0 | 253 | 0 |
# -*- encoding: utf-8 -*-
# https://github.com/justintv/Twitch-API/blob/master/v3_resources/blocks.md
from twitch.queries import query
# Needs Authentification
@query
def by_name(user):
raise NotImplementedError
# Needs Authentification, needs PUT
@query
def add_block(user, target):
raise NotImplementedError
# Needs Authentification, needs DELETE
@query
def del_block(user, target):
raise NotImplementedError
| ingwinlu/python-twitch | twitch/api/v3/blocks.py | Python | gpl-3.0 | 430 | 0 |
class Solution(object):
def isScramble(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
if s1 == s2:
return True
if len(s1) != len(s2):
return False
from collections import Counter
if Counter(s1) != Counter(s2):
return False
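        # Explanatory note: for each split point i, s1 scrambles to s2 if
        # either both prefixes and both suffixes match pairwise (no swap at
        # this node), or s1's prefix of length i matches s2's suffix of
        # length i and the remainders match (the children were swapped).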
for i in range(1, len(s1)):
if self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:]):
return True
if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:-i]):
return True
return False
s = Solution()
inputs = [
["great", "rgeat"],
["abcde", "caebd"]
]
for i in inputs:
print s.isScramble(*i)
| daicang/Leetcode-solutions | 087-scramble-string.py | Python | mit | 759 | 0.00527 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import dateutil.parser
import requests
from django.conf import settings
from django.contrib.admin import ModelAdmin
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy
from weblate import USER_AGENT
from weblate.auth.models import User
from weblate.trans.models import Component, Project
from weblate.utils.backup import backup, get_paper_key, initialize, make_password, prune
from weblate.utils.site import get_site_url
from weblate.utils.stats import GlobalStats
from weblate.vcs.ssh import get_key_data
class WeblateModelAdmin(ModelAdmin):
"""Customized Model Admin object."""
delete_confirmation_template = 'wladmin/delete_confirmation.html'
delete_selected_confirmation_template = 'wladmin/delete_selected_confirmation.html'
class ConfigurationErrorManager(models.Manager):
def add(self, name, message, timestamp=None):
if timestamp is None:
timestamp = timezone.now()
obj, created = self.get_or_create(
name=name, defaults={'message': message, 'timestamp': timestamp}
)
if created:
return obj
if obj.message != message or obj.timestamp != timestamp:
obj.message = message
obj.timestamp = timestamp
obj.save(update_fields=['message', 'timestamp'])
return obj
def remove(self, name):
self.filter(name=name).delete()
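# Illustrative usage (the error name and message are placeholders):
# ``ConfigurationError.objects.add('celery', 'No workers are running')``
# creates or refreshes the stored error, and
# ``ConfigurationError.objects.remove('celery')`` clears it again.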
@python_2_unicode_compatible
class ConfigurationError(models.Model):
name = models.CharField(unique=True, max_length=150)
message = models.TextField()
timestamp = models.DateTimeField(default=timezone.now)
ignored = models.BooleanField(default=False, db_index=True)
objects = ConfigurationErrorManager()
class Meta(object):
index_together = [('ignored', 'timestamp')]
def __str__(self):
return self.name
SUPPORT_NAMES = {
'community': ugettext_lazy('Community support'),
'hosted': ugettext_lazy('Hosted service'),
'basic': ugettext_lazy('Basic self-hosted support'),
'extended': ugettext_lazy('Extended self-hosted support'),
}
class SupportStatusManager(models.Manager):
def get_current(self):
try:
return self.latest('expiry')
except SupportStatus.DoesNotExist:
return SupportStatus(name='community')
@python_2_unicode_compatible
class SupportStatus(models.Model):
name = models.CharField(max_length=150)
secret = models.CharField(max_length=400)
expiry = models.DateTimeField(db_index=True, null=True)
in_limits = models.BooleanField(default=True)
objects = SupportStatusManager()
def get_verbose(self):
return SUPPORT_NAMES.get(self.name, self.name)
def __str__(self):
return '{}:{}'.format(self.name, self.expiry)
def refresh(self):
stats = GlobalStats()
data = {
'secret': self.secret,
'site_url': get_site_url(),
'site_title': settings.SITE_TITLE,
'users': User.objects.count(),
'projects': Project.objects.count(),
'components': Component.objects.count(),
'languages': stats.languages,
'source_strings': stats.source_strings,
}
ssh_key = get_key_data()
if ssh_key:
data['ssh_key'] = ssh_key['key']
headers = {'User-Agent': USER_AGENT}
response = requests.request(
'post', settings.SUPPORT_API_URL, headers=headers, data=data
)
response.raise_for_status()
payload = response.json()
self.name = payload['name']
self.expiry = dateutil.parser.parse(payload['expiry'])
self.in_limits = payload['in_limits']
BackupService.objects.get_or_create(
repository=payload['backup_repository'], defaults={"enabled": False}
)
@python_2_unicode_compatible
class BackupService(models.Model):
repository = models.CharField(
max_length=500, default='', verbose_name=ugettext_lazy('Backup repository')
)
enabled = models.BooleanField(default=True)
timestamp = models.DateTimeField(default=timezone.now)
passphrase = models.CharField(max_length=100, default=make_password)
paperkey = models.TextField()
def __str__(self):
return self.repository
def last_logs(self):
return self.backuplog_set.order_by('-timestamp')[:10]
def ensure_init(self):
if not self.paperkey:
log = initialize(self.repository, self.passphrase)
self.backuplog_set.create(event='init', log=log)
self.paperkey = get_paper_key(self.repository)
self.save()
def backup(self):
log = backup(self.repository, self.passphrase)
self.backuplog_set.create(event='backup', log=log)
def prune(self):
log = prune(self.repository, self.passphrase)
self.backuplog_set.create(event='prune', log=log)
@python_2_unicode_compatible
class BackupLog(models.Model):
service = models.ForeignKey(BackupService, on_delete=models.deletion.CASCADE)
timestamp = models.DateTimeField(default=timezone.now)
event = models.CharField(
max_length=100,
choices=(
('backup', ugettext_lazy('Backup performed')),
('prune', ugettext_lazy('Deleted the oldest backups')),
('init', ugettext_lazy('Repository initialization')),
),
)
log = models.TextField()
def __str__(self):
return '{}:{}'.format(self.service, self.event)
| dontnod/weblate | weblate/wladmin/models.py | Python | gpl-3.0 | 6,438 | 0.000777 |
#!/usr/bin/python
#
# motorsClass.py MOTORS CLASS
#
# METHODS:
# motors(readingPerSec) # create instance and motor control thread
# cancel() # stop motors, close motor control thread
# drive(driveSpeed) # ramp speed to go fwd(+) or back(-) at 0-100%
# travel(distance.inInches, driveSpeed=MEDIUM) # go fwd(+) or back(-) a distance
# spin(spinSpeed) # ramp spin speed to go ccw(+) or cw(-) at 0-100%
# turn(Motors.DIRECTION) # Turn ccw(+) cw(-) to angle from 0
# stop() # come to graceful stop
# modeToStr(mode=motorsMode) # string version of motorsMode or passed mode constant
# mode() # returns Motors.STOPPED,DRIVE,TRAVEL,SPIN,TURN,STOP
# halt() # immediate stop
# currentSpeed() # numerical speed percent +/- 0-100 of minToMove to max speed
# speedToStr(speed=_currentSpeed) # returns string name or str() of param or currentSpeed
# calibrate() # find minFwdPwr, minBwdPwr,
# # minCCWDPwr, minCWPwr,
# # biasFwd, biasBwd
# waitForStopped(timeout=60) # call to wait for motion to end with timeout
# VARIABLES
# readingsPerSec
# CONSTANTS
#
# Motors.NONE,CW360,CCW360,CW180,CCW180,CW135,CCW135,CW90,CCW90,CW45,CCW45 (TURNDIRS)
# dirToStr() # returns string for Motor.TURNDIRS
#
# ### INTERNAL METHODS
# __init__(readingsPerSec=10) # initialize instance of class
# setup_motor_pins() # set up Pi Droid Alpha and GPIO
#
# ### THREAD METHODS
#
# pollMotors(tSleep=0.1) # motor control thread
#
# rampTgtCurStep(target,current,rampStep) # calculate next speed on ramp to target
# speed2Pwr(s,driveSpeed,spinSpeed) # convert speed (+/- 0-100%) to
# setMotorsPwr(lPwr,rPwr) # Apply power to motors Pwr: +/- (0-255)
# # power between (+/- 255 and minimum to move)
# control() # dispatch to control methods based on motorsMode
# controlDrive() # monitor drive mode
# controlSpin() # monitor spin mode
# controlTravel() # monitor drive until distance reached
# controlTurn() # monitor spin until angle reached
# controlStop() # monitor drive or spin while stopping
# controlStopped() # routine called while motors are not running
#
# motors_off()
#
# motors_fwd()
# motors_bwd()
# motors_ccw()
# motors_cw()
# INTERNAL VARS
#
# motorsMode
# self.debugLevel 0=off 1=basic 99=all
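#
# Minimal usage sketch (illustrative only; distances, speeds and timeouts
# are arbitrary values, not defaults of this class):
#
# motors = Motors(readingsPerSec=10) # starts the control thread
# motors.travel(6.0, Motors.MEDIUM) # drive forward six inches
# motors.waitForStopped(timeout=30)
# motors.turn(Motors.CCW90) # turn 90 degrees counter-clockwise
# motors.waitForStopped(timeout=30)
# motors.cancel() # stop the motors and end the control thread
#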
import sys
# uncomment when testing below rwpilib\
#sys.path.insert(0,'..')
import PDALib
import myPDALib
import myPyLib
from myPyLib import sign, clamp
import time
import threading
import traceback
import datetime
import encoders
class Motors():
# CLASS VARS (Avail to all instances)
# Access as Motors.class_var_name
pollThreadHandle=None # the SINGLE read sensor thread for the Motors class
tSleep=0.1 # time for read_sensor thread to sleep
debugLevel=0 # set self.debugLevel (or motors.debugLevel) =99 for all, =1 for some
# Empirical settings for minimum drive to turn each wheel
# PWM_frequency dependent, PiDALib default is 490
# PWM_f RMotorMinF LMotorMinF
# 10000 215 185
# 490 83 73 <--
# 100 34 33
# 33 22 20
# RMotorMinF = 83 # no load (takes more to get right going reliably)
# LMotorMinF = 73 # no load
# RMotorMinB = 94 # no load (takes more to get right going reliably)
# LMotorMinB = 86 # no load
# Motor Pins
# SRV 6 Motor 1 Speed (PWM)
# SRV 7 Motor 2 Speed (PWM)
RMotor = 6
LMotor = 7
# DIO 12 (A4) Motor 1 Dir A (0=coast 1=F/Brake)
# DIO 13 (A5) Motor 1 Dir B (0=coast 1=R/Brake)
# DIO 14 (A6) Motor 2 Dir A (0=coast 1=F/Brake)
# DIO 15 (A7) Motor 2 Dir B (0=coast 1=R/Brake)
M1DirA = 12
M1DirB = 13
M2DirA = 14
M2DirB = 15
minFwdPwr = 145 # 83 # minimum to start moving forward
minBwdPwr = 145 # 120 # 94 # minimum to start moving backward
driveSpeed = 0 # target 0 to +/-100% of speed range
_currentSpeed = 0 # current speed at the moment, ramps up or down
rampStep = 13 # amount to change speed each time through control loop
minCCWPwr = 120 # 86 # minimum drive to spin CCW
minCWPwr = 120 # 94 # minimum drive to spin CW
biasFwd = 21 # amount of right more than left needed to go Fwd straight
biasBwd = 0 # amount of right more than left needed to go Bwd straight
maxPwr = 255
driveDistance = 0 # distance in inches fwd(+) or bwd(-)
currentDistance= 0 # how far travelled since told to travel
initialLeftCount = 0 # place to store the counter value when starting motion
initialRightCount = 0
initialMeanCount = 0
targetTime= 0 # time to stop travel (till encoders)
spinSpeed = 0 # speed to spin ccw(+) cw(-)
turnDir = 0
#Modes
STOPPED = 0
DRIVE = 1
TRAVEL = 2
SPIN = 3
TURN = 4
STOP = 5
Modes2Str = { STOPPED : 'STOPPED',
STOP : 'STOP',
DRIVE : 'DRIVE',
TRAVEL : 'TRAVEL',
SPIN : 'SPIN',
TURN : 'TURN' }
motorsMode = STOPPED
def mode(self):
return self.motorsMode
  def modeToStr(self, mode=None):
    if mode is None:
      mode = self.motorsMode
    return Motors.Modes2Str[mode]
lastMotorsMode = STOPPED
#Speeds
NONE = 0
SLOW = 1
WALK = 5
MEDIUM = 50
FAST = 100
SpeedsToStr = {NONE : 'NONE',
SLOW : 'SLOW',
WALK : 'WALK',
MEDIUM : 'MEDIUM',
FAST : 'FAST',
-SLOW : '-SLOW',
-WALK : '-WALK',
-MEDIUM : '-MEDIUM',
-FAST : '-FAST' }
  def currentSpeed(self):
    return self._currentSpeed
  def speedToStr(self, nSpeed=None):
    if nSpeed is None:
      nSpeed = self._currentSpeed
    if (nSpeed in Motors.SpeedsToStr):
      speedStr = Motors.SpeedsToStr[nSpeed]
    else:
      speedStr = str(nSpeed)
    return speedStr
InchesPerSec = { # travel distance per second (for 24")
SLOW : 1.5,
WALK : 2.0,
MEDIUM : 3.1,
FAST : 6.7,
-SLOW : 1.5,
-WALK : 2.0,
-MEDIUM: 3.1,
-FAST : 6.7 }
MotorRampTime = 0.25 # NOT IMPLEMENTED
CCW360 = 3.15 # seconds to turn at Motors.MEDIUM
CCW180 = 1.58
CCW135 = 1.15
CCW90 = 0.84
CCW45 = 0.5
CW360 = -CCW360
CW180 = -CCW180
CW135 = -CCW135
CW90 = -CCW90 * 0.93
CW45 = -CCW45 * 0.9
NOTURN = 0
DirsToStr = {
CCW45 : 'CCW45',
CCW90 : 'CCW90',
CCW135 : 'CCW135',
CCW180 : 'CCW180',
CCW360 : 'CCW360',
CW45 : 'CW45',
CW90 : 'CW90',
CW135 : 'CW135',
CW180 : 'CW180',
CW360 : 'CW360',
NOTURN : 'NO TURN'}
def dirToStr(self, mDir):
if (mDir in self.DirsToStr):
strDir=self.DirsToStr[mDir]
else:
strDir='?'
return strDir
# end of class vars definition
# ### encoder methods
  def setInitialCounts(self):
    self.initialLeftCount = encoders.leftCount()
    self.initialRightCount = encoders.rightCount()
    self.initialMeanCount = (self.initialLeftCount + self.initialRightCount) / 2.0
def distanceTraveled(self):
currentLeftCount = encoders.leftCount()
currentRightCount = encoders.rightCount()
currentMeanCount = ( currentLeftCount + currentRightCount) / 2.0
countsTraveled = (currentMeanCount - self.initialMeanCount)
distance=countsTraveled * encoders.InchesPerCount
if (self.debugLevel > 1):
print "motorsClass:distanceTraveled: called"
print "encoder status:"
encoders.printStatus()
print "distance traveled:", distance
return distance
def __init__(self,readingsPerSec=10):
self.setup_motor_pins()
# SINGLETON TEST
if (Motors.pollThreadHandle!=None):
print "Second Motors Class Object, not starting pollingThread"
return None
# INITIALIZE CLASS INSTANCE
# START A THREAD
# threading target must be an instance
print "Motors: worker thread readingsPerSec:",readingsPerSec
Motors.tSleep=1.0/readingsPerSec #compute desired sleep
Motors.readingsPerSec=readingsPerSec
Motors.pollThreadHandle = threading.Thread( target=self.pollMotors,
args=(Motors.tSleep,))
Motors.pollThreadHandle.start()
if (self.debugLevel >0): print "Motors worker thread told to start",datetime.datetime.now()
time.sleep(0.01) # give time for motor control thread to start
#end init()
# Motors THREAD WORKER METHOD TO CONTROL MOTORS
def pollMotors(self,tSleep=0.1):
print ("Motors: pollMotors thread started with %f at %s" % (tSleep,datetime.datetime.now()))
t = threading.currentThread() # get handle to self (pollingMotors thread)
while getattr(t, "do_run", True): # check the do_run thread attribute
self.control()
time.sleep(tSleep)
if (self.debugLevel >0): print("do_run went false. Stopping pollMotors thread at %s" % datetime.datetime.now())
# RAMP FROM CURRENT TO TARGET IN STEP (TARGET BETWEEN -100 to +100)
#
  # usage: nextCurrent = rampTgtCurStep(target, current, rampStep)
#
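  # Illustrative worked examples (arbitrary values):
  #   rampTgtCurStep(100, 40, 30) -> 70 (ramping up toward the target)
  #   rampTgtCurStep(0, -10, 30)  -> 0  (clamped so it does not overshoot zero)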
def rampTgtCurStep(self, target, current, rampStep):
if (self.debugLevel >1): print "\n", datetime.datetime.now()
if (self.debugLevel >1): print "tgt: %d cur: %d ramp: %d" % (target, current, rampStep)
nextCurrent = current
if (nextCurrent != target):
if (target >= 0):
if (current < target):
nextCurrent += rampStep
nextCurrent = clamp(nextCurrent,-100,target)
elif (current > target):
nextCurrent -= rampStep
nextCurrent = clamp(nextCurrent,target,100)
elif (target<0):
if (current > target):
nextCurrent -= rampStep
nextCurrent = clamp(nextCurrent,target,100)
elif (current < target):
nextCurrent += rampStep
nextCurrent = clamp(nextCurrent,-100,target)
if (self.debugLevel >1): print "nextCurrent: %d" % nextCurrent
return nextCurrent
# ##### SPEED2PWR
# convert left,right speeds +/-(0 to 100) to power +/-(minDrive to 255)
# (with positive bias of right more than left)
# returns (lpwr,rpwr)
#
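  # Illustrative example using the class defaults above (minFwdPwr=145,
  # maxPwr=255, biasFwd=21): driveSpeed=50 gives
  #   pwrA = int((255-145)*0.50 + 145) = 200 -> right motor
  #   pwrB = 200 - 21 = 179 -> left motor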
def speed2Pwr(s,driveSpeed,spinSpeed):
# ### DRIVE
if (driveSpeed>0): # FORWARD
pwrA= int( (s.maxPwr-s.minFwdPwr)*(driveSpeed/100.0) + s.minFwdPwr)
pwrB= (pwrA-abs(s.biasFwd))
if (s.biasFwd>0):
rPwr=pwrA # right more than left
lPwr=pwrB
else:
rPwr=pwrB # right less than left
lPwr=pwrA
if (driveSpeed<0): # BACKWARD
pwrA= int( (s.maxPwr-s.minBwdPwr) * abs(driveSpeed/100.0) + s.minBwdPwr)
pwrB= (pwrA-abs(s.biasBwd))
if (s.biasBwd>0):
rPwr=-pwrA # right more than left
lPwr=-pwrB
else:
rPwr=-pwrB # right less than left
lPwr=-pwrA
# ### SPIN
if (spinSpeed>0 ): # CCW
rPwr= int( (s.maxPwr-s.minCCWPwr)*(spinSpeed/100.0) + s.minCCWPwr)
lPwr= -rPwr
elif (spinSpeed<0 ): # CW
lPwr= int( (s.maxPwr-s.minCWPwr) * abs(spinSpeed/100.0) + s.minCWPwr)
rPwr= -lPwr
elif (spinSpeed ==0 and driveSpeed==0):
lPwr=0
rPwr=0
return (lPwr,rPwr)
def controlDrive(self):
if (self.debugLevel >1): print "handling motorsMode DRIVE"
self._currentSpeed = self.rampTgtCurStep(self.driveSpeed,
self._currentSpeed,
self.rampStep)
lPwr,rPwr = self.speed2Pwr(self._currentSpeed,0)
self.setMotorsPwr(lPwr,rPwr) # pwrs=(lPwr,rPwr)
# ### CONTROL TRAVEL
# Travel at set speed until 30% of distance, then WALK
def controlTravel(self):
if (self.debugLevel >1): print "handling motorsMode TRAVEL"
self._currentSpeed = self.rampTgtCurStep(self.driveSpeed,
self._currentSpeed,
self.rampStep)
lPwr,rPwr = self.speed2Pwr(self._currentSpeed,0)
self.setMotorsPwr(lPwr,rPwr) # pwrs=(lPwr,rPwr)
if (self.debugLevel >1): print "controlTravel:",datetime.datetime.now()
if (self.targetTime == 0):
# tvl_time is based on driveDistance which may be negative - use sign(driveDistance) to fix
tvl_time = sign(self.driveDistance)* self.driveDistance/self.InchesPerSec[self.driveSpeed]
if (self.debugLevel >1): print "controlTravel: tvl_time: %.1f" % tvl_time
tgt_secs = int(tvl_time)
tgt_millisec = int((tvl_time-tgt_secs)*1000)
tgt_delta=datetime.timedelta(seconds=tgt_secs+5, milliseconds=tgt_millisec)
self.targetTime = datetime.datetime.now()+tgt_delta
if (datetime.datetime.now() > self.targetTime):
if (self.debugLevel >0): print ("controlTravel: hit time limit at %s" % datetime.datetime.now() )
self.targetTime = 0
self.stop()
self.currentDistance = self.distanceTraveled()
if (self.currentDistance > abs(self.driveDistance)):
if (self.debugLevel >0): print ("controlTravel: hit distance limit at %s" % datetime.datetime.now() )
self.targetTime = 0
self.stop()
else:
if (self.debugLevel >0):
print "controlTravel: dist: %.1f" % self.currentDistance
if (abs(self.driveSpeed) > Motors.WALK):
if (self.currentDistance > abs(0.3 * self.driveDistance)):
self.driveSpeed = sign(self.driveDistance) * Motors.WALK
if (self.debugLevel > 0): print "motorsClass:controlTravel:30% there - slow to WALK"
return
def controlSpin(self):
if (self.debugLevel >1): print "handling motorsMode SPIN"
self._currentSpeed = self.rampTgtCurStep(self.spinSpeed,
self._currentSpeed,
self.rampStep)
lPwr,rPwr = self.speed2Pwr(0,self._currentSpeed) # (drive,spin)
self.setMotorsPwr(lPwr,rPwr) # pwrs=(lPwr,rPwr)
return
def controlTurn(self):
if (self.debugLevel >1): print "handling motorsMode TURN"
if (self.debugLevel >1): print "controlTurn:",datetime.datetime.now()
if (self.targetTime == 0):
trn_time = sign(self.turnDir)*self.turnDir
tgt_secs = int(trn_time)
tgt_millisec = int( (trn_time - clamp(tgt_secs,0,60) )*1000)
tgt_delta=datetime.timedelta(seconds=tgt_secs, milliseconds=tgt_millisec)
self.targetTime = datetime.datetime.now()+tgt_delta
if (self.debugLevel >1): print ("tgt_secs: %d tgt_millisec: %d tgt_time: %s" % (tgt_secs, tgt_millisec, self.targetTime))
if (datetime.datetime.now() > self.targetTime):
if (self.debugLevel >1): print ("controlTurn: hit requested limit at %s" % datetime.datetime.now() )
self.targetTime = 0
# self.stop()
self.spinSpeed = Motors.NONE
self.driveSpeed = Motors.NONE
self.driveDistance = 0
self.motorsMode = Motors.STOP
self._currentSpeed = self.rampTgtCurStep(self.spinSpeed,
self._currentSpeed,
self.rampStep)
lPwr,rPwr = self.speed2Pwr(0,self._currentSpeed) # (drive,spin)
self.setMotorsPwr(lPwr,rPwr)
return
def controlStop(self):
if (self.debugLevel >1): print "handling motorsMode STOP"
if (self.debugLevel >1): print "controlStop:",datetime.datetime.now()
self._currentSpeed = self.rampTgtCurStep(0,
self._currentSpeed,
self.rampStep)
if self.lastMotorsMode in (Motors.DRIVE, Motors.TRAVEL):
lPwr,rPwr = self.speed2Pwr(self._currentSpeed,0) # (drive,spin)
elif self.lastMotorsMode in (Motors.SPIN, Motors.TURN):
lPwr,rPwr = self.speed2Pwr(0,self._currentSpeed) # (drive,spin)
else: # Handle stopping from all other modes
lPwr = 0
rPwr = 0
self.setMotorsPwr(lPwr,rPwr) # pwrs=(lPwr,rPwr)
if (self._currentSpeed == 0): self.motorsMode=Motors.STOPPED
return
def controlStopped(self):
#if (self.debugLevel >1): print "handling motorsMode STOPPED"
#if (self.debugLevel >1): print "controlStopped:",datetime.datetime.now()
pass
return
def control(self): #CONTROL THE MOTORS
if (self.debugLevel >1): print ("motorsMode: %s " % (self.Modes2Str[self.motorsMode]))
if (self.debugLevel >1): print ("driveSpeed: %s:%d spinSpeed: %s:%d currentSpeed: %d" % (self.SpeedsToStr[self.driveSpeed], self.driveSpeed, self.SpeedsToStr[self.spinSpeed], self.spinSpeed,self._currentSpeed ) )
if (self.debugLevel >1): print ("driveDist : %.1f currentDist: %.1f" % (self.driveDistance,self.currentDistance) )
if (self.debugLevel >1): print ("turnDir : %d " % self.turnDir)
if (self.motorsMode == Motors.DRIVE): self.controlDrive()
elif (self.motorsMode == Motors.TRAVEL): self.controlTravel()
elif (self.motorsMode == Motors.SPIN): self.controlSpin()
elif (self.motorsMode == Motors.TURN): self.controlTurn()
elif (self.motorsMode == Motors.STOP): self.controlStop()
elif (self.motorsMode == Motors.STOPPED):self.controlStopped()
else:
if (self.debugLevel >1): print "handling motorsMode else"
return
def setup_motor_pins(self):
PDALib.pinMode(Motors.RMotor,PDALib.PWM) # init motor1 speed control pin
PDALib.pinMode(Motors.LMotor,PDALib.PWM) # init motor2 speed control pin
PDALib.pinMode(Motors.M1DirA,PDALib.OUTPUT) #init motor1 dirA/Fwd enable
PDALib.pinMode(Motors.M1DirB,PDALib.OUTPUT) #init motor1 dirB/Bkwd enable
PDALib.pinMode(Motors.M2DirA,PDALib.OUTPUT) #init motor2 dirA/Fwd enable
PDALib.pinMode(Motors.M2DirB,PDALib.OUTPUT) #init motor2 dirB/Bkwd enable
def motors_off(self):
# two ways to stop - set speed to 0 or set direction to off/coast
self.spinSpeed =Motors.NONE
self.driveSpeed =Motors.NONE
# turn off the speed pins
PDALib.analogWrite(Motors.RMotor,0) #set motor1 to zero speed
PDALib.analogWrite(Motors.LMotor,0) #set motor2 to zero speed
# all direction pins to off
PDALib.digitalWrite(Motors.M1DirA,0) #set to off/coast
PDALib.digitalWrite(Motors.M1DirB,0) #set to off/coast
PDALib.digitalWrite(Motors.M2DirA,0) #set to off/coast
PDALib.digitalWrite(Motors.M2DirB,0) #set to off/coast
self.motorsMode = Motors.STOPPED
def motors_fwd(self):
    self.motors_off()
PDALib.digitalWrite(Motors.M1DirA,1) #rt set to forward
PDALib.digitalWrite(Motors.M2DirA,1) #lft set to forward
def motors_bwd(self):
    self.motors_off()
PDALib.digitalWrite(Motors.M1DirB,1) #rt set to backward
PDALib.digitalWrite(Motors.M2DirB,1) #lft set to backward
def motors_ccw(self):
    self.motors_off()
PDALib.digitalWrite(Motors.M1DirA,1) #R set to forward
PDALib.digitalWrite(Motors.M2DirB,1) #L set to backward
def motors_cw(self):
    self.motors_off()
PDALib.digitalWrite(Motors.M1DirB,1) #R set to backward
PDALib.digitalWrite(Motors.M2DirA,1) #L set to forward
# drive(Motors.SPEED) # ramp speed to go fwd(+) or back(-) at 0-100%
def drive(self,speed):
self.motorsMode = Motors.DRIVE
self.spinSpeed = self.NONE
self.driveDistance = 0
self.driveSpeed = speed
return
# travel(distance.inInches, Motors.SPEED) # go fwd (+) or back (-) a distance
def travel(self,distance, speed=MEDIUM):
self.motorsMode = Motors.TRAVEL
self.spinSpeed =self.NONE
self.driveDistance = distance
encoders.reset()
self.setInitialCounts()
self.driveSpeed = speed * sign(distance)
if (self.debugLevel >0): print ("starting travel %.1f at %d" % (distance, speed))
return
# spin(Motors.SPEED) # ramp spin speed to go ccw(+) or cw(-) at 0-100%
def spin(self, speed):
self.motorsMode = Motors.SPIN
self.driveSpeed = Motors.NONE
self.driveDistance = 0
self.spinSpeed = speed
return
# turn(Motors.DIRECTION) # Turn to direction in degrees
def turn(self, direction):
self.motorsMode = Motors.TURN
self.driveSpeed = Motors.NONE
self.driveDistance = 0
self.turnDir = direction
self.spinSpeed = Motors.MEDIUM * sign(direction)
return
# stop() # come to graceful stop
def stop(self): # don't change mode, just bring speed to 0
self.spinSpeed = Motors.NONE
self.driveSpeed = Motors.NONE
self.driveDistance = 0
self.lastMotorsMode = self.motorsMode
self.motorsMode = Motors.STOP
return
# halt() # immediate stop
def halt(self):
self.motors_off()
self.spinSpeed = Motors.NONE
self.driveSpeed = Motors.NONE
self.driveDistance = 0
self.motorsMode = Motors.STOPPED
return
# calibrate() # find minFwdDrive, minBwdDrive, minCCWDrive, minCWDrive, biasFwd, biasBwd
def calibrate(self):
if (self.debugLevel >0): print "Calibrate() Started"
time.sleep(1)
if (self.debugLevel >0): print "Calibrate minFwdDrive, minBwdDrive"
time.sleep(1)
if (self.debugLevel >0): print "Calibrate minCCWDrive, minCWDrive"
time.sleep(1)
if (self.debugLevel >0): print "Calibrate biasFwd, biasBwd"
time.sleep(1)
if (self.debugLevel >0): print "\n"
if (self.debugLevel >0): print "*******************"
if (self.debugLevel >0): print "Calibration Results"
if (self.debugLevel >0): print ("minFwdDrive: %d minBwdDrive: %d" % (self.minFwdDrive, self.minBwdDrive))
if (self.debugLevel >0): print ("minCCWDrive: %d minCWDrive: %d" % (self.minCCWDrive, self.minCWDrive))
if (self.debugLevel >0): print ("biasFwd: %d biasBwd: %d" % (self.biasFwd, self.biasBwd))
if (self.debugLevel >0): print "Done"
return
def cancel(self):
print "Motors.cancel() called"
self.pollThreadHandle.do_run = False
print "Waiting for Motors.control Thread to quit"
self.pollThreadHandle.join()
self.halt()
def waitForStopped(self, timeout=60):
if (self.debugLevel >0): print ("waitForStopped or %.1f" % timeout)
tWaitForModeChange = 2*Motors.tSleep
time.sleep(tWaitForModeChange)
timeout_delta=datetime.timedelta(seconds=int(timeout))
timeoutTime = datetime.datetime.now()+timeout_delta
while ((datetime.datetime.now() < timeoutTime) and (self.motorsMode != Motors.STOPPED)):
time.sleep(tWaitForModeChange)
# ##### LOW LEVEL MOTOR METHODS
# setMotorsPwr(lPwr,rPwr) # Pwr:+/- 0-255
def setMotorsPwr(self,lPwr,rPwr):
if (lPwr>0):
PDALib.digitalWrite(Motors.M2DirA,1) #lft set to forward
PDALib.digitalWrite(Motors.M2DirB,0) #set to off/coast
elif (lPwr<0):
PDALib.digitalWrite(Motors.M2DirA,0) #set to off/coast
PDALib.digitalWrite(Motors.M2DirB,1) #lft set to backward
else:
PDALib.digitalWrite(Motors.M2DirA,0) #set to off/coast
PDALib.digitalWrite(Motors.M2DirB,0) #set to off/coast
if (rPwr>0):
PDALib.digitalWrite(Motors.M1DirA,1) #rt set to forward
PDALib.digitalWrite(Motors.M1DirB,0) #set to off/coast
elif (rPwr<0):
PDALib.digitalWrite(Motors.M1DirA,0) #set to off/coast
PDALib.digitalWrite(Motors.M1DirB,1) #rt set to backward
else:
PDALib.digitalWrite(Motors.M1DirA,0) #set to off/coast
PDALib.digitalWrite(Motors.M1DirB,0) #set to off/coast
# Now power the motors
if (self.debugLevel >1): print ("setMotorsPwr(lPwr:%d,rPwr:%d) %s" % (lPwr,rPwr,datetime.datetime.now()))
PDALib.analogWrite(Motors.LMotor,abs(lPwr)) #set lft motor2
PDALib.analogWrite(Motors.RMotor,abs(rPwr)) #set rt motor1
# end setMotorsPwr()
# ##### Motors CLASS TEST METHOD ######
# main() below exercises the public Motors methods defined above
def main():
  # note: lowercase motors is the object, uppercase Motors is the class (everywhere in code)
motors=Motors(readingsPerSec=10) #create instance and control Motors thread
myPyLib.set_cntl_c_handler(motors.cancel) # Set CNTL-C handler
try:
print "\n"
# ##### TEST rampTgtCurStep()
# motors.rampTgtCurStep(0, 0, 30)
# motors.rampTgtCurStep(100, -100, 30)
# motors.rampTgtCurStep(100, -50, 30)
# motors.rampTgtCurStep(100, 0, 30)
# motors.rampTgtCurStep(100, 50, 30)
# motors.rampTgtCurStep(100, 80, 30)
# motors.rampTgtCurStep(-100, 100, 30)
# motors.rampTgtCurStep(-100, 10, 30)
# motors.rampTgtCurStep(-100, 0, 30)
# motors.rampTgtCurStep(-100, -50, 30)
# motors.rampTgtCurStep(-100, -80, 30)
# motors.rampTgtCurStep(0, -100, 30)
# motors.rampTgtCurStep(0, -10, 30)
# motors.rampTgtCurStep(0, +10, 30)
# motors.rampTgtCurStep(50, -100, 30)
# motors.rampTgtCurStep(50, 40, 30)
# motors.rampTgtCurStep(50, 60, 30)
# motors.rampTgtCurStep(-50, 100, 30)
# motors.rampTgtCurStep(-50, -40, 30)
# motors.rampTgtCurStep(-50, -60, 30)
# ######## TEST calibrate()
# motors.calibrate()
# ######## TEST spin()
print "TEST SPIN"
motors.spin(Motors.FAST)
time.sleep(5)
motors.stop()
time.sleep(3)
print "spin(SLOW)"
motors.spin(Motors.SLOW)
time.sleep(5)
motors.stop()
time.sleep(3)
print "spin(-FAST)"
motors.spin(-Motors.FAST)
time.sleep(5)
motors.stop()
time.sleep(3)
motors.spin(-Motors.SLOW)
time.sleep(5)
motors.stop()
time.sleep(3)
# ###### TEST drive()
print "TEST DRIVE"
motors.drive(Motors.SLOW)
time.sleep(5)
motors.stop()
time.sleep(3)
motors.drive(-Motors.SLOW)
time.sleep(5)
motors.stop()
time.sleep(3)
motors.drive(Motors.MEDIUM)
time.sleep(5)
motors.stop()
time.sleep(3)
motors.drive(-Motors.MEDIUM)
time.sleep(5)
motors.stop()
time.sleep(3)
motors.drive(Motors.FAST)
time.sleep(5)
motors.stop()
time.sleep(3)
motors.drive(-Motors.FAST)
time.sleep(5)
motors.stop()
time.sleep(3)
# ####### TEST travel()
print "TEST TRAVEL FWD 6.0 inches:", datetime.datetime.now()
motors.travel(6.0, Motors.MEDIUM)
time.sleep(5.0)
print "TEST TRAVEL BWD(-) 6.0 inches:", datetime.datetime.now()
motors.travel(-6.0, Motors.MEDIUM)
time.sleep(5.0)
# ####### TEST turn()
print "TEST TURNS"
trn1=Motors.CCW90
trn2=Motors.CW180
trn3=Motors.CCW90
motors.turn(trn1)
time.sleep(5)
motors.turn(trn2)
time.sleep(5)
motors.turn(trn3)
time.sleep(5)
# ###### EXIT TEST GRACEFULLY cancel()
motors.cancel()
except SystemExit:
myPDALib.PiExit()
print "MotorsClass TEST: Bye Bye"
except:
print "Exception Raised"
motors.cancel()
traceback.print_exc()
if __name__ == "__main__":
main()
| slowrunner/RWPi | posie/posie-web/rwpilib/motorsClass.py | Python | gpl-3.0 | 27,889 | 0.033992 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating HTTP health checks."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import health_checks_utils
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class Create(base_classes.BaseAsyncCreator):
"""Create a HTTP health check to monitor load balanced instances."""
@staticmethod
def Args(parser):
health_checks_utils.AddHttpRelatedCreationArgs(parser)
health_checks_utils.AddProtocolAgnosticCreationArgs(parser, 'HTTP')
@property
def service(self):
return self.compute.healthChecks
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'healthChecks'
def CreateRequests(self, args):
"""Returns the request necessary for adding the health check."""
health_check_ref = self.CreateGlobalReference(
args.name, resource_type='healthChecks')
proxy_header = self.messages.HTTPHealthCheck.ProxyHeaderValueValuesEnum(
args.proxy_header)
request = self.messages.ComputeHealthChecksInsertRequest(
healthCheck=self.messages.HealthCheck(
name=health_check_ref.Name(),
description=args.description,
type=self.messages.HealthCheck.TypeValueValuesEnum.HTTP,
httpHealthCheck=self.messages.HTTPHealthCheck(
host=args.host,
port=args.port,
portName=args.port_name,
requestPath=args.request_path,
proxyHeader=proxy_header),
checkIntervalSec=args.check_interval,
timeoutSec=args.timeout,
healthyThreshold=args.healthy_threshold,
unhealthyThreshold=args.unhealthy_threshold,
),
project=self.project)
return [request]
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateAlpha(Create):
"""Create a HTTP health check to monitor load balanced instances."""
@staticmethod
def Args(parser):
Create.Args(parser)
health_checks_utils.AddHttpRelatedResponseArg(parser)
def CreateRequests(self, args):
"""Returns the request necessary for adding the health check."""
requests = super(CreateAlpha, self).CreateRequests(args)
requests[0].healthCheck.httpHealthCheck.response = args.response
return requests
Create.detailed_help = {
    'brief': ('Create an HTTP health check to monitor load balanced instances'),
'DESCRIPTION': """\
        *{command}* is used to create an HTTP health check. HTTP health checks
monitor instances in a load balancer controlled by a target pool. All
arguments to the command are optional except for the name of the health
check. For more information on load balancing, see
[](https://cloud.google.com/compute/docs/load-balancing-and-autoscaling/)
""",
}
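# Illustrative invocation (flag names inferred from the arguments wired up
# above; check `gcloud compute health-checks create http --help` on the
# installed release before relying on them):
#   gcloud compute health-checks create http my-http-check \
#       --port 80 --request-path /healthz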
| KaranToor/MA450 | google-cloud-sdk/lib/surface/compute/health_checks/create/http.py | Python | apache-2.0 | 3,471 | 0.004033 |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('grandbudapest')
# Fixing random state for reproducibility
np.random.seed(1)
fig, axes = plt.subplots(ncols=2, nrows=2)
ax1, ax2, ax3, ax4 = axes.ravel()
# scatter plot (Note: `plt.scatter` doesn't use default colors)
x, y = np.random.normal(size=(2, 200))
ax1.plot(x, y, 'o')
ax1.set_title('Scatter plot')
# sinusoidal lines with colors from default color cycle
L = 2*np.pi
x = np.linspace(0, L)
ncolors = len(plt.rcParams['axes.prop_cycle'])
shift = np.linspace(0, L, ncolors, endpoint=False)
for s in shift:
ax2.plot(x, np.sin(x + s), '-')
ax2.margins(0)
ax2.set_title('Line plot')
# bar graphs
x = np.arange(5)
y1, y2 = np.random.randint(1, 25, size=(2, 5))
width = 0.25
ax3.bar(x, y1, width)
ax3.bar(x + width, y2, width,
color=list(plt.rcParams['axes.prop_cycle'])[2]['color'])
ax3.set_xticks(x + width)
ax3.set_xticklabels(['a', 'b', 'c', 'd', 'e'])
ax3.set(xlabel='X labels', ylabel='Y labels')
# circles with colors from default color cycle
for i, color in enumerate(plt.rcParams['axes.prop_cycle']):
xy = np.random.normal(size=2)
ax4.add_patch(plt.Circle(xy, radius=0.3, color=color['color']))
ax4.axis('equal')
ax4.margins(0)
fig.savefig('grandbudapest.png', bbox_inches='tight')
plt.show()
| cako/mpl_grandbudapest | grandbudapest.py | Python | mit | 1,318 | 0 |
# -*- coding: utf-8 -*-
##########################################################################
# #
# Eddy: a graphical editor for the specification of Graphol ontologies #
# Copyright (C) 2015 Daniele Pantaleone <danielepantaleone@me.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##################### ##################### #
# #
# Graphol is developed by members of the DASI-lab group of the #
# Dipartimento di Ingegneria Informatica, Automatica e Gestionale #
# A.Ruberti at Sapienza University of Rome: http://www.dis.uniroma1.it #
# #
# - Domenico Lembo <lembo@dis.uniroma1.it> #
# - Valerio Santarelli <santarelli@dis.uniroma1.it> #
# - Domenico Fabio Savo <savo@dis.uniroma1.it> #
# - Daniele Pantaleone <pantaleone@dis.uniroma1.it> #
# - Marco Console <console@dis.uniroma1.it> #
# #
##########################################################################
from PyQt5 import QtWidgets
from eddy.core.functions.misc import first
from eddy.core.items.common import AbstractItem
class CommandNodeAdd(QtWidgets.QUndoCommand):
"""
This command is used to add a node to a diagram.
"""
def __init__(self, diagram, node):
"""
Initialize the command.
:type diagram: Diagram
:type node: AbstractNode
"""
super().__init__('add {0}'.format(node.name))
self.diagram = diagram
self.node = node
def redo(self):
"""redo the command"""
self.diagram.addItem(self.node)
self.diagram.sgnItemAdded.emit(self.diagram, self.node)
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
self.diagram.removeItem(self.node)
self.diagram.sgnItemRemoved.emit(self.diagram, self.node)
self.diagram.sgnUpdated.emit()
class CommandNodeSetDepth(QtWidgets.QUndoCommand):
"""
This command is used to change the Z value of diagram nodes.
"""
def __init__(self, diagram, node, zValue):
"""
Initialize the command.
:type diagram: Diagram
:type node: AbstractNode
:type zValue: float
"""
super().__init__('change {0} depth'.format(node.name))
self.node = node
self.diagram = diagram
self.depth = {'redo': zValue, 'undo': node.zValue()}
def redo(self):
"""redo the command"""
self.node.setZValue(self.depth['redo'])
self.node.updateEdges()
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
self.node.setZValue(self.depth['undo'])
self.node.updateEdges()
self.diagram.sgnUpdated.emit()
class CommandNodeRezize(QtWidgets.QUndoCommand):
"""
This command is used to resize nodes.
"""
def __init__(self, diagram, node, data):
"""
Initialize the command.
:type diagram: Diagram
:type node: AbstractNode
:type data: dict
"""
super().__init__('resize {0}'.format(node.name))
self.diagram = diagram
self.node = node
self.data = data
def redo(self):
"""redo the command"""
# TURN CACHING OFF
for edge in self.node.edges:
edge.setCacheMode(AbstractItem.NoCache)
self.node.background.setGeometry(self.data['redo']['background'])
self.node.selection.setGeometry(self.data['redo']['selection'])
self.node.polygon.setGeometry(self.data['redo']['polygon'])
for edge, pos in self.data['redo']['anchors'].items():
self.node.setAnchor(edge, pos)
self.node.updateTextPos(moved=self.data['redo']['moved'])
self.node.updateNode()
self.node.updateEdges()
self.node.update()
# TURN CACHING ON
for edge in self.node.edges:
edge.setCacheMode(AbstractItem.DeviceCoordinateCache)
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
# TURN CACHING OFF
for edge in self.node.edges:
edge.setCacheMode(AbstractItem.NoCache)
self.node.background.setGeometry(self.data['undo']['background'])
self.node.selection.setGeometry(self.data['undo']['selection'])
self.node.polygon.setGeometry(self.data['undo']['polygon'])
for edge, pos in self.data['undo']['anchors'].items():
self.node.setAnchor(edge, pos)
self.node.updateTextPos(moved=self.data['undo']['moved'])
self.node.updateNode()
self.node.updateEdges()
self.node.update()
# TURN CACHING ON
for edge in self.node.edges:
edge.setCacheMode(AbstractItem.DeviceCoordinateCache)
self.diagram.sgnUpdated.emit()
class CommandNodeMove(QtWidgets.QUndoCommand):
"""
This command is used to move nodes (1 or more).
"""
def __init__(self, diagram, undo, redo):
"""
Initialize the command.
:type diagram: Diagram
:type undo: dict
:type redo: dict
"""
self._diagram = diagram
self._edges = set()
self._redo = redo
self._undo = undo
for node in self._redo['nodes']:
self._edges |= node.edges
if len(self._redo['nodes']) != 1:
name = 'move {0} nodes'.format(len(self._redo['nodes']))
else:
name = 'move {0}'.format(first(self._redo['nodes'].keys()).name)
super().__init__(name)
def redo(self):
"""redo the command"""
# Turn off caching.
for edge in self._edges:
edge.setCacheMode(AbstractItem.NoCache)
# Update edges breakpoints.
for edge, breakpoints in self._redo['edges'].items():
for i in range(len(breakpoints)):
edge.breakpoints[i] = breakpoints[i]
# Update nodes positions.
for node, data in self._redo['nodes'].items():
node.setPos(data['pos'])
# Update edge anchors.
for edge, pos in data['anchors'].items():
node.setAnchor(edge, pos)
# Update edges.
for edge in self._edges:
edge.updateEdge()
# Turn on caching.
for edge in self._edges:
edge.setCacheMode(AbstractItem.DeviceCoordinateCache)
# Emit updated signal.
self._diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
# Turn off caching.
for edge in self._edges:
edge.setCacheMode(AbstractItem.NoCache)
# Update edges breakpoints.
for edge, breakpoints in self._undo['edges'].items():
for i in range(len(breakpoints)):
edge.breakpoints[i] = breakpoints[i]
# Update nodes positions.
for node, data in self._undo['nodes'].items():
node.setPos(data['pos'])
# Update edge anchors.
for edge, pos in data['anchors'].items():
node.setAnchor(edge, pos)
# Update edges.
for edge in self._edges:
edge.updateEdge()
# Turn caching ON.
for edge in self._edges:
edge.setCacheMode(AbstractItem.DeviceCoordinateCache)
# Emit updated signal.
self._diagram.sgnUpdated.emit()
class CommandNodeSwitchTo(QtWidgets.QUndoCommand):
"""
This command is used to swap between 2 nodes.
"""
def __init__(self, diagram, node1, node2):
"""
Initialize the command.
:type diagram: Diagram
:type node1: AbstractNode
:type node2: AbstractNode
"""
super().__init__('switch {0} to {1}'.format(node1.name, node2.name))
self.node = {'redo': node2, 'undo': node1}
self.diagram = diagram
def redo(self):
"""redo the command"""
# Add the new node to the diagram.
self.diagram.addItem(self.node['redo'])
self.diagram.sgnItemAdded.emit(self.diagram, self.node['redo'])
# Move the anchor points.
for edge, point in self.node['undo'].anchors.items():
self.node['redo'].setAnchor(edge, point)
# Move the edges.
for edge in self.node['undo'].edges:
if edge.source is self.node['undo']:
edge.source = self.node['redo']
if edge.target is self.node['undo']:
edge.target = self.node['redo']
self.node['redo'].addEdge(edge)
# IMPORTANT: clear anchors dict in the edge or we
# will have also the reference of the previous node
# since it's a dict indexed by item!
edge.anchors.clear()
edge.updateEdge()
# Identify the new node.
self.diagram.sgnNodeIdentification.emit(self.node['redo'])
# Clear edge and anchor references from node1.
self.node['undo'].anchors.clear()
self.node['undo'].edges.clear()
# Remove the old node from the diagram.
self.diagram.removeItem(self.node['undo'])
self.diagram.sgnItemRemoved.emit(self.diagram, self.node['undo'])
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
# Add back to the diagram the old node.
self.diagram.addItem(self.node['undo'])
self.diagram.sgnItemAdded.emit(self.diagram, self.node['undo'])
# Move the anchor points back.
for edge, point in self.node['redo'].anchors.items():
self.node['undo'].setAnchor(edge, point)
# Move the edges.
for edge in self.node['redo'].edges:
if edge.source is self.node['redo']:
edge.source = self.node['undo']
if edge.target is self.node['redo']:
edge.target = self.node['undo']
self.node['undo'].addEdge(edge)
# IMPORTANT: clear anchors dict in the edge or we
# will have also the reference of the previous node
# since it's a dict indexed by item!
edge.anchors.clear()
edge.updateEdge()
# Identify the old node.
self.diagram.sgnNodeIdentification.emit(self.node['undo'])
# Clear edge and anchor references from node2.
self.node['redo'].anchors.clear()
self.node['redo'].edges.clear()
# Remove the new node from the diagram.
self.diagram.removeItem(self.node['redo'])
self.diagram.sgnItemRemoved.emit(self.diagram, self.node['redo'])
self.diagram.sgnUpdated.emit()
class CommandNodeSetMeta(QtWidgets.QUndoCommand):
"""
This command is used to set predicates meta.
"""
def __init__(self, project, item, predicate, undo, redo, name=None):
"""
Initialize the command.
:type project: Project
:type item: Item
:type predicate: str
:type undo: dict
:type redo: dict
:type name: str
"""
super().__init__(name or 'set {0} meta'.format(predicate))
self._predicate = predicate
self._project = project
self._item = item
self._undo = undo
self._redo = redo
def redo(self):
"""redo the command"""
self._project.setMeta(self._item, self._predicate, self._redo)
for node in self._project.predicates(self._item, self._predicate):
node.updateNode(selected=node.isSelected())
def undo(self):
"""undo the command"""
self._project.setMeta(self._item, self._predicate, self._undo)
for node in self._project.predicates(self._item, self._predicate):
node.updateNode(selected=node.isSelected())
class CommandNodeChangeInputsOrder(QtWidgets.QUndoCommand):
"""
This command is used to change the order of Role chain and Property assertion inputs.
"""
def __init__(self, diagram, node, inputs):
"""
Initialize the command.
:type diagram: Diagram
:type node: AbstractNode
:type inputs: DistinctList
"""
self.node = node
self.diagram = diagram
self.inputs = {'redo': inputs, 'undo': node.inputs}
super().__init__('change {0} inputs order'.format(node.name))
def redo(self):
"""redo the command"""
self.node.inputs = self.inputs['redo']
self.node.updateEdges()
self.diagram.sgnUpdated.emit()
def undo(self):
"""redo the command"""
self.node.inputs = self.inputs['undo']
self.node.updateEdges()
self.diagram.sgnUpdated.emit()
class CommandNodeSetBrush(QtWidgets.QUndoCommand):
"""
This command is used to change the brush of predicate nodes.
"""
def __init__(self, diagram, nodes, brush):
"""
Initialize the command.
:type diagram: Diagram
:type nodes: T <= tuple|list|set
:type brush: QBrush
"""
self.nodes = nodes
self.brush = {x: {'undo': x.brush(), 'redo': brush} for x in nodes}
self.diagram = diagram
super().__init__('set {0} brush on {1} node(s)'.format(brush.color().name(), len(nodes)))
def redo(self):
"""redo the command"""
for node in self.nodes:
node.setBrush(self.brush[node]['redo'])
node.updateNode(selected=node.isSelected())
self.diagram.sgnUpdated.emit()
def undo(self):
"""redo the command"""
for node in self.nodes:
node.setBrush(self.brush[node]['undo'])
node.updateNode(selected=node.isSelected())
self.diagram.sgnUpdated.emit() | ashwingoldfish/eddy | eddy/core/commands/nodes.py | Python | gpl-3.0 | 14,989 | 0.000267 |
#!/usr/bin/env python
import os
import os.path
import sys
import string
###############################################################
## #
## Edyta Malolepsza #
## David Wales' group, University of Cambridge #
## in case of problems please send email: em427@cam.ac.uk #
## #
###############################################################
## #
## program finds IMPROPER entries whose atom order is wrongly #
## defined in a prmtop file from LEaP, permutes the affected #
## atoms and writes a new prmtop file #
## #
## how to use: #
## ./perm-top.py NAME_OF_OLD_PRMTOP NAME_OF_NEW_PRMTOP #
## #
## IMPORTANT: #
## 1. please change names of terminal amino acid residues #
## according to warnings below #
## 2. please change path to libraries #
## 3. program changes the atom order ONLY for amino acid and #
## nucleic residues #
## #
###############################################################
# khs26> changed the path to use the $AMBERHOME environment variable
amberhome = os.environ["AMBERHOME"]
path = os.path.join(amberhome, "dat/leap/lib")
#########################
## some useful functions
#########################
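## note (based on the helpers below): the DIHEDRAL sections of a prmtop store
## each atom as 3*(zero-based atom index), which is why the looked-up atom
## position is multiplied by 3 before being compared against dihedral entries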
def exchange_atoms(atom_type, a, aa, residue, dihedrals, currentAtomNumber):
find_atom = a[aa.index(residue)].index(atom_type)
atomNumber = find_atom+currentAtomNumber
atomNumberIndex = atomNumber*3
for j in range(len(dihedrals)):
if (dihedrals[j][1]==str(atomNumberIndex)):
d1 = dihedrals[j][0]
d2 = dihedrals[j][1]
dihedrals[j][0] = d2
dihedrals[j][1] = d1
def exchange_atoms_nt(atom_type, a, aa, residue, dihedrals):
find_atom = a[aa.index(residue)].index(atom_type)
for j in range(len(dihedrals)):
if (dihedrals[j][1]==str(atomIndex[find_atom])):
d1 = dihedrals[j][0]
d2 = dihedrals[j][1]
dihedrals[j][0] = d2
dihedrals[j][1] = d1
def exchange_atoms_arg(a, aa, residue, dihedrals, currentAtomNumber):
## IMPROPER responsible for trouble with NH2 group permutation:
find_atom1 = a[aa.index(residue)].index('NE')
atomNumber1 = find_atom1+currentAtomNumber
atomNumberIndex1 = atomNumber1*3
find_atom2 = a[aa.index(residue)].index('NH1')
atomNumber2 = find_atom2+currentAtomNumber
atomNumberIndex2 = atomNumber2*3
find_atom3 = a[aa.index(residue)].index('CZ')
atomNumber3 = find_atom3+currentAtomNumber
atomNumberIndex3 = atomNumber3*3
find_atom4 = a[aa.index(residue)].index('NH2')
atomNumber4 = find_atom4+currentAtomNumber
atomNumberIndex4 = atomNumber4*3
for j in range(len(dihedrals)):
if ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex2))):
d0 = dihedrals[j][0]
d1 = dihedrals[j][1]
dihedrals[j][0] = d1
dihedrals[j][1] = d0
def exchange_atoms_ring1(a, aa, residue, dihedrals):
find_atom1 = a[aa.index(residue)].index('CD1')
atomNumber1 = find_atom1+currentAtomNumber
atomNumberIndex1 = atomNumber1*3
find_atom2 = a[aa.index(residue)].index('CD2')
atomNumber2 = find_atom2+currentAtomNumber
atomNumberIndex2 = atomNumber2*3
for j in range(len(dihedrals)):
if ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex2))):
d0 = '-'+dihedrals[j][0]
d1 = dihedrals[j][1]
d3 = dihedrals[j][3][1:]
dihedrals[j][0] = d1
dihedrals[j][1] = d3
dihedrals[j][3] = d0
def exchange_atoms_ring2(a, aa, residue, dihedrals):
find_atom1 = a[aa.index(residue)].index('CG')
atomNumber1 = find_atom1+currentAtomNumber
atomNumberIndex1 = atomNumber1*3
find_atom2 = a[aa.index(residue)].index('CE2')
atomNumber2 = find_atom2+currentAtomNumber
atomNumberIndex2 = atomNumber2*3
find_atom3 = a[aa.index(residue)].index('CZ')
atomNumber3 = find_atom3+currentAtomNumber
atomNumberIndex3 = atomNumber3*3
find_atom4 = a[aa.index(residue)].index('CD2')
atomNumber4 = find_atom4+currentAtomNumber
atomNumberIndex4 = atomNumber4*3
find_atom5 = a[aa.index(residue)].index('CD1')
atomNumber5 = find_atom5+currentAtomNumber
atomNumberIndex5 = atomNumber5*3
find_atom6 = a[aa.index(residue)].index('CE1')
atomNumber6 = find_atom6+currentAtomNumber
atomNumberIndex6 = atomNumber6*3
# for j in range(len(dihedrals)): # this is ok
# if ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex2))):
for j in range(len(dihedrals)):
if ((dihedrals[j][0]==str(atomNumberIndex3)) and (dihedrals[j][1]==str(atomNumberIndex4))):
d1 = '-'+dihedrals[j][1]
d2 = dihedrals[j][3][1:]
dihedrals[j][1] = d2
dihedrals[j][3] = d1
for j in range(len(dihedrals)):
if ((dihedrals[j][0]==str(atomNumberIndex5)) and (dihedrals[j][1]==str(atomNumberIndex3))):
d1 = '-'+dihedrals[j][1]
d2 = dihedrals[j][3][1:]
dihedrals[j][1] = d2
dihedrals[j][3] = d1
for j in range(len(dihedrals)):
if ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex6))):
## to compare IMPROPER before and after permutation
##test a1 = (int(dihedrals[j][0])-currentAtomNumber)/3
##test a2 = (int(dihedrals[j][1])-currentAtomNumber)/3
##test a3 = (int(dihedrals[j][2][1:])-currentAtomNumber)/3
##test a4 = (int(dihedrals[j][3][1:])-currentAtomNumber)/3
##test print dihedrals[j], a[aa.index(residue)][a1], a[aa.index(residue)][a2], a[aa.index(residue)][a3], a[aa.index(residue)][a4]
d1 = '-'+dihedrals[j][0]
d2 = dihedrals[j][3][1:]
dihedrals[j][0] = d2
dihedrals[j][3] = d1
##test a1 = (int(dihedrals[j][0])-currentAtomNumber)/3
##test a2 = (int(dihedrals[j][1])-currentAtomNumber)/3
##test a3 = (int(dihedrals[j][2][1:])-currentAtomNumber)/3
##test a4 = (int(dihedrals[j][3][1:])-currentAtomNumber)/3
##test print dihedrals[j], a[aa.index(residue)][a1], a[aa.index(residue)][a2], a[aa.index(residue)][a3], a[aa.index(residue)][a4]
def exchange_atoms_ring3(a, aa, residue, dihedrals):
find_atom1 = a[aa.index(residue)].index('CE1')
atomNumber1 = find_atom1+currentAtomNumber
atomNumberIndex1 = atomNumber1*3
find_atom2 = a[aa.index(residue)].index('CE2')
atomNumber2 = find_atom2+currentAtomNumber
atomNumberIndex2 = atomNumber2*3
for j in range(len(dihedrals)):
if ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex2))):
d0 = '-'+dihedrals[j][0]
d1 = dihedrals[j][1]
d3 = dihedrals[j][3][1:]
dihedrals[j][0] = d1
dihedrals[j][1] = d3
dihedrals[j][3] = d0
####################################
## reading all_amino02.lib library
####################################
print '\nDear user, please notice that only residues from the following libraries are taken into account:'
print ' ions94.lib'
print ' all_amino02.lib'
aalib = open("%s/all_amino02.lib" % path).read()
aa = string.split(aalib, "\n")
q1 = aa.index("!!index array str")
q2 = aa.index("!entry.ALA.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg")
aaNames = [] # amino acid names
aTypes = [] # atom types
aNames = [] # atom names
for i in range(q2-q1-1):
aaNames.append(aa[q1+1+i][2:5])
for i in range(len(aaNames)):
q1 = aa.index("!entry.%s.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg" % aaNames[i])
q2 = aa.index("!entry.%s.unit.atomspertinfo table str pname str ptype int ptypex int pelmnt dbl pchg" % aaNames[i])
aT = []
aN = []
for j in range(q2-q1-1):
aT.append((string.split(aa[q1+1+j])[0]).replace('"',''))
aN.append((string.split(aa[q1+1+j])[1]).replace('"',''))
aTypes.append(aT)
aNames.append(aN)
######################################
## reading all_aminont02.lib library
######################################
print ' all_aminont02.lib'
aalib = open("%s/all_aminont02.lib" % path).read()
aa = string.split(aalib, "\n")
q1 = aa.index("!!index array str")
q2 = aa.index("!entry.ACE.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg")
aantNames = [] # N terminus amino acid names
antTypes = [] # N terminus atom types
antNames = [] # N terminus atom names
for i in range(q2-q1-1):
aantNames.append((aa[q1+1+i].replace('"','')).replace(' ',''))
for i in range(len(aantNames)):
q1 = aa.index("!entry.%s.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg" % aantNames[i])
q2 = aa.index("!entry.%s.unit.atomspertinfo table str pname str ptype int ptypex int pelmnt dbl pchg" % aantNames[i])
aT = []
aN = []
for j in range(q2-q1-1):
aT.append((string.split(aa[q1+1+j])[0]).replace('"',''))
aN.append((string.split(aa[q1+1+j])[1]).replace('"',''))
antTypes.append(aT)
antNames.append(aN)
######################################
## reading all_aminoct02.lib library
######################################
print ' all_aminoct02.lib'
aalib = open("%s/all_aminoct02.lib" % path).read()
aa = string.split(aalib, "\n")
q1 = aa.index("!!index array str")
q2 = aa.index("!entry.CALA.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg")
aactNames = [] # C terminus amino acid names
actTypes = [] # C terminus atom types
actNames = [] # C terminus atom names
for i in range(q2-q1-1):
aactNames.append((aa[q1+1+i].replace('"','')).replace(' ',''))
for i in range(len(aactNames)):
q1 = aa.index("!entry.%s.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg" % aactNames[i])
q2 = aa.index("!entry.%s.unit.atomspertinfo table str pname str ptype int ptypex int pelmnt dbl pchg" % aactNames[i])
aT = []
aN = []
for j in range(q2-q1-1):
aT.append((string.split(aa[q1+1+j])[0]).replace('"',''))
aN.append((string.split(aa[q1+1+j])[1]).replace('"',''))
actTypes.append(aT)
actNames.append(aN)
#####################################
## reading all_nucleic02.lib library
#####################################
print ' all_nucleic02.lib'
aalib = open("%s/all_nucleic02.lib" % path).read()
aa = string.split(aalib, "\n")
q1 = aa.index("!!index array str")
q2 = aa.index("!entry.DA.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg")
nucNames = [] # nucleic names
nucTypes = [] # nucleic atom types
nucaNames = [] # nucleic atom names
for i in range(q2-q1-1):
nucNames.append((aa[q1+1+i].replace('"','')).replace(' ',''))
for i in range(len(nucNames)):
q1 = aa.index("!entry.%s.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg" % nucNames[i])
q2 = aa.index("!entry.%s.unit.atomspertinfo table str pname str ptype int ptypex int pelmnt dbl pchg" % nucNames[i])
aT = []
aN = []
for j in range(q2-q1-1):
aT.append((string.split(aa[q1+1+j])[0]).replace('"',''))
aN.append((string.split(aa[q1+1+j])[1]).replace('"',''))
nucTypes.append(aT)
nucaNames.append(aN)
#################################
## reading original prmtop file
#################################
prmtop = open(sys.argv[1]).read()
f = string.split(prmtop, "\n")
q0 = f.index("%FLAG POINTERS ")
q1 = f.index("%FLAG ATOM_NAME ")
q2 = f.index("%FLAG CHARGE ")
q3 = f.index("%FLAG RESIDUE_LABEL ")
q4 = f.index("%FLAG RESIDUE_POINTER ")
q5 = f.index("%FLAG DIHEDRALS_INC_HYDROGEN ")
q6 = f.index("%FLAG DIHEDRALS_WITHOUT_HYDROGEN ")
q7 = f.index("%FLAG EXCLUDED_ATOMS_LIST ")
## names of tables are related to names in prmtop file
atomNumber = int(string.split(f[q0+2])[0])
atomName = []
residueLabel = []
dihedralsIncHydrogen = []
dihedralsWithoutHydrogen = []
atomIndex = []
an = 0
line = 0
while (an<atomNumber):
for j in range(20):
if (an<atomNumber):
an = an+1
atomName.append(f[q1+2+line][j*4:(j+1)*4].strip())
else:
break
line = line+1
for i in range(q4-q3-2):
for j in range((len(f[q3+2+i])+1)/4):
residueLabel.append(string.strip(f[q3+2+i][j*4:4*(j+1)]))
caa = 0
naa = 0
for i in range(len(residueLabel)):
if (aactNames.count(residueLabel[i])>0): caa = caa+1
if (aantNames.count(residueLabel[i])>0): naa = naa+1
if (caa==0):
print "\n-----------------------------------------------------------------------------"
print 'There is no C terminus amino acid in topology file!'
print 'If system does not contain amino acids - continue.'
print 'Otherwise please rename each C terminal residue by adding \'C\' to the name, e.g. ALA -> CALA'
print 'Remember to follow format of topology file!'
print "-----------------------------------------------------------------------------\n"
## the only exception for C terminus amino acids is NME
if (naa==0):
print "-----------------------------------------------------------------------------"
print 'There is no N terminus amino acid in topology file!'
print 'If system does not contain amino acids - continue.'
print 'Otherwise please rename each N terminal residue by adding \'N\' to the name, e.g. ALA -> NALA'
print 'Remember to follow format of topology file!'
print "-----------------------------------------------------------------------------"
## the only exception for N terminus amino acids is ACE
for i in range(q6-q5-2):
for j in range(len(string.split(f[q5+2+i]))/5):
dihedralsIncHydrogen.append(string.split(f[q5+2+i][j*40:40*(j+1)]))
for i in range(q7-q6-2):
for j in range(len(string.split(f[q6+2+i]))/5):
dihedralsWithoutHydrogen.append(string.split(f[q6+2+i][j*40:40*(j+1)]))
for i in range(len(atomName)):
atomIndex.append(i*3)
############################################################
## groups of amino acids according to permutation behaviour
############################################################
## group0 and group0n: nothing to do
## group1: problem only with C terminus COO- group: CA-O-C-OXT -> O-CA-C-OXT
group0 = ['GLY','ALA','VAL','LEU','MET','ILE','SER','THR','CYS','LYS','HIP','HIE','HID']
group0n = ['NGLY','NALA','NVAL','NLEU','NMET','NILE','NSER','NTHR','NCYS','NLYS','NHIP','NHIE','NHID']
group1 = ['CGLY','CALA','CVAL','CLEU','CMET','CILE','CSER','CTHR','CCYS','CLYS','CPRO','CTRP','CHIP','CHIE','CHID']
#################################################################
## groups of nucleic residues according to permutation behaviour
#################################################################
group2 = ['DA', 'DA3', 'DA5', 'DAN', 'RA', 'RA3', 'RA5', 'RAN']
group3 = ['DC', 'DC3', 'DC5', 'DCN', 'RC', 'RC3', 'RC5', 'RCN']
group4 = ['DG', 'DG3', 'DG5', 'DGN', 'RG', 'RG3', 'RG5', 'RGN']
group5 = ['DT', 'DT3', 'DT5', 'DTN', 'RU', 'RU3', 'RU5', 'RUN'] ## nothing to do
#####################################################
## groups of species without any IMPROPER to correct
#####################################################
group7 = ['WAT', 'CIO', 'Cl-', 'Cs+', 'IB', 'K+', 'Li+', 'MG2', 'Na+', 'Rb+']
## groupUknown: residue not counted in library
groupUknown = []
for i in range(len(residueLabel)):
if ((aaNames.count(residueLabel[i])==0) and (aactNames.count(residueLabel[i])==0) and (aantNames.count(residueLabel[i])==0) and (nucNames.count(residueLabel[i])==0) and (group7.count(residueLabel[i])==0)):
groupUknown.append(residueLabel[i])
if (len(groupUknown)!=0):
print '\nThere are some residues missing in considered libraries:', groupUknown
print 'Program just skips them\n'
currentAtomNumber = 0
######################################################################
## main part - permutation of atom positions in appropriate IMPROPERs
######################################################################
for i in range(len(residueLabel)):
# print '----', i+1, '----', residueLabel[i]
if (group7.count(residueLabel[i])>0): continue
elif (groupUknown.count(residueLabel[i])>0): continue
##################################
## residue not counted in library
##################################
elif (aantNames.count(residueLabel[i])>0):
#########################
## N terminus amino acid
#########################
if (group0n.count(residueLabel[i])>0):
currentAtomNumber = currentAtomNumber+len(antTypes[aantNames.index(residueLabel[i])])
continue
elif (residueLabel[i]=='NASN'):
exchange_atoms_nt('HD21', antTypes, aantNames, residueLabel[i], dihedralsIncHydrogen)
elif (residueLabel[i]=='NGLN'):
exchange_atoms_nt('HE21', antTypes, aantNames, residueLabel[i], dihedralsIncHydrogen)
elif (residueLabel[i]=='NARG'):
exchange_atoms_nt('HH11', antTypes, aantNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms_nt('HH21', antTypes, aantNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms_arg(antTypes, aantNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='NASP'):
exchange_atoms_nt('OD1', antTypes, aantNames, residueLabel[i], dihedralsWithoutHydrogen)
elif (residueLabel[i]=='NGLU'):
exchange_atoms_nt('OE1', antTypes, aantNames, residueLabel[i], dihedralsWithoutHydrogen)
elif (residueLabel[i]=='NPHE'):
exchange_atoms_ring1(antTypes, aantNames, residueLabel[i], dihedralsWithoutHydrogen)
exchange_atoms_ring2(antTypes, aantNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms_ring3(antTypes, aantNames, residueLabel[i], dihedralsIncHydrogen)
elif (residueLabel[i]=='NTYR'):
exchange_atoms_ring1(antTypes, aantNames, residueLabel[i], dihedralsWithoutHydrogen)
exchange_atoms_ring2(antTypes, aantNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms_ring3(antTypes, aantNames, residueLabel[i], dihedralsWithoutHydrogen)
# if (dihedralsWithoutHydrogen[j].count(str(atomNumberIndex1))>0):
# print '"""', dihedralsWithoutHydrogen[j], atomNumberIndex1, dihedralsWithoutHydrogen[j].count(str(atomNumberIndex1))
# print '!!!', dihedralsWithoutHydrogen[j], atomNumberIndex1, dihedralsWithoutHydrogen[j].index(str(atomNumberIndex1))
currentAtomNumber = currentAtomNumber+len(antTypes[aantNames.index(residueLabel[i])])
elif (aactNames.count(residueLabel[i])>0):
#########################
## C terminus amino acid
#########################
if (group1.count(residueLabel[i])>0): ## res belongs to group1
exchange_atoms('O', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='CASP'):
exchange_atoms('OD1', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='CGLU'):
exchange_atoms('OE1', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
exchange_atoms('O', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='CGLN'):
exchange_atoms('HE21', actTypes, aactNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
exchange_atoms('O', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='CASN'):
exchange_atoms('HD21', actTypes, aactNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
exchange_atoms('O', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='CARG'):
exchange_atoms('HH11', actTypes, aactNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
exchange_atoms('HH21', actTypes, aactNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
exchange_atoms('O', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
exchange_atoms_arg(actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='CPHE'):
exchange_atoms_ring1(actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen)
exchange_atoms_ring2(actTypes, aactNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms_ring3(actTypes, aactNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms('O', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='CTYR'):
exchange_atoms_ring1(actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen)
exchange_atoms_ring2(actTypes, aactNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms('O', actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
exchange_atoms_ring3(actTypes, aactNames, residueLabel[i], dihedralsWithoutHydrogen)
currentAtomNumber = currentAtomNumber+len(actTypes[aactNames.index(residueLabel[i])])
elif (aaNames.count(residueLabel[i])>0):
###########################
## not terminal amino acid
###########################
if (group0.count(residueLabel[i])>0):
currentAtomNumber = currentAtomNumber+len(aTypes[aaNames.index(residueLabel[i])])
continue
elif (residueLabel[i]=='GLU'):
exchange_atoms('OE1', aTypes, aaNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='ASP'):
exchange_atoms('OD1', aTypes, aaNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='ASN'):
exchange_atoms('HD21', aTypes, aaNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
elif (residueLabel[i]=='GLN'):
exchange_atoms('HE21', aTypes, aaNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
elif (residueLabel[i]=='ARG'):
exchange_atoms('HH11', aTypes, aaNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
exchange_atoms('HH21', aTypes, aaNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
exchange_atoms_arg(aTypes, aaNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
elif (residueLabel[i]=='PHE'):
exchange_atoms_ring1(aTypes, aaNames, residueLabel[i], dihedralsWithoutHydrogen)
exchange_atoms_ring2(aTypes, aaNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms_ring3(aTypes, aaNames, residueLabel[i], dihedralsIncHydrogen)
elif (residueLabel[i]=='TYR'):
exchange_atoms_ring1(aTypes, aaNames, residueLabel[i], dihedralsWithoutHydrogen)
exchange_atoms_ring2(aTypes, aaNames, residueLabel[i], dihedralsIncHydrogen)
exchange_atoms_ring3(aTypes, aaNames, residueLabel[i], dihedralsWithoutHydrogen)
currentAtomNumber = currentAtomNumber+len(aTypes[aaNames.index(residueLabel[i])])
elif (nucNames.count(residueLabel[i])>0):
###################
## nucleic residue
###################
if (group5.count(residueLabel[i])>0):
currentAtomNumber = currentAtomNumber+len(nucTypes[nucNames.index(residueLabel[i])])
continue
elif (group2.count(residueLabel[i])>0):
exchange_atoms('H61', nucTypes, nucNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
elif (group3.count(residueLabel[i])>0):
exchange_atoms('H41', nucTypes, nucNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
elif (group4.count(residueLabel[i])>0):
exchange_atoms('H21', nucTypes, nucNames, residueLabel[i], dihedralsIncHydrogen, currentAtomNumber)
currentAtomNumber = currentAtomNumber+len(nucTypes[nucNames.index(residueLabel[i])])
else:
		print 'Something strange happened... residue %s is neither in the libraries nor in the unknown-residue list' % residueLabel[i]
		sys.exit()
##################################
## preparation of new prmtop file
##################################
newprmtop = open(sys.argv[2],'w')
for i in range(q5+2):
newprmtop.write("%s\n" % f[i])
an = 0
line = 0
while (an<len(dihedralsIncHydrogen)):
for j in range(2):
if (an<len(dihedralsIncHydrogen)):
for k in range(5):
newprmtop.write("%8s" % (dihedralsIncHydrogen[an][k]))
an = an+1
else:
break
newprmtop.write("\n")
line = line+1
for i in range(2):
newprmtop.write("%s\n" % f[q6+i])
an = 0
line = 0
while (an<len(dihedralsWithoutHydrogen)):
for j in range(2):
if (an<len(dihedralsWithoutHydrogen)):
for k in range(5):
newprmtop.write("%8s" % (dihedralsWithoutHydrogen[an][k]))
an = an+1
else:
break
newprmtop.write("\n")
line = line+1
for i in range(len(f)-q7-1):
newprmtop.write("%s\n" % f[q7+i])
newprmtop.close()
| marktoakley/LamarckiAnt | SCRIPTS/AMBER/symmetrise_prmtop/perm-prmtop.ff02.py | Python | gpl-3.0 | 25,578 | 0.020916 |
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
def int_to_bin_str(value, max_bits=8192):
"""Convert an int to a string representation of a bitmask (binary number)"""
mask = value
bits = 1
while 1 << bits < value or bits < 16 and bits < max_bits:
bits *= 2
rep = ''
while bits:
rep = ('1' if mask & 1 else '0') + rep
bits = bits - 1
mask = mask >> 1
return '0b' + rep
| lhupfeldt/multiconf | multiconf/bits.py | Python | bsd-3-clause | 515 | 0.001942 |
#
__author__ = 'Michael Pfister'
__version__ = '1.4.0'
| pfitzer/youtube2mp3 | youtube2mp3/__init__.py | Python | gpl-2.0 | 55 | 0 |
from modules.chart_module import ChartModule
import tornado.web
import logging
class LineChartModule(ChartModule):
def render(self, raw_data, keys, chart_id="linechart"):
self.chart_id = chart_id
self.chart_data = self.overtime_linechart_data(raw_data, keys)
return self.render_string('modules/linechart.html', chart_id=self.chart_id)
def overtime_linechart_data(self, raw_data, keys,
yearterms_key='fcqs_yearterms',
overtime_key='fcqs_overtime'):
def _overtime_builder(overtime_data, key):
def _transform_overtime_data(yearterm):
value = overtime_data[str(yearterm)][key]
roundto = {
'percent_a': 3,
'percent_b': 3,
'percent_c': 3,
'percent_d': 3,
'percent_f': 3,
'percent_incomplete': 3,
'average_grade': 3
}.get(key, 1)
if value is not None:
return round(value, roundto)
else:
return None
return _transform_overtime_data
def _overtime_dataset_builder(key):
color = {
'course_howmuchlearned_average': (247, 92, 3),
'course_challenge_average': (217, 3, 104),
'courseoverall_average': (130, 2, 99),
'course_priorinterest_average': (4, 167, 119),
'instructor_effectiveness_average': (247, 92, 3),
'instructor_respect_average': (217, 3, 104),
'instructoroverall_average': (130, 2, 99),
'instructor_availability_average': (4, 167, 119),
'TTT_instructoroverall_average': (197, 27, 125),
'OTH_instructoroverall_average': (233, 163, 201),
'TA_instructoroverall_average': (253, 224, 239),
'GR_courseoverall_average': (77, 146, 33),
'UD_courseoverall_average': (161, 215, 106),
'LD_courseoverall_average': (230, 245, 106),
'percent_a': (44, 123, 182),
'percent_b': (171, 217, 233),
'percent_c': (255, 255, 191),
'percent_d': (253, 174, 97),
'percent_f': (215, 25, 28),
'percent_incomplete': (48, 48, 48),
'average_grade': (48, 48, 48),
}.get(key, (48, 48, 48))
yaxis_id = {
'percent_a': 'y-axis-3',
'percent_b': 'y-axis-3',
'percent_c': 'y-axis-3',
'percent_d': 'y-axis-3',
'percent_f': 'y-axis-3',
'percent_incomplete': 'y-axis-3',
'average_grade': 'y-axis-2',
}.get(key, 'y-axis-1')
fill = {
'percent_a': True,
'percent_b': True,
'percent_c': True,
'percent_d': True,
'percent_f': True,
'percent_incomplete': True,
}.get(key, False)
label = {
'course_howmuchlearned_average': 'Amount Learned',
'course_challenge_average': 'Challenge',
'courseoverall_average': 'Course Overall',
'course_priorinterest_average': 'Prior Interest',
'instructor_effectiveness_average': 'Effectiveness',
'instructor_respect_average': 'Respect',
'instructoroverall_average': 'Instructor Overall',
'instructor_availability_average': 'Availability',
'TTT_instructoroverall_average': 'TTT instructors',
'OTH_instructoroverall_average': 'OTH instructors',
'TA_instructoroverall_average': 'TA instructors',
'GR_courseoverall_average': 'GR Course Overall',
'UD_courseoverall_average': 'UD Course Overall',
'LD_courseoverall_average': 'LD Course Overall',
'percent_a': 'A Grade',
'percent_b': 'B Grade',
'percent_c': 'C Grade',
'percent_d': 'D Grade',
'percent_f': 'F Grade',
'percent_incomplete': 'Incomplete',
'average_grade': 'Average GPA'
}.get(key, '???')
background_alpha = 1.0 if fill else 0.2
return {
'label': label,
'fill': fill,
'yAxisID': yaxis_id,
'backgroundColor': "rgba({0},{1},{2},{background_alpha})".format(*color, background_alpha=background_alpha),
'borderColor': "rgba({0},{1},{2},1)".format(*color),
'pointBackgroundColor': "rgba({0},{1},{2},1)".format(*color),
'pointHoverBackgroundColor': "rgba({0},{1},{2},1)".format(*color),
'pointHoverBorderColor': "#fff",
'pointHoverBorderWidth': 2,
'pointHoverRadius': 5,
'data': list(map(_overtime_builder(overtime_data, key), yearterms))
}
yearterms = raw_data[yearterms_key]
overtime_data = raw_data[overtime_key]
labels = list(map(self.convert_date, yearterms))
datasets = list(map(_overtime_dataset_builder, keys))
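        # Rough shape of the encoded payload (labels and values below are illustrative only):
        #   {"labels": ["<formatted yearterm>", ...],
        #    "datasets": [{"label": "Course Overall", "yAxisID": "y-axis-1",
        #                  "data": [3.2, ...], ...}, ...]}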
return tornado.escape.json_encode({
'labels': labels,
'datasets': datasets,
})
def embedded_javascript(self):
options = tornado.escape.json_encode(self.chart_options())
foo = '''
new Chart(document.getElementById("{2}").getContext("2d"),{{
type:'line',
data:{1},
options:{0}
}});
'''.format(options, self.chart_data, self.chart_id)
return foo
| SFII/cufcq-new | modules/linechart_module.py | Python | mit | 5,898 | 0.000848 |
def firstCharacter(str):
return str[:1]
assert(firstCharacter("abc") == "a")
def lastCharacter(str):
return str[-1:]
assert(lastCharacter("abc") == "c")
def middleCharacters(str):
return str[1:-1]
assert(middleCharacters("abc") == "b")
assert(middleCharacters("abcde") == "bcd")
def isPalindrome(str):
if len(str) <= 1:
return True
if firstCharacter(str) != lastCharacter(str):
return False
return isPalindrome(middleCharacters(str))
assert(isPalindrome("a") == True)
assert(isPalindrome("taste") == False)
assert(isPalindrome("roror") == True)
| iandmyhand/python-utils | DataStructuresAndAlgorithmsInPython/Palindrome.py | Python | mit | 595 | 0.011765 |
if __name__ == '__main__':
import os
from bs4 import BeautifulSoup
def get_class(cls):
class_name = cls['name']
_instance_methods = cls.find_all('method', recursive=False, class_method=lambda m: m != 'true')
retval = cls.find('retval')
if retval:
if retval.has_attr('declared_type64'):
return_type = retval['declared_type64']
else:
return_type = retval['declared_type']
else:
return_type = 'void'
instance_methods = [
{ 'name': method['selector'], 'args': get_args(method), 'return': return_type }
for method in _instance_methods
]
_class_methods = cls.find_all('method', recursive=False, class_method='true')
class_methods = [
{ 'name': method['selector'], 'args': get_args(method), 'return': return_type }
for method in _class_methods
]
return {
'name': class_name,
'methods': instance_methods,
'class_methods': class_methods
}
def get_func(func):
retval = func.find('retval')
if retval:
if retval.has_attr('declared_type64'):
return_type = retval['declared_type64']
else:
return_type = retval['declared_type']
else:
return_type = 'void'
return { 'name': func['name'], 'args': get_args(func), 'return': return_type }
def get_args(method):
return [
get_arg_name(selector, index)
for index, selector in enumerate(method.find_all('arg', recursive=False))
]
def get_arg_name(selector, index):
if selector.has_attr('declared_type'):
declared_type = selector['declared_type']
else:
declared_type = 'id'
if selector.has_attr('name'):
return { 'name': selector['name'], 'type': declared_type}
else:
return { 'name': str(selector), 'type': declared_type}
def get_const_name(const):
# do this at "output time"
# return const['name'][0].upper() + const['name'][1:]
return const['name']
RUBYMOTION_FOLDER = '/Library/RubyMotion/data/'
def parse_bridgesupport(prefix):
everything = {}
for filename in os.listdir(os.path.join(RUBYMOTION_FOLDER, prefix)):
name, ext = os.path.splitext(filename)
print((prefix + '/' + name).replace('/BridgeSupport/', '/'))
bridgesupport = BeautifulSoup(open(os.path.join(RUBYMOTION_FOLDER, prefix, name + '.bridgesupport')), 'xml')
_constants = bridgesupport.find('signatures').find_all('constant', recursive=False)
_enums = bridgesupport.find('signatures').find_all('enum', recursive=False)
constants = [get_const_name(const) for const in _constants]
constants.extend([get_const_name(const) for const in _enums])
_functions = bridgesupport.find('signatures').find_all('function', recursive=False)
functions = [get_func(func) for func in _functions]
_classes = bridgesupport.find('signatures').find_all('class', recursive=False)
classes = {}
for cls in _classes:
entry = get_class(cls)
classes[entry['name']] = entry
_protocols = bridgesupport.find('signatures').find_all('informal_protocol', recursive=False)
protocols = {}
for proto in _protocols:
entry = get_class(proto)
protocols[entry['name']] = entry
framework = {
'name': name,
'classes': classes,
'protocols': protocols,
'constants': constants,
'functions': functions,
}
everything[name] = framework
return everything
all_the_things = { 'ios': None, 'osx': None }
ios_attempt = 'ios/8.0/BridgeSupport', 'ios/7.1/BridgeSupport', 'ios/7.0/BridgeSupport', 'ios/6.1/BridgeSupport', 'ios/6.0/BridgeSupport'
found = None
for version in ios_attempt:
if os.path.exists(os.path.join(RUBYMOTION_FOLDER, version)):
found = version
break
if not found:
        raise Exception('Couldn\'t find an iOS version')
all_the_things['ios'] = parse_bridgesupport(found)
osx_attempt = 'osx/10.10/BridgeSupport', 'osx/10.9/BridgeSupport', 'osx/10.8/BridgeSupport'
found = None
for version in osx_attempt:
if os.path.exists(os.path.join(RUBYMOTION_FOLDER, version)):
found = version
break
if not found:
        raise Exception('Couldn\'t find an OS X version')
    all_the_things['osx'] = parse_bridgesupport(found)
import json
with open('all_the_things.json', 'w') as fptr:
print('Writing all_the_things.json')
json.dump(all_the_things, fptr)
| colinta/MotionLookitup | compile.py | Python | bsd-2-clause | 4,942 | 0.006273 |
import os
import sys
import shutil
from glob import glob
# --------------------------------------------------------------------------
DOC_DIR = 'hr-html'
PYWRAPS_FN = 'idaapi.py'
# --------------------------------------------------------------------------
def add_footer(lines):
S1 = 'Generated by Epydoc'
S2 = '</table>'
p = lines.find(S1)
if p == -1:
return None
p = lines.find(S2, p)
if p == -1:
return None
p += len(S2)
return lines[0:p] + '\n<!--#include virtual="/footer.shtml" -->' + lines[p:]
# --------------------------------------------------------------------------
def define_idaapi_resolver():
"""
Whenever a module named \"idaapi_<something>\" is
spotted, turn it into \"idaapi\".
"""
import epydoc.apidoc
dn = epydoc.apidoc.DottedName.__init__
def resolver(piece):
if piece is not None and isinstance(piece, basestring) and piece.startswith("idaapi_"):
return "idaapi"
else:
return piece
def wrapper(self, *pieces, **options):
return dn(self, *map(resolver, pieces), **options);
epydoc.apidoc.DottedName.__init__ = wrapper
# --------------------------------------------------------------------------
def gen_docs():
import epydoc.cli
import swigdocs
define_idaapi_resolver()
swigdocs.gen_docs(outfn = 'pywraps.py')
# append obj/x86_win_vc_32/idaapi.py to it
# os.system(r'copy /b idaapi.py+..\obj\x86_win_vc_32\idaapi.py idaapi.py')
# delete all output files
for fn in glob('hr-html/*'):
os.unlink(fn)
epydoc.cli.optparse.sys.argv = [ 'epydoc',
'--config', '../hrdoc.cfg',
'--simple-term'
]
# Generate the documentation
epydoc.cli.cli()
# --------------------------------------------------------------------------
def patch_docs():
shutil.copy('../../hrdoc.css', 'epydoc.css')
os.system('chmod +w epydoc.css')
for fn in glob('*.html'):
f = open(fn, 'r')
lines = f.read()
f.close()
r = add_footer(lines)
if not r:
print "-",
continue
f = open(fn, 'w')
f.write(r)
f.close()
print "+",
print "\nDocumentation patched!"
# --------------------------------------------------------------------------
def main():
# Save old directory and adjust import path
curdir = os.getcwd() + os.sep
sys.path.append(curdir + 'python')
sys.path.append(curdir + 'tools')
sys.path.append(curdir + 'docs')
old_dir = os.getcwd()
try:
print "Generating documentation....."
os.chdir('docs')
gen_docs()
os.chdir(DOC_DIR)
patch_docs()
print "Documentation generated!"
finally:
os.chdir(old_dir)
# --------------------------------------------------------------------------
if __name__ == '__main__':
main()
Exit(0)
| zachriggle/idapython | hrdoc.py | Python | bsd-3-clause | 3,025 | 0.00562 |
"""
Tools to put INE data to good use
""" | JoaquimPatriarca/senpy-for-gis | gasp/ine/__init__.py | Python | gpl-3.0 | 41 | 0.02439 |
#!/usr/bin/python
# (c) 2019, NetApp Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""AWS Cloud Volumes Services - Manage fileSystem"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_netapp_cvs_FileSystems
short_description: NetApp AWS Cloud Volumes Service Manage FileSystem.
extends_documentation_fragment:
- netapp.awscvs
version_added: '2.9'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, Update, Delete fileSystem on AWS Cloud Volumes Service.
options:
state:
description:
- Whether the specified fileSystem should exist or not.
required: true
choices: ['present', 'absent']
type: str
region:
description:
- The region to which the filesystem belongs to.
required: true
type: str
creationToken:
description:
- Name of the filesystem
required: true
type: str
quotaInBytes:
description:
- Size of the filesystem
- Required for create
type: int
serviceLevel:
description:
- Service Level of a filesystem.
choices: ['standard', 'premium', 'extreme']
type: str
exportPolicy:
description:
- The policy rules to export the filesystem
type: dict
suboptions:
rules:
description:
- Set of rules to export the filesystem
- Requires allowedClients, access and protocol
type: list
suboptions:
allowedClients:
description:
- Comma separated list of ip address blocks of the clients to access the fileSystem
- Each address block contains the starting IP address and size for the block
type: str
cifs:
description:
- Enable or disable cifs filesystem
type: bool
nfsv3:
description:
- Enable or disable nfsv3 fileSystem
type: bool
nfsv4:
description:
- Enable or disable nfsv4 filesystem
type: bool
ruleIndex:
description:
- Index number of the rule
type: int
unixReadOnly:
description:
- Should fileSystem have read only permission or not
type: bool
unixReadWrite:
description:
- Should fileSystem have read write permission or not
type: bool
'''
EXAMPLES = """
- name: Create FileSystem
aws_netapp_cvs_FileSystems:
state: present
region: us-east-1
creationToken: newVolume-1
exportPolicy:
rules:
- allowedClients: 172.16.0.4
cifs: False
nfsv3: True
nfsv4: True
ruleIndex: 1
unixReadOnly: True
unixReadWrite: False
quotaInBytes: 100000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
- name: Update FileSystem
aws_netapp_cvs_FileSystems:
state: present
region: us-east-1
creationToken: newVolume-1
exportPolicy:
rules:
- allowedClients: 172.16.0.4
cifs: False
nfsv3: True
nfsv4: True
ruleIndex: 1
unixReadOnly: True
unixReadWrite: False
quotaInBytes: 200000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
- name: Delete FileSystem
aws_netapp_cvs_FileSystems:
    state: absent
region: us-east-1
creationToken: newVolume-1
quotaInBytes: 100000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
"""
RETURN = """
"""
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp_module import NetAppModule
from ansible.module_utils.netapp import AwsCvsRestAPI
class AwsCvsNetappFileSystem(object):
"""
Contains methods to parse arguments,
derive details of AWS_CVS objects
and send requests to AWS CVS via
the restApi
"""
def __init__(self):
"""
Parse arguments, setup state variables,
        check parameters and ensure request module is installed
"""
self.argument_spec = netapp_utils.aws_cvs_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
region=dict(required=True, type='str'),
creationToken=dict(required=True, type='str'),
quotaInBytes=dict(required=False, type='int'),
serviceLevel=dict(required=False, choices=['standard', 'premium', 'extreme']),
exportPolicy=dict(
type='dict',
options=dict(
rules=dict(
type='list',
options=dict(
allowedClients=dict(required=False, type='str'),
cifs=dict(required=False, type='bool'),
nfsv3=dict(required=False, type='bool'),
nfsv4=dict(required=False, type='bool'),
ruleIndex=dict(required=False, type='int'),
unixReadOnly=dict(required=False, type='bool'),
unixReadWrite=dict(required=False, type='bool')
)
)
)
),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['region', 'creationToken', 'quotaInBytes']),
],
supports_check_mode=True
)
self.na_helper = NetAppModule()
# set up state variables
self.parameters = self.na_helper.set_parameters(self.module.params)
# Calling generic AWSCVS restApi class
self.restApi = AwsCvsRestAPI(self.module)
self.data = {}
for key in self.parameters.keys():
self.data[key] = self.parameters[key]
def get_filesystemId(self):
# Check given FileSystem is exists
# Return fileSystemId is found, None otherwise
list_filesystem, error = self.restApi.get('FileSystems')
if error:
self.module.fail_json(msg=error)
for FileSystem in list_filesystem:
if FileSystem['creationToken'] == self.parameters['creationToken']:
return FileSystem['fileSystemId']
return None
def get_filesystem(self, fileSystemId):
# Get FileSystem information by fileSystemId
# Return fileSystem Information
filesystemInfo, error = self.restApi.get('FileSystems/%s' % fileSystemId)
if error:
self.module.fail_json(msg=error)
else:
return filesystemInfo
return None
def is_job_done(self, response):
# check jobId is present and equal to 'done'
# return True on success, False otherwise
try:
job_id = response['jobs'][0]['jobId']
except TypeError:
job_id = None
if job_id is not None and self.restApi.get_state(job_id) == 'done':
return True
return False
def create_fileSystem(self):
# Create fileSystem
api = 'FileSystems'
response, error = self.restApi.post(api, self.data)
if not error:
if self.is_job_done(response):
return
error = "Error: unexpected response on FileSystems create: %s" % str(response)
self.module.fail_json(msg=error)
def delete_fileSystem(self, fileSystemId):
# Delete FileSystem
api = 'FileSystems/' + fileSystemId
self.data = None
response, error = self.restApi.delete(api, self.data)
if not error:
if self.is_job_done(response):
return
error = "Error: unexpected response on FileSystems delete: %s" % str(response)
self.module.fail_json(msg=error)
def update_fileSystem(self, fileSystemId):
# Update FileSystem
api = 'FileSystems/' + fileSystemId
response, error = self.restApi.put(api, self.data)
if not error:
if self.is_job_done(response):
return
error = "Error: unexpected response on FileSystems update: %s" % str(response)
self.module.fail_json(msg=error)
def apply(self):
"""
Perform pre-checks, call functions and exit
"""
fileSystem = None
fileSystemId = self.get_filesystemId()
if fileSystemId:
# Getting the FileSystem details
fileSystem = self.get_filesystem(fileSystemId)
cd_action = self.na_helper.get_cd_action(fileSystem, self.parameters)
if cd_action is None and self.parameters['state'] == 'present':
# Check if we need to update the fileSystem
update_fileSystem = False
if fileSystem['quotaInBytes'] is not None and 'quotaInBytes' in self.parameters \
and fileSystem['quotaInBytes'] != self.parameters['quotaInBytes']:
update_fileSystem = True
elif fileSystem['creationToken'] is not None and 'creationToken' in self.parameters \
and fileSystem['creationToken'] != self.parameters['creationToken']:
update_fileSystem = True
elif fileSystem['serviceLevel'] is not None and 'serviceLevel' in self.parameters \
and fileSystem['serviceLevel'] != self.parameters['serviceLevel']:
update_fileSystem = True
elif fileSystem['exportPolicy']['rules'] is not None and 'exportPolicy' in self.parameters:
for rule_org in fileSystem['exportPolicy']['rules']:
for rule in self.parameters['exportPolicy']['rules']:
if rule_org['allowedClients'] != rule['allowedClients']:
update_fileSystem = True
elif rule_org['unixReadOnly'] != rule['unixReadOnly']:
update_fileSystem = True
elif rule_org['unixReadWrite'] != rule['unixReadWrite']:
update_fileSystem = True
if update_fileSystem:
self.na_helper.changed = True
result_message = ""
if self.na_helper.changed:
if self.module.check_mode:
# Skip changes
result_message = "Check mode, skipping changes"
else:
if cd_action == "create":
self.create_fileSystem()
result_message = "FileSystem Created"
elif cd_action == "delete":
self.delete_fileSystem(fileSystemId)
result_message = "FileSystem Deleted"
else: # modify
self.update_fileSystem(fileSystemId)
result_message = "FileSystem Updated"
self.module.exit_json(changed=self.na_helper.changed, msg=result_message)
def main():
"""
Main function
"""
aws_cvs_netapp_filesystem = AwsCvsNetappFileSystem()
aws_cvs_netapp_filesystem.apply()
if __name__ == '__main__':
main()
| hyperized/ansible | lib/ansible/modules/cloud/amazon/aws_netapp_cvs_FileSystems.py | Python | gpl-3.0 | 12,011 | 0.001915 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
u"""
Finite Horizon Optimal Control
author Atsushi Sakai
"""
import numpy as np
import scipy.linalg as la
def CalcFiniteHorizonOptimalInput(A,B,Q,R,P,N,x0):
u"""
Calc Finite Horizon Optimal Input
# TODO optimize
in: see below
min x'Px+sum(x'Qx+u'Ru)
s.t xk+1=Axk+Bu
out: uopt optimal input
"""
# print("CalcFiniteHorizonOptimalInput start")
# data check
    if A.shape[1] != x0.shape[0]:
print("Data Error: A's col == x0's row")
print("A shape:")
print(A.shape)
print("x0 shape:")
print(x0.shape)
return None
    elif B.shape[1] != R.shape[1]:
print("Data Error: B's col == R's row")
print("B shape:")
print(B.shape)
print("R's shape:")
print(R.shape)
return None
    sx=np.eye(A.shape[0])
    su=np.zeros((A.shape[0],B.shape[1]*N))
#calc sx,su
for i in range(N):
#generate sx
An=np.linalg.matrix_power(A, i+1)
sx=np.r_[sx,An]
#generate su
tmp=None
for ii in range(i+1):
tm=np.linalg.matrix_power(A, ii)*B
if tmp is None:
tmp=tm
else:
tmp =np.c_[tm,tmp]
for ii in np.arange(i,N-1):
tm=np.zeros(B.shape)
if tmp is None:
tmp=tm
else:
tmp =np.c_[tmp,tm]
su=np.r_[su,tmp]
tm1=np.eye(N+1)
tm1[N,N]=0
tm2=np.zeros((N+1,N+1))
tm2[N,N]=1
Qbar=np.kron(tm1,Q)+np.kron(tm2,P)
Rbar=np.kron(np.eye(N),R)
uopt=-(su.T*Qbar*su+Rbar).I*su.T*Qbar*sx*x0
# print(uBa)
costBa=x0.T*(sx.T*Qbar*sx-sx.T*Qbar*su*(su.T*Qbar*su+Rbar).I*su.T*Qbar*sx)*x0
# print(costBa)
return uopt
if __name__ == '__main__':
import matplotlib.pyplot as plt
A=np.matrix([[0.77,-0.35],[0.49,0.91]])
print("A:")
print(A)
B=np.matrix([0.04,0.15]).T
print("B:")
print(B)
x0=np.matrix([1,-1]).T
print("x0")
print(x0)
Q=np.matrix([[500,0.0],[0.0,100]])
print("Q")
print(Q)
R=np.matrix([1.0])
print("R")
print(R)
P=np.matrix([[1500,0.0],[0.0,100]])
print("P")
print(P)
N=20#Number of horizon
uopt=CalcFiniteHorizonOptimalInput(A,B,Q,R,P,N,x0)
#simulation
u_history=[]
x1_history=[]
x2_history=[]
x=x0
for u in uopt:
u_history.append(float(u[0]))
x=A*x+B*u
x1_history.append(float(x[0]))
x2_history.append(float(x[1]))
plt.plot(u_history,"-r",label="input")
plt.plot(x1_history,"-g",label="x1")
plt.plot(x2_history,"-b",label="x2")
plt.grid(True)
plt.legend()
plt.show()
| AtsushiSakai/PyAdvancedControl | finite_horizon_optimal_control/main.py | Python | mit | 2,743 | 0.032446 |
"""
Sysconfig - files in ``/etc/sysconfig/``
========================================
This is a collection of parsers that all deal with the system's configuration
files under the ``/etc/sysconfig/`` folder. Parsers included in this module
are:
ChronydSysconfig - file ``/etc/sysconfig/chronyd``
--------------------------------------------------
DockerSysconfig - file ``/etc/sysconfig/docker``
------------------------------------------------
HttpdSysconfig - file ``/etc/sysconfig/httpd``
----------------------------------------------
IrqbalanceSysconfig - file ``/etc/sysconfig/irqbalance``
--------------------------------------------------------
KdumpSysconfig - file ``/etc/sysconfig/kdump``
----------------------------------------------
MongodSysconfig - file ``/etc/sysconfig/mongod``
------------------------------------------------
NtpdSysconfig - file ``/etc/sysconfig/ntpd``
--------------------------------------------
VirtWhoSysconfig - file ``/etc/sysconfig/virt-who``
---------------------------------------------------
"""
from .. import parser, SysconfigOptions
from insights.specs import docker_sysconfig
from insights.specs import sysconfig_chronyd
from insights.specs import sysconfig_httpd
from insights.specs import sysconfig_irqbalance
from insights.specs import sysconfig_kdump
from insights.specs import sysconfig_mongod
from insights.specs import sysconfig_ntpd
from insights.specs import sysconfig_virt_who
@parser(sysconfig_chronyd)
class ChronydSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``chronyd`` service config file in the
``/etc/sysconfig`` directory.
Sample Input::
OPTIONS="-d"
#HIDE="me"
Examples:
>>> service_opts = shared[ChronydSysconfig]
>>> 'OPTIONS' in service_opts
True
>>> 'HIDE' in service_opts
False
>>> service_opts['OPTIONS']
'-d'
"""
pass
@parser(sysconfig_ntpd)
class NtpdSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``ntpd`` service config file in the
``/etc/sysconfig`` directory
Sample Input::
OPTIONS="-x -g"
#HIDE="me"
Examples:
        >>> service_opts = shared[NtpdSysconfig]
>>> 'OPTIONS' in service_opts
True
>>> 'HIDE' in service_opts
False
>>> service_opts['OPTIONS']
'-x -g'
"""
pass
@parser(docker_sysconfig)
class DockerSysconfig(SysconfigOptions):
"""
Class for parsing the ``/etc/sysconfig/docker`` file using the standard
``SysconfigOptions`` parser class. The 'OPTIONS' variable is also provided
in the ``options`` property as a convenience.
Examples:
>>> conf = shared[DockerSysconfig]
>>> 'OPTIONS' in conf
True
>>> conf['OPTIONS']
'--selinux-enabled'
>>> conf.options
'--selinux-enabled'
>>> conf['DOCKER_CERT_PATH']
'/etc/docker'
"""
@property
def options(self):
""" Return the value of the 'OPTIONS' variable, or '' if not defined. """
return self.data.get('OPTIONS', '')
@parser(sysconfig_httpd)
class HttpdSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``httpd`` service config file in the
``/etc/sysconfig`` directory.
Sample Input::
# The default processing model (MPM) is the process-based
# 'prefork' model. A thread-based model, 'worker', is also
# available, but does not work with some modules (such as PHP).
# The service must be stopped before changing this variable.
#
HTTPD=/usr/sbin/httpd.worker
#
# To pass additional options (for instance, -D definitions) to the
# httpd binary at startup, set OPTIONS here.
#
OPTIONS=
Examples:
>>> httpd_syscfg = shared[HttpdSysconfig]
>>> httpd_syscfg['HTTPD']
'/usr/sbin/httpd.worker'
>>> httpd_syscfg.get('OPTIONS')
''
>>> 'NOOP' in httpd_syscfg
False
"""
pass
@parser(sysconfig_irqbalance)
class IrqbalanceSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``irqbalance`` service config file in the
``/etc/sysconfig`` directory.
Sample Input::
#IRQBALANCE_ONESHOT=yes
#
# IRQBALANCE_BANNED_CPUS
# 64 bit bitmask which allows you to indicate which cpu's should
# be skipped when reblancing irqs. Cpu numbers which have their
# corresponding bits set to one in this mask will not have any
# irq's assigned to them on rebalance
#
IRQBALANCE_BANNED_CPUS=f8
IRQBALANCE_ARGS="-d"
Examples:
        >>> irqb_syscfg = shared[IrqbalanceSysconfig]
>>> irqb_syscfg['IRQBALANCE_BANNED_CPUS']
'f8'
>>> irqb_syscfg.get('IRQBALANCE_ARGS') # quotes will be stripped
'-d'
>>> irqb_syscfg.get('IRQBALANCE_ONESHOT')
None
>>> 'ONESHOT' in irqb_syscfg
False
"""
pass
@parser(sysconfig_kdump)
class KdumpSysconfig(SysconfigOptions):
"""
Read data from the ``/etc/sysconfig/kdump`` file.
This sets the following properties for ease of access:
* KDUMP_COMMANDLINE
* KDUMP_COMMANDLINE_REMOVE
* KDUMP_COMMANDLINE_APPEND
* KDUMP_KERNELVER
* KDUMP_IMG
* KDUMP_IMG_EXT
* KEXEC_ARGS
These are set to the value of the named variable in the kdump sysconfig
file, or '' if not found.
"""
KDUMP_KEYS = [
'KDUMP_COMMANDLINE',
'KDUMP_COMMANDLINE_REMOVE',
'KDUMP_COMMANDLINE_APPEND',
'KDUMP_KERNELVER',
'KDUMP_IMG',
'KDUMP_IMG_EXT',
'KEXEC_ARGS',
]
def parse_content(self, content):
super(KdumpSysconfig, self).parse_content(content)
for key in self.KDUMP_KEYS:
setattr(self, key, self.data.get(key, ''))
@parser(sysconfig_virt_who)
class VirtWhoSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``virt-who`` service configuration file in the
``/etc/sysconfig`` directory.
Sample Input::
# Register ESX machines using vCenter
# VIRTWHO_ESX=0
# Register guests using RHEV-M
VIRTWHO_RHEVM=1
# Options for RHEV-M mode
VIRTWHO_RHEVM_OWNER=
TEST_OPT="A TEST"
Examples:
>>> vwho_syscfg = shared[VirtWhoSysconfig]
>>> vwho_syscfg['VIRTWHO_RHEVM']
'1'
>>> vwho_syscfg.get('VIRTWHO_RHEVM_OWNER')
''
>>> vwho_syscfg.get('NO_SUCH_OPTION')
None
>>> 'NOSUCHOPTION' in vwho_syscfg
False
>>> vwho_syscfg.get('TEST_OPT') # Quotes are stripped
'A TEST'
"""
pass
@parser(sysconfig_mongod)
class MongodSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``mongod`` service configuration file in
the ``etc/sysconfig`` directory, contains 'etc/sysconfig/mongod' and
'/etc/opt/rh/rh-mongodb26/sysconfig/mongod'.
Sample Input::
OPTIONS="--quiet -f /etc/mongod.conf"
Examples:
        >>> mongod_syscfg = shared[MongodSysconfig]
>>> mongod_syscfg.get('OPTIONS')
'--quiet -f /etc/mongod.conf'
>>> mongod_syscfg.get('NO_SUCH_OPTION')
None
>>> 'NOSUCHOPTION' in mongod_syscfg
False
"""
pass
| wcmitchell/insights-core | insights/parsers/sysconfig.py | Python | apache-2.0 | 7,321 | 0.000137 |
# -*- Mode: Python; test-case-name:flumotion.test.test_worker_worker -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from twisted.internet import defer
from twisted.spread import pb
from flumotion.common import testsuite
from flumotion.test import realm
from flumotion.twisted import pb as fpb
from flumotion.worker import medium
class TestWorkerAvatar(fpb.PingableAvatar):
def __init__(self, avatarId, mind):
fpb.PingableAvatar.__init__(self, avatarId)
self.setMind(mind)
class TestWorkerRealm(realm.TestRealm):
deferredAvatar = None
deferredLogout = None
def getDeferredAvatar(self):
if self.deferredAvatar is None:
self.deferredAvatar = defer.Deferred()
return self.deferredAvatar
def getDeferredLogout(self):
if self.deferredLogout is None:
self.deferredLogout = defer.Deferred()
return self.deferredLogout
def requestAvatar(self, avatarId, keycard, mind, *ifaces):
avatar = TestWorkerAvatar(avatarId, mind)
self.getDeferredAvatar().callback(avatar)
return (pb.IPerspective, avatar,
lambda: self.avatarLogout(avatar))
def avatarLogout(self, avatar):
self.debug('worker logged out: %s', avatar.avatarId)
self.getDeferredLogout().callback(avatar)
class TestWorkerMedium(testsuite.TestCase):
def setUp(self):
self.realm = TestWorkerRealm()
def tearDown(self):
return self.realm.shutdown()
def testConnect(self):
m = medium.WorkerMedium(None)
connectionInfo = self.realm.getConnectionInfo()
connectionInfo.authenticator.avatarId = 'foo'
m.startConnecting(connectionInfo)
def connected(avatar):
m.stopConnecting()
return self.realm.getDeferredLogout()
def disconnected(avatar):
self.assertEquals(avatar.avatarId, 'foo')
d = self.realm.getDeferredAvatar()
d.addCallback(connected)
d.addCallback(disconnected)
return d
| timvideos/flumotion | flumotion/test/test_worker_medium.py | Python | lgpl-2.1 | 2,617 | 0 |
from django.contrib import admin
from .models import (
Log,
RequestLog,
EventLog,
)
class LogAdmin(admin.ModelAdmin):
readonly_fields = [
'log_level', 'request_url', 'request_method', 'get_data',
'request_body', 'cookies', 'meta',
'exception_type', 'message', 'stack_trace', 'user_id',
'user_name', 'request_browser', 'request_os', 'request_device',
'response_body', 'response_status', 'response_headers', 'response_content_type',
'is_mobile', 'is_tablet', 'is_touch_capable', 'is_pc',
'is_bot', 'created_on']
def has_add_permission(self, request):
return False
class RequestLogAdmin(admin.ModelAdmin):
readonly_fields = [
'method', 'url', 'request_data', 'request_headers',
'response_text', 'response_status', 'response_reason',
'response_time', 'created_on']
def has_add_permission(self, request):
return False
class EventLogAdmin(admin.ModelAdmin):
readonly_fields = [
'log_level', 'message', 'stack_trace', 'tag',
'created_on']
def has_add_permission(self, request):
return False
admin.site.register(Log, LogAdmin)
admin.site.register(RequestLog, RequestLogAdmin)
admin.site.register(EventLog, EventLogAdmin)
| eshandas/simple_django_logger | simple_django_logger/admin.py | Python | mit | 1,281 | 0.000781 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's repository API in product environments"""
import unittest
from trac.resource import Resource, get_resource_description, get_resource_url
from trac.versioncontrol.api import Repository
from trac.versioncontrol.tests.api import ResourceManagerTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductResourceManagerTestCase(ResourceManagerTestCase,
MultiproductTestCase):
@property
def env(self):
env = getattr(self, '_env', None)
if env is None:
self.global_env = self._setup_test_env()
self._upgrade_mp(self.global_env)
self._setup_test_log(self.global_env)
self._load_product_from_data(self.global_env, self.default_product)
self._env = env = ProductEnvironment(
self.global_env, self.default_product)
self._load_default_data(env)
return env
@env.setter
def env(self, value):
pass
def tearDown(self):
self.global_env.reset_db()
self.global_env = self._env = None
def test_resource_changeset(self):
res = Resource('changeset', '42')
self.assertEqual('Changeset 42', get_resource_description(self.env, res))
self.assertEqual('/trac.cgi/products/tp1/changeset/42',
get_resource_url(self.env, res, self.env.href))
repo = Resource('repository', 'repo')
res = Resource('changeset', '42', parent=repo)
self.assertEqual('Changeset 42 in repo',
get_resource_description(self.env, res))
self.assertEqual('/trac.cgi/products/tp1/changeset/42/repo',
get_resource_url(self.env, res, self.env.href))
def test_resource_source(self):
res = Resource('source', '/trunk/src')
self.assertEqual('path /trunk/src',
get_resource_description(self.env, res))
self.assertEqual('/trac.cgi/products/tp1/browser/trunk/src',
get_resource_url(self.env, res, self.env.href))
repo = Resource('repository', 'repo')
res = Resource('source', '/trunk/src', parent=repo)
self.assertEqual('path /trunk/src in repo',
get_resource_description(self.env, res))
self.assertEqual('/trac.cgi/products/tp1/browser/repo/trunk/src',
get_resource_url(self.env, res, self.env.href))
repo = Resource('repository', 'repo')
res = Resource('source', '/trunk/src', version=42, parent=repo)
self.assertEqual('path /trunk/src@42 in repo',
get_resource_description(self.env, res))
self.assertEqual('/trac.cgi/products/tp1/browser/repo/trunk/src?rev=42',
get_resource_url(self.env, res, self.env.href))
def test_resource_repository(self):
res = Resource('repository', 'testrepo')
self.assertEqual('Repository testrepo',
get_resource_description(self.env, res))
self.assertEqual('/trac.cgi/products/tp1/browser/testrepo',
get_resource_url(self.env, res, self.env.href))
def test_suite():
return unittest.TestSuite([
unittest.makeSuite(ProductResourceManagerTestCase,'test'),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| apache/bloodhound | bloodhound_multiproduct/tests/versioncontrol/api.py | Python | apache-2.0 | 4,299 | 0.001163 |
'''
Provides several CacheStore backends for Cheetah's caching framework. The
methods provided by these classes have the same semantics as those in the
python-memcached API, except for their return values:
set(key, val, time=0)
set the value unconditionally
add(key, val, time=0)
set only if the server doesn't already have this key
replace(key, val, time=0)
    set only if the server already has this key
get(key)
returns val or raises a KeyError
delete(key)
deletes or raises a KeyError
'''
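# A minimal usage sketch (in-process MemoryCacheStore; keys and values are
# illustrative only):
#
#   store = MemoryCacheStore()
#   store.set('greeting', 'hello')   # unconditional set
#   store.add('answer', 42)          # Error if 'answer' is already cached
#   store.get('greeting')            # -> 'hello'; KeyError once expired/missing
#   store.delete('greeting')         # KeyError if absent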
import time
from Cheetah.Utils.memcache import Client as MemcachedClient
class Error(Exception):
pass
class AbstractCacheStore(object):
def set(self, key, val, time=None):
raise NotImplementedError
def add(self, key, val, time=None):
raise NotImplementedError
def replace(self, key, val, time=None):
raise NotImplementedError
def delete(self, key):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
class MemoryCacheStore(AbstractCacheStore):
def __init__(self):
self._data = {}
def set(self, key, val, time=0):
self._data[key] = (val, time)
def add(self, key, val, time=0):
if self._data.has_key(key):
raise Error('a value for key %r is already in the cache'%key)
self._data[key] = (val, time)
def replace(self, key, val, time=0):
        if not self._data.has_key(key):
            raise Error('no value for key %r is in the cache'%key)
self._data[key] = (val, time)
def delete(self, key):
del self._data[key]
def get(self, key):
(val, exptime) = self._data[key]
if exptime and time.time() > exptime:
del self._data[key]
raise KeyError(key)
else:
return val
def clear(self):
self._data.clear()
class MemcachedCacheStore(AbstractCacheStore):
    servers = ('127.0.0.1:11211',)
def __init__(self, servers=None, debug=False):
if servers is None:
servers = self.servers
self._client = MemcachedClient(servers, debug)
def set(self, key, val, time=0):
self._client.set(key, val, time)
def add(self, key, val, time=0):
res = self._client.add(key, val, time)
if not res:
raise Error('a value for key %r is already in the cache'%key)
def replace(self, key, val, time=0):
res = self._client.replace(key, val, time)
if not res:
            raise Error('no value for key %r is in the cache'%key)
def delete(self, key):
res = self._client.delete(key, time=0)
if not res:
raise KeyError(key)
def get(self, key):
val = self._client.get(key)
if val is None:
raise KeyError(key)
else:
return val
def clear(self):
self._client.flush_all()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/Cheetah-2.2.2-py2.7-linux-x86_64-ucs4.egg/Cheetah/CacheStore.py | Python | gpl-3.0 | 3,059 | 0.006538 |
import sublime, sublime_plugin
#
# Classic emacs mark ring with multi-cursor support. Each entry in the ring is implemented
# with a named view region with an index, so that the marks are adjusted automatically by
# Sublime. The special region called "jove_mark" is used to display the current mark. It's
# a copy of the current mark with gutter display properties turned on.
#
# Each entry is an array of 1 or more regions.
#
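# A rough usage sketch (view is a sublime.View; the positions are illustrative):
#   ring = MarkRing(view)
#   ring.set([sublime.Region(pt, pt) for pt in (10, 42)])  # push a multi-cursor mark
#   regions = ring.pop()                                    # caller moves point back here
#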
class MarkRing:
MARK_RING_SIZE = 16
def __init__(self, view):
self.view = view
self.index = 0
# in case any left over from before
self.view.erase_regions("jove_mark")
for i in range(self.MARK_RING_SIZE):
self.view.erase_regions(self.get_key(i))
def get_key(self, index):
return "jove_mark:" + str(index)
def clear(self):
self.view.erase_regions("jove_mark")
def has_visible_mark(self):
return self.view.get_regions("jove_mark") != None and len(self.view.get_regions("jove_mark")) > 0
#
# Update the display to show the current mark.
#
def display(self):
# display the mark's dot
regions = self.get()
if regions is not None:
self.view.add_regions("jove_mark", regions, "mark", "dot", sublime.HIDDEN)
#
# Get the current mark(s).
#
def get(self):
return self.view.get_regions(self.get_key(self.index))
#
# Set the mark to pos. If index is supplied we overwrite that mark, otherwise we push to the
# next location.
#
def set(self, regions, reuse_index=False):
if self.get() == regions:
# don't set another mark in the same place
return
if not reuse_index:
self.index = (self.index + 1) % self.MARK_RING_SIZE
self.view.add_regions(self.get_key(self.index), regions, "mark", "", sublime.HIDDEN)
self.display()
#
# Exchange the current mark with the specified pos, and return the current mark.
#
def exchange(self, regions):
current = self.get()
if current is not None:
self.set(regions, True)
return current
#
# Pops the current mark from the ring and returns it. The caller sets point to that value. The
# new mark is the previous mark on the ring.
#
def pop(self):
regions = self.get()
# find a non-None mark in the ring
start = self.index
while True:
self.index -= 1
if self.index < 0:
self.index = self.MARK_RING_SIZE - 1
if self.get() or self.index == start:
break
self.display()
return regions
| grundprinzip/sublemacspro | lib/mark_ring.py | Python | bsd-3-clause | 2,686 | 0.004468 |
#!/usr/bin/python
# coding: utf-8
import numpy as np
from keras.models import Sequential, Model, load_model
from keras.optimizers import SGD,Adagrad,RMSprop,Adam
from keras.layers import Dense, Input, Activation
from keras.layers import BatchNormalization, Add, Dropout
from keras import optimizers
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU,LeakyReLU
from keras.utils import np_utils, generic_utils
from sklearn.base import BaseEstimator
import types
import tempfile
import keras.models
from keras import callbacks
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, larger layers
- with more layers, less layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
'''
From kaggle forum:
NN is the average of 30 neural networks with the same parameters fed by x^(2/3) transformed features and by results of KNN with N = 27 (KNN gained .002 for my best solution). NN was implemented on Keras, I've found this library very nice and fast (with CUDA-enabled Theano). Layers were (512,256,128), the score was .428
Dropout(.15) -> Dense(n_in, l1, activation='tanh') -> BatchNormalization((l1,)) -> Dropout(.5) -> Dense(l1, l2) -> PReLU((l2,)) -> BatchNormalization((l2,)) -> Dropout(.3) -> Dense(l2, l3) -> PReLU((l3,)) -> BatchNormalization((l3,)) -> Dropout(.1) -> Dense(l3, n_out) -> Activation('softmax')
sgd = SGD(lr=0.004, decay=1e-7, momentum=0.99, nesterov=True)
Rossmann 3d place: https://github.com/entron/category-embedding-rossmann/blob/master/models.py "categorical embedding"
avito challenge https://www.kaggle.com/rightfit/avito-duplicate-ads-detection/get-hash-from-images/code
'''
def RMSE(y_true, y_pred):
    # written with Theano tensor ops, so this helper assumes the Theano backend
    import theano.tensor as T
    loss = T.sqrt(T.sqr(y_true - y_pred).mean(axis=-1))
    #print(loss)
    return loss
def make_keras_picklable():
def __getstate__(self):
model_str = ""
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
keras.models.save_model(self, fd.name, overwrite=True)
model_str = fd.read()
d = { 'model_str': model_str }
return d
def __setstate__(self, state):
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
fd.write(state['model_str'])
fd.flush()
model = keras.models.load_model(fd.name)
self.__dict__ = model.__dict__
cls = keras.models.Model
cls.__getstate__ = __getstate__
cls.__setstate__ = __setstate__
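# Usage note (illustrative): call make_keras_picklable() once, after which
# pickling a keras Model round-trips its weights and architecture through a
# temporary HDF5 file via the patched __getstate__/__setstate__.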
#https://gist.github.com/MaxHalford/9bfaa8daf8b4bc17a7fb7ba58c880675#file-fit-py
early_stopping = callbacks.EarlyStopping(monitor='val_loss', patience=1, verbose=0, mode='auto')
def create_classification_model(input_dim=64,learning_rate=0.001,activation='relu',batchnorm=False,layers=[256,256],dropouts=[0.0,0.0],optimizer=None):
# create model
model = Sequential()
for i,(layer,dropout) in enumerate(zip(layers,dropouts)):
if i==0:
model.add(Dense(layer, input_dim=input_dim, kernel_initializer='uniform'))
if batchnorm: model.add(BatchNormalization()) # problem with CUDA?
model.add(Activation(activation))
model.add(Dropout(dropout))
else:
model.add(Dense(layer, kernel_initializer='uniform'))
if batchnorm: model.add(BatchNormalization())
model.add(Activation(activation))
model.add(Dropout(dropout))
if batchnorm: model.add(BatchNormalization())
model.add(Dense(1, kernel_initializer='uniform',activation='sigmoid'))
# Compile model
if optimizer is None:
optimizer = optimizers.SGD(lr=learning_rate, momentum=0.0, decay=0.0, nesterov=False) # normal
elif 'adam' in optimizer:
optimizer = optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # deep nets
elif 'adadelta' in optimizer:
optimizer = optimizers.Adadelta(lr=learning_rate, rho=0.95, epsilon=1e-08, decay=0.0)
elif 'adagrad' in optimizer:
        optimizer = optimizers.Adagrad(lr=learning_rate)
else:
optimizer = optimizers.SGD(lr=learning_rate, momentum=0.0, decay=0.0, nesterov=False) # normal
model.compile(loss='binary_crossentropy',optimizer=optimizer,metrics=['accuracy'])
return model
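# Hedged usage sketch for create_classification_model above; the feature count,
# layer sizes and random data are placeholders, not values from the original
# experiments.
def classification_model_demo():
    X = np.random.rand(128, 20)
    y = (np.random.rand(128) > 0.5).astype(int)
    model = create_classification_model(input_dim=20, learning_rate=0.001,
                                        layers=[64, 32], dropouts=[0.2, 0.2],
                                        optimizer='adam')
    model.fit(X, y, epochs=2, batch_size=32, verbose=0)
    return model.predict(X[:5])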
def create_regression_model_old(input_dim=64,learning_rate=0.001,activation='sigmoid',layers=[256,256],dropouts=[0.0,0.0],loss='mean_absolute_error',optimizer=None):
# create model
model = Sequential()
for i,(layer,dropout) in enumerate(zip(layers,dropouts)):
if i==0:
model.add(Dropout(dropout))
model.add(Dense(layer, input_dim=input_dim, kernel_initializer='normal', activation=activation))
else:
model.add(Dropout(dropout))
model.add(Dense(layer, kernel_initializer='normal', activation=activation))
model.add(Dense(1, kernel_initializer='normal',activation='linear'))
# Compile model
#model.compile(loss='mean_squared_error', optimizer=optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0))
#model.compile(loss='mean_squared_error', optimizer=Adagrad(lr=self.learning_rate) # 0.01
if optimizer is None:
optimizer = optimizers.RMSprop(lr=learning_rate)
model.compile(loss=loss,optimizer=optimizer)
return model
def create_regression_model(input_dim=64,learning_rate=0.001,layers=[256,256],dropouts=[0.0,0.0],loss='mean_absolute_error',optimizer=None):
inp = Input(shape=(input_dim,))
    x = inp
    for i,(layer,dropout) in enumerate(zip(layers,dropouts)):
        # chain each hidden block onto the previous one instead of branching every Dense off the raw input
        x = Dense(layer)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.05)(x)
        x = Dropout(dropout)(x)
out = Dense(1, activation="linear")(x)
model = Model(inputs=inp, outputs=[out])
if optimizer is None:
#optimizer = optimizers.RMSprop(lr=learning_rate)
optimizer = Adam()
model.compile(loss=loss, optimizer=optimizer)
return model
class KerasNN(BaseEstimator):
def __init__(self, dims=66, nb_classes=1, nb_epoch=30, learning_rate=0.5, validation_split=0.0, batch_size=64,
loss='categorical_crossentropy', layers=[32,32], activation='relu', dropout=[0.2,0.2],verbose=1):
self.dims = dims
self.nb_classes = nb_classes
self.classes_ = None # list containing classes
self.nb_epoch = nb_epoch
self.learning_rate = learning_rate
self.validation_split = validation_split
self.batch_size = batch_size
self.loss = loss
self.layers = layers
self.activation = activation
self.dropout = dropout
self.verbose = verbose
self.hist = ""
self.model = Sequential()
# Keras model
for i,dropout in enumerate(self.dropout):
if i>0:
dims = self.layers[i-1]
if 'maxout' in self.activation:
print("Currently not implemented...")
#self.model.add(MaxoutDense(output_dim=layers[i], nb_feature=4, input_dim=dims))
else:
self.model.add(Dense(output_dim=layers[i], input_dim=dims, init='glorot_uniform'))
#https://www.reddit.com/r/MachineLearning/comments/22u1yt/is_deep_learning_basically_just_neural_networks/
#https://www.kaggle.com/c/job-salary-prediction/forums/t/4208/congratulations-to-the-preliminary-winners?page=2
if 'PReLU' in self.activation:
self.model.add(PReLU())
elif 'LeakyReLU' in self.activation:
self.model.add(LeakyReLU(alpha=0.3))
else:
self.model.add(Activation(self.activation))
self.model.add(BatchNormalization())
if dropout>1E-15:
self.model.add(Dropout(dropout))
if 'categorical_crossentropy' in loss:
self.model.add(Dense(output_dim=nb_classes))
self.model.add(Activation('softmax'))
self.model.compile(loss=loss, optimizer="adadelta")
else:
self.model.add(Dense(output_dim=1))
self.model.add(Activation('linear'))
#optimizer = Adagrad(lr=self.learning_rate) # 0.01
#optimizer = Adagrad()
print("Learning rate:",self.learning_rate)
optimizer = RMSprop(lr=self.learning_rate) # 0.001
#optimizer = RMSprop()
if 'rmse' in self.loss:
self.model.compile(loss=RMSE, optimizer=optimizer)
else:
self.model.compile(loss=self.loss, optimizer=optimizer)
# tanh better for regression?
#sgd = SGD(lr=self.learning_rate, decay=1e-7, momentum=0.99, nesterov=True)
print('Compiling Keras Deep Net with loss: %s and activation: %s' % (str(self.loss),self.activation))
def fit(self, X, y, sample_weight=None):
print('Fitting Keras Deep Net for regression with batch_size %d, epochs %d and learning rate: %f' % (
self.batch_size, self.nb_epoch, self.learning_rate))
        if self.nb_classes>1:
            # record the original class labels before one-hot encoding
            self.classes_ = np.unique(y)
            y = np_utils.to_categorical(y)
y = np.reshape(y,(y.shape[0],-1))
#pandas hack
if not isinstance(X,np.ndarray):
X = X.values
self.model.fit(X, y,batch_size=self.batch_size, nb_epoch=self.nb_epoch, verbose=self.verbose,callbacks=[],
validation_split=self.validation_split,validation_data=None,shuffle=True,class_weight=None,sample_weight=sample_weight)
def predict_proba(self, X):
if not isinstance(X,np.ndarray):
X = X.values
ypred = self.model.predict_proba(X, batch_size=self.batch_size, verbose=self.verbose)
return ypred
def predict(self, X):
if not isinstance(X,np.ndarray):
X = X.values
ypred = self.model.predict(X, batch_size=self.batch_size, verbose=self.verbose)
if self.nb_classes>1:
ypred = np_utils.probas_to_classes(ypred)
else:
ypred = ypred.flatten()
return ypred
def save_model(self,filename):
self.model.save(filename)
def load_model(self,filename):
self.model = load_model(filename)
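# Hedged usage sketch for the sklearn-style KerasNN wrapper above; the data and
# hyper-parameters are placeholders. Note that KerasNN itself uses the older
# Keras 1 API (output_dim, nb_epoch, probas_to_classes), so this assumes a
# matching Keras version is installed.
def kerasnn_regression_demo():
    X = np.random.rand(64, 10)
    y = np.random.rand(64)
    est = KerasNN(dims=10, nb_classes=1, nb_epoch=2, learning_rate=0.001,
                  loss='mean_absolute_error', layers=[16, 16],
                  activation='relu', dropout=[0.1, 0.1], verbose=0)
    est.fit(X, y)
    return est.predict(X[:5])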
| chrissly31415/amimanera | keras_tools.py | Python | lgpl-3.0 | 10,881 | 0.01351 |
"""Read in lightcurve files."""
import logging
import numpy as np
import astropy.io.ascii as at
def read_single_aperture(filename):
"""Read in one of AMC's K2 light curves,
inputs
------
filename: string
should look like EPIC_205030103_xy_ap1.5_fixbox_cleaned.dat
outputs
-------
time, flux, unc_flux, x_pos, y_pos, qual_flux: arrays
aperture: float
"""
# Read in the file
lc = at.read(filename, delimiter=' ',data_start=1)
split_filename = filename.split("/")[-1].split('_')
logging.debug(split_filename)
if split_filename[0]=="EPIC":
epicID = split_filename[1]
else:
epicID = split_filename[0]
aperture = split_filename[3]
if aperture.startswith("ap"):
aperture = aperture[2:]
if aperture.endswith(".dat"):
aperture = aperture[:-4]
# Extract the useful columns
time = lc["Dates"]
flux = lc["Flux"]
try:
unc_flux = lc["Uncert{}".format(aperture)]
    except KeyError:
unc_flux = np.ones_like(flux)
x_pos = lc["Xpos"]
y_pos = lc["Ypos"]
try:
qual_flux = lc["Quality"]
    except KeyError:
qual_flux = np.ones_like(flux)
aperture = float(aperture)
# Return the columns
return time, flux, unc_flux, x_pos, y_pos, qual_flux, aperture
def read_double_aperture(filename):
"""Read in one of AMC's K2 lc files with 2 aperture extractions.
inputs
------
filename: string
should look like EPIC_205030103_xy_ap#.#_#.#_fixbox.dat
outputs
-------
time: array
flux, unc_flux: arrays, shape=(2, n_datapoints)
A flux and uncertainty array for each aperture in the file
x_pos, y_pos, qual_flux: arrays
apertures: array, length=2
The apertures contained in the file
"""
# Read in the file
lc = at.read(filename, delimiter=' ',data_start=1)
split_filename = filename.split("/")[-1].split('_')
logging.debug(split_filename)
epicID = split_filename[1]
# Extract the useful columns
time = lc["Dates"]
fluxes = np.array([lc["Flux5"], lc["Flux3"]])
unc_fluxes = np.array([lc["Uncert5"], lc["Uncert3"]])
apertures = np.array([5.,3.])
x_pos = lc["Xpos"]
y_pos = lc["Ypos"]
qual_flux = lc["Quality"]
# Return the columns
return time, fluxes, unc_fluxes, x_pos, y_pos, qual_flux, apertures
def read_list(file_list):
"""Read in a list of lightcurve filenames."""
pass
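# Hedged usage sketch (not part of the original module): how the two readers
# above are typically called. The file names are hypothetical examples that
# follow the patterns documented in the docstrings.
def example_usage():
    time, flux, unc_flux, x_pos, y_pos, qual_flux, aperture = read_single_aperture(
        "EPIC_205030103_xy_ap1.5_fixbox_cleaned.dat")
    time2, fluxes, unc_fluxes, x2, y2, qual2, apertures = read_double_aperture(
        "EPIC_205030103_xy_ap5.0_3.0_fixbox.dat")
    return aperture, apertures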
| stephtdouglas/k2spin | k2io.py | Python | mit | 2,475 | 0.006061 |
import numpy as np
import eos
import h5py
# This script converts the Basel Face Model 2017 (BFM2017, [1]) to the eos model format,
# specifically the files model2017-1_face12_nomouth.h5 and model2017-1_bfm_nomouth.h5 from the BFM2017 download.
#
# The BFM2017 does not come with texture (uv-) coordinates. If you have texture coordinates for the BFM, they can be
# added to the eos.morphablemodel.MorphableModel(...) constructor in the third argument. Note that eos only supports one
# uv-coordinate per vertex.
#
# [1]: Morphable Face Models - An Open Framework,
# T. Gerig, A. Morel-Forster, C. Blumer, B. Egger, M. Lüthi, S. Schönborn and T. Vetter,
# arXiv preprint, 2017.
# http://faces.cs.unibas.ch/bfm/bfm2017.html
# Set this to the path of the model2017-1_bfm_nomouth.h5 or model2017-1_face12_nomouth.h5 file from the BFM2017 download:
bfm2017_file = r"./model2017-1_bfm_nomouth.h5"
with h5py.File(bfm2017_file, 'r') as hf:
# The PCA shape model:
shape_mean = np.array(hf['shape/model/mean'])
shape_orthogonal_pca_basis = np.array(hf['shape/model/pcaBasis'])
# Their basis is unit norm: np.linalg.norm(shape_pca_basis[:,0]) == ~1.0
# And the basis vectors are orthogonal: np.dot(shape_pca_basis[:,0], shape_pca_basis[:,0]) == 1.0
# np.dot(shape_pca_basis[:,0], shape_pca_basis[:,1]) == 1e-10
    shape_pca_variance = np.array(hf['shape/model/pcaVariance']) # the PCA variances are the eigenvalues
triangle_list = np.array(hf['shape/representer/cells'])
shape_model = eos.morphablemodel.PcaModel(shape_mean, shape_orthogonal_pca_basis, shape_pca_variance,
triangle_list.transpose().tolist())
# PCA colour model:
color_mean = np.array(hf['color/model/mean'])
color_orthogonal_pca_basis = np.array(hf['color/model/pcaBasis'])
color_pca_variance = np.array(hf['color/model/pcaVariance'])
color_model = eos.morphablemodel.PcaModel(color_mean, color_orthogonal_pca_basis, color_pca_variance,
triangle_list.transpose().tolist())
# PCA expression model:
expression_mean = np.array(hf['expression/model/mean'])
expression_pca_basis = np.array(hf['expression/model/pcaBasis'])
expression_pca_variance = np.array(hf['expression/model/pcaVariance'])
expression_model = eos.morphablemodel.PcaModel(expression_mean, expression_pca_basis, expression_pca_variance,
triangle_list.transpose().tolist())
# Construct and save an eos model from the BFM data:
model = eos.morphablemodel.MorphableModel(shape_model, expression_model, color_model, vertex_definitions=None,
texture_coordinates=[],
texture_triangle_indices=[]) # uv-coordinates can be added here
eos.morphablemodel.save_model(model, "bfm2017-1_bfm_nomouth.bin")
print("Converted and saved model as bfm2017-1_bfm_nomouth.bin.")
| patrikhuber/eos | share/scripts/convert-bfm2017-to-eos.py | Python | apache-2.0 | 3,067 | 0.005546 |
from __future__ import absolute_import
from django.conf import settings
from django.template import Library
from sentry import options
from sentry.utils.assets import get_asset_url
from sentry.utils.http import absolute_uri
register = Library()
register.simple_tag(get_asset_url, name='asset_url')
@register.simple_tag
def absolute_asset_url(module, path):
"""
Returns a versioned absolute asset URL (located within Sentry's static files).
Example:
{% absolute_asset_url 'sentry' 'dist/sentry.css' %}
=> "http://sentry.example.com/_static/74d127b78dc7daf2c51f/sentry/dist/sentry.css"
"""
return absolute_uri(get_asset_url(module, path))
@register.simple_tag
def crossorigin():
"""
Returns an additional crossorigin="anonymous" snippet for use in a <script> tag if
our asset urls are from a different domain than the system.url-prefix.
"""
if absolute_uri(settings.STATIC_URL).startswith(options.get('system.url-prefix')):
# They share the same domain prefix, so we don't need CORS
return ''
return ' crossorigin="anonymous"'
@register.simple_tag(takes_context=True)
def locale_js_include(context):
"""
If the user has a non-English locale set, returns a <script> tag pointing
to the relevant locale JavaScript file
"""
request = context['request']
try:
lang_code = request.LANGUAGE_CODE
except AttributeError:
        # it's possible that at this point LANGUAGE_CODE hasn't been bound
        # to the request object yet. This specifically happens when rendering our own
        # 500 error page, resulting in yet another error trying to render our error.
return ''
if lang_code == 'en' or lang_code not in settings.SUPPORTED_LANGUAGES:
return ''
href = get_asset_url("sentry", "dist/locale/" + lang_code + ".js")
return "<script src=\"{0}\"{1}></script>".format(href, crossorigin())
| JackDanger/sentry | src/sentry/templatetags/sentry_assets.py | Python | bsd-3-clause | 1,943 | 0.003603 |
# -*- coding: UTF-8 -*-
from collections import defaultdict
from decimal import Decimal
from django import forms
from django import http
from django.conf import settings
from django.conf.urls import url, patterns
from django.contrib import admin
from django.core import urlresolvers
from django.db.models import Q
from django.contrib.auth.models import User
from assopy import admin as aadmin
from assopy import models as amodels
from assopy import stats as astats
from assopy import utils as autils
from conference import admin as cadmin
from conference import models as cmodels
from conference import forms as cforms
from p3 import models
from p3 import dataaccess
from p3 import utils
### Custom list filters
class DiscountListFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = 'discounts'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'discounts'
def lookups(self, request, model_admin):
return (
('yes', 'With discounts'),
('no', 'Regular order'),
)
def queryset(self, request, queryset):
if self.value() == 'yes':
return queryset.filter(orderitem__price__lt=0)
elif self.value() == 'no':
return queryset.exclude(orderitem__price__lt=0)
###
_TICKET_CONFERENCE_COPY_FIELDS = ('shirt_size', 'python_experience', 'diet', 'tagline', 'days', 'badge_image')
def ticketConferenceForm():
class _(forms.ModelForm):
class Meta:
model = models.TicketConference
fields = '__all__'
fields = _().fields
class TicketConferenceForm(forms.ModelForm):
shirt_size = fields['shirt_size']
python_experience = fields['python_experience']
diet = fields['diet']
tagline = fields['tagline']
days = fields['days']
badge_image = fields['badge_image']
class Meta:
model = cmodels.Ticket
fields = '__all__'
def __init__(self, *args, **kw):
if 'instance' in kw:
o = kw['instance']
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
pass
else:
if p3c:
initial = kw.pop('initial', {})
for k in _TICKET_CONFERENCE_COPY_FIELDS:
initial[k] = getattr(p3c, k)
kw['initial'] = initial
return super(TicketConferenceForm, self).__init__(*args, **kw)
return TicketConferenceForm
class TicketConferenceAdmin(cadmin.TicketAdmin):
list_display = cadmin.TicketAdmin.list_display + (
'frozen',
'_order',
'_order_date',
'_assigned',
'_shirt_size',
'_diet',
'_python_experience',
#'_tagline',
)
list_select_related = True
list_filter = cadmin.TicketAdmin.list_filter + (
'fare__code',
'orderitem__order___complete',
'frozen',
'p3_conference__shirt_size',
'p3_conference__diet',
'p3_conference__python_experience',
'orderitem__order__created',
)
search_fields = cadmin.TicketAdmin.search_fields + (
'orderitem__order__code',
'fare__code',
)
actions = cadmin.TicketAdmin.actions + (
'do_assign_to_buyer',
'do_update_ticket_name',
)
form = ticketConferenceForm()
class Media:
js = ('p5/j/jquery-flot/jquery.flot.js',)
def _order(self, obj):
url = urlresolvers.reverse('admin:assopy_order_change',
args=(obj.orderitem.order.id,))
return '<a href="%s">%s</a>' % (url, obj.orderitem.order.code)
_order.allow_tags = True
def _order_date(self, o):
return o.orderitem.order.created
_order_date.admin_order_field = 'orderitem__order__created'
def _assigned(self, ticket):
if ticket.p3_conference:
assigned_to = ticket.p3_conference.assigned_to
if assigned_to:
comment = ''
user = None
try:
user = autils.get_user_account_from_email(assigned_to)
except User.MultipleObjectsReturned:
comment = ' (email not unique)'
except User.DoesNotExist:
try:
user = autils.get_user_account_from_email(assigned_to,
active_only=False)
except User.DoesNotExist:
comment = ' (does not exist)'
else:
comment = ' (user inactive)'
if user is not None:
url = urlresolvers.reverse('admin:auth_user_change',
args=(user.id,))
user_name = ('%s %s' %
(user.first_name, user.last_name)).strip()
if not user_name:
user_name = assigned_to
comment += ' (no name set)'
return '<a href="%s">%s</a>%s' % (url, user_name, comment)
elif not comment:
comment = ' (missing user account)'
return '%s%s' % (assigned_to, comment)
else:
return '(not assigned)'
else:
return '(old style ticket)'
_assigned.allow_tags = True
_assigned.admin_order_field = 'p3_conference__assigned_to'
def do_assign_to_buyer(self, request, queryset):
if not queryset:
self.message_user(request, 'no tickets selected', level='error')
return
for ticket in queryset:
# Assign to buyer
utils.assign_ticket_to_user(ticket, ticket.user)
do_assign_to_buyer.short_description = 'Assign to buyer'
def do_update_ticket_name(self, request, queryset):
if not queryset:
self.message_user(request, 'no tickets selected')
return
for ticket in queryset:
# Find selected user
if not ticket.p3_conference:
continue
assigned_to = ticket.p3_conference.assigned_to
try:
user = autils.get_user_account_from_email(assigned_to)
except User.MultipleObjectsReturned:
self.message_user(request,
'found multiple users with '
'email address %s' % assigned_to,
level='error')
return
except User.DoesNotExist:
self.message_user(request,
'no user record found or user inactive for '
' email address %s' % assigned_to,
level='error')
return
if user is None:
self.message_user(request,
'no user record found for '
' email address %s' % assigned_to,
level='error')
# Reassign to selected user
utils.assign_ticket_to_user(ticket, user)
do_update_ticket_name.short_description = 'Update ticket name'
def _shirt_size(self, o):
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
return ''
return p3c.shirt_size
def _diet(self, o):
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
return ''
return p3c.diet
def _python_experience(self, o):
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
return ''
return p3c.python_experience
_python_experience.admin_order_field = 'p3_conference__python_experience'
def _tagline(self, o):
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
return ''
html = p3c.tagline
if p3c.badge_image:
i = ['<img src="%s" width="24" />' % p3c.badge_image.url] * p3c.python_experience
html += '<br />' + ' '.join(i)
return html
_tagline.allow_tags = True
def save_model(self, request, obj, form, change):
obj.save()
try:
p3c = obj.p3_conference
except models.TicketConference.DoesNotExist:
p3c = None
if p3c is None:
p3c = models.TicketConference(ticket=obj)
data = form.cleaned_data
for k in _TICKET_CONFERENCE_COPY_FIELDS:
setattr(p3c, k, data.get(k))
p3c.save()
def changelist_view(self, request, extra_context=None):
if not request.GET:
q = request.GET.copy()
q['fare__conference'] = settings.CONFERENCE_CONFERENCE
q['fare__ticket_type__exact'] = 'conference'
q['orderitem__order___complete__exact'] = 1
q['frozen__exact'] = 0
request.GET = q
request.META['QUERY_STRING'] = request.GET.urlencode()
return super(TicketConferenceAdmin,self).changelist_view(request, extra_context=extra_context)
def get_queryset(self, request):
qs = super(TicketConferenceAdmin, self).get_queryset(request)
qs = qs.select_related('orderitem__order', 'p3_conference', 'user', 'fare', )
return qs
def get_urls(self):
urls = super(TicketConferenceAdmin, self).get_urls()
my_urls = patterns('',
url(r'^stats/data/$', self.admin_site.admin_view(self.stats_data), name='p3-ticket-stats-data'),
)
return my_urls + urls
def stats_data(self, request):
from conference.views import json_dumps
from django.db.models import Q
from collections import defaultdict
from microblog.models import PostContent
import datetime
conferences = cmodels.Conference.objects\
.order_by('conference_start')
output = {}
for c in conferences:
tickets = cmodels.Ticket.objects\
.filter(fare__conference=c)\
.filter(Q(orderitem__order___complete=True) | Q(orderitem__order__method__in=('bank', 'admin')))\
.select_related('fare', 'orderitem__order')
data = {
'conference': defaultdict(lambda: 0),
'partner': defaultdict(lambda: 0),
'event': defaultdict(lambda: 0),
'other': defaultdict(lambda: 0),
}
for t in tickets:
tt = t.fare.ticket_type
date = t.orderitem.order.created.date()
offset = date - c.conference_start
data[tt][offset.days] += 1
for k, v in data.items():
data[k] = sorted(v.items())
dlimit = datetime.date(c.conference_start.year, 1, 1)
deadlines = cmodels.DeadlineContent.objects\
.filter(language='en')\
.filter(deadline__date__lte=c.conference_start, deadline__date__gte=dlimit)\
.select_related('deadline')\
.order_by('deadline__date')
markers = [ ((d.deadline.date - c.conference_start).days, 'CAL: ' + (d.headline or d.body)) for d in deadlines ]
posts = PostContent.objects\
.filter(language='en')\
.filter(post__date__lte=c.conference_start, post__date__gte=dlimit)\
.filter(post__status='P')\
.select_related('post')\
.order_by('post__date')
markers += [ ((d.post.date.date() - c.conference_start).days, 'BLOG: ' + d.headline) for d in posts ]
output[c.code] = {
'data': data,
'markers': markers,
}
return http.HttpResponse(json_dumps(output), 'text/javascript')
admin.site.unregister(cmodels.Ticket)
admin.site.register(cmodels.Ticket, TicketConferenceAdmin)
class SpeakerAdmin(cadmin.SpeakerAdmin):
list_display = cadmin.SpeakerAdmin.list_display + (
)
list_filter = (
'p3_speaker__first_time',
)
def get_queryset(self, request):
# XXX: waiting to upgrade to django 1.4, I'm implementing
# this bad hack filter to keep only speakers of current conference.
qs = super(SpeakerAdmin, self).get_queryset(request)
qs = qs.filter(user__in=(
cmodels.TalkSpeaker.objects\
.filter(talk__conference=settings.CONFERENCE_CONFERENCE)\
.values('speaker')
))
return qs
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
sids = queryset.values_list('user', flat=True)
profiles = dataaccess.profiles_data(sids)
self._profiles = dict(zip(sids, profiles))
return super(SpeakerAdmin, self).get_paginator(request, queryset, per_page, orphans, allow_empty_first_page)
def _avatar(self, o):
return '<img src="%s" height="32" />' % (self._profiles[o.user_id]['image'],)
_avatar.allow_tags = True
admin.site.unregister(cmodels.Speaker)
admin.site.register(cmodels.Speaker, SpeakerAdmin)
class DonationAdmin(admin.ModelAdmin):
list_display = ('_name', 'date', 'amount')
list_select_related = True
search_fields = ('user__user__first_name', 'user__user__last_name', 'user__user__email')
date_hierarchy = 'date'
def _name(self, o):
return o.user.name()
_name.short_description = 'name'
_name.admin_order_field = 'user__user__first_name'
admin.site.register(models.Donation, DonationAdmin)
class HotelBookingAdmin(admin.ModelAdmin):
list_display = ('conference', 'booking_start', 'booking_end', 'minimum_night')
admin.site.register(models.HotelBooking, HotelBookingAdmin)
class HotelRoomAdmin(admin.ModelAdmin):
list_display = ('_conference', 'room_type', 'quantity', 'amount',)
list_editable = ('quantity', 'amount',)
list_filter = ('booking__conference',)
list_select_related = True
def _conference(self, o):
return o.booking.conference_id
def get_urls(self):
urls = super(HotelRoomAdmin, self).get_urls()
my_urls = patterns('',
url(r'^tickets/$', self.admin_site.admin_view(self.ticket_list), name='p3-hotelrooms-tickets-data'),
)
return my_urls + urls
def ticket_list(self, request):
from conference.views import json_dumps
day_ix = int(request.GET['day'])
room_type = request.GET['type']
rdays = models.TicketRoom.objects.reserved_days()
day = rdays[day_ix]
qs = models.TicketRoom.objects.valid_tickets()\
.filter(room_type__room_type=room_type, checkin__lte=day, checkout__gte=day)\
.select_related('ticket__user', 'ticket__orderitem__order')\
.order_by('ticket__orderitem__order__created')
output = []
for row in qs:
user = row.ticket.user
order = row.ticket.orderitem.order
name = u'{0} {1}'.format(user.first_name, user.last_name)
if row.ticket.name and row.ticket.name != name:
name = u'{0} ({1})'.format(row.ticket.name, name)
output.append({
'user': {
'id': user.id,
'name': name,
},
'order': {
'id': order.id,
'code': order.code,
'method': order.method,
'complete': order._complete,
},
'period': (row.checkin, row.checkout, row.checkout == day),
})
return http.HttpResponse(json_dumps(output), 'text/javascript')
admin.site.register(models.HotelRoom, HotelRoomAdmin)
class TicketRoomAdmin(admin.ModelAdmin):
list_display = ('_user', '_room_type', 'ticket_type', 'checkin', 'checkout', '_order_code', '_order_date', '_order_confirmed')
list_select_related = True
search_fields = ('ticket__user__first_name', 'ticket__user__last_name', 'ticket__user__email', 'ticket__orderitem__order__code')
raw_id_fields = ('ticket', )
list_filter = ('room_type__room_type',)
def _user(self, o):
return o.ticket.user
def _room_type(self, o):
return o.room_type.get_room_type_display()
def _order_code(self, o):
return o.ticket.orderitem.order.code
def _order_date(self, o):
return o.ticket.orderitem.order.created
def _order_confirmed(self, o):
return o.ticket.orderitem.order._complete
_order_confirmed.boolean = True
admin.site.register(models.TicketRoom, TicketRoomAdmin)
class InvoiceAdmin(aadmin.InvoiceAdmin):
"""
    Specialization to handle downloading the invoices generated with genro
"""
def _invoice(self, i):
if i.assopy_id:
fake = not i.payment_date
view = urlresolvers.reverse('genro-legacy-invoice', kwargs={'assopy_id': i.assopy_id})
            return '<a href="%s">View</a> %s' % (view, '[Not paid]' if fake else '')
else:
return super(InvoiceAdmin, self)._invoice(i)
_invoice.allow_tags = True
_invoice.short_description = 'Download'
admin.site.unregister(amodels.Invoice)
admin.site.register(amodels.Invoice, InvoiceAdmin)
class VotoTalkAdmin(admin.ModelAdmin):
list_display = ('user', '_name', 'talk', 'vote')
list_filter = ('talk__conference',
)
search_fields = [ 'talk__title',
'user__username',
'user__last_name', 'user__first_name' ]
ordering = ('-talk__conference', 'talk')
def _name(self, o):
url = urlresolvers.reverse('conference-profile',
kwargs={'slug': o.user.attendeeprofile.slug})
return '<a href="%s">%s %s</a>' % (url, o.user.first_name, o.user.last_name)
_name.allow_tags = True
_name.admin_order_field = 'user__first_name'
admin.site.register(cmodels.VotoTalk, VotoTalkAdmin)
class AttendeeProfileAdmin(admin.ModelAdmin):
list_display = ('_name',
'_user',
'company', 'location', 'visibility')
list_filter = ('visibility',
)
search_fields = [ 'user__username',
'user__last_name', 'user__first_name',
'company',
'location',
]
def _name(self, o):
url = urlresolvers.reverse('conference-profile',
kwargs={'slug': o.slug})
return '<a href="%s">%s %s</a>' % (url, o.user.first_name, o.user.last_name)
_name.allow_tags = True
_name.admin_order_field = 'user__first_name'
def _user(self, o):
url = urlresolvers.reverse('admin:auth_user_change',
args=(o.user.id,))
return '<a href="%s">%s</a>' % (url, o.user.username)
_user.allow_tags = True
_user.admin_order_field = 'user__username'
admin.site.register(cmodels.AttendeeProfile, AttendeeProfileAdmin)
# MAL: Commented out, since we don't really have a need for this:
#
# class TalkConferenceAdminForm(cadmin.TalkAdminForm):
# def __init__(self, *args, **kwargs):
# super(TalkConferenceAdminForm, self).__init__(*args, **kwargs)
# self.fields['tags'].required = False
#
# class TalkConferenceAdmin(cadmin.TalkAdmin):
# multilingual_widget = cforms.MarkEditWidget
# form = TalkConferenceAdminForm
#
# admin.site.unregister(cmodels.Talk)
# admin.site.register(cmodels.Talk, TalkConferenceAdmin)
class TalkAdmin(cadmin.TalkAdmin):
list_filter = ('conference', 'status', 'duration', 'type',
'level', 'tags__name', 'language',
)
search_fields = [ 'title',
'talkspeaker__speaker__user__last_name',
'talkspeaker__speaker__user__first_name',
'speakers__user__attendeeprofile__company',
]
list_display = ('title', 'conference', '_speakers',
'_company',
'duration', 'status', 'created',
'level', '_tags',
'_slides', '_video',
'language',
)
ordering = ('-conference', 'title')
multilingual_widget = cforms.MarkEditWidget
def _tags(self, obj):
return u', '.join(sorted(unicode(tag) for tag in obj.tags.all()))
def _company(self, obj):
companies = sorted(
set(speaker.user.attendeeprofile.company
for speaker in obj.speakers.all()
if speaker.user.attendeeprofile))
return u', '.join(companies)
_company.admin_order_field = 'speakers__user__attendeeprofile__company'
admin.site.unregister(cmodels.Talk)
admin.site.register(cmodels.Talk, TalkAdmin)
class OrderAdmin(aadmin.OrderAdmin):
list_display = aadmin.OrderAdmin.list_display + (
'country',
)
list_filter = aadmin.OrderAdmin.list_filter + (
DiscountListFilter,
'country',
)
admin.site.unregister(amodels.Order)
admin.site.register(amodels.Order, OrderAdmin)
class EventTrackInlineAdmin(admin.TabularInline):
model = cmodels.EventTrack
extra = 3
class EventAdmin(admin.ModelAdmin):
list_display = ('schedule',
'start_time',
'duration',
'_title',
'_tracks')
ordering = ('schedule',
'start_time',
'tracks',
)
list_filter = ('schedule',
'tracks')
search_fields = ['talk__title',
'custom',
]
inlines = (EventTrackInlineAdmin,
)
def _tracks(self, obj):
return ", ".join([track.track
for track in obj.tracks.all()])
def _title(self, obj):
if obj.custom:
return obj.custom
else:
return obj.talk
admin.site.register(cmodels.Event, EventAdmin)
class TrackAdmin(admin.ModelAdmin):
list_display = ('schedule',
'_slug',
'_date',
'track',
'title',
)
ordering = ('schedule',
'track',
)
list_filter = ('schedule',
'schedule__slug',
'track',
'title')
search_fields = ['schedule__conference',
'schedule__slug',
'track',
'title',
]
inlines = (EventTrackInlineAdmin,
)
list_select_related = True
def _slug(self, obj):
return obj.schedule.slug
def _date(self, obj):
return obj.schedule.date
admin.site.register(cmodels.Track, TrackAdmin)
class ScheduleAdmin(cadmin.ScheduleAdmin):
pass
admin.site.unregister(cmodels.Schedule)
admin.site.register(cmodels.Schedule, ScheduleAdmin)
### Orders Stats
# For simplicity, we monkey patch the
# assopy.stats.prezzo_biglietti_ricalcolato() function here.
#
# This is poor style, but until we have merged the packages into the
# epcon package, this is the easiest way forward.
def prezzo_biglietti_ricalcolato(**kw):
"""
    Recompute the ticket revenue by removing the free tickets and
    redistributing their price over the remaining ones.
"""
    # we only care about orders that involve purchases of "conference"
    # tickets
orders = amodels.Order.objects\
.filter(id__in=astats._orders(**kw))\
.values('id')\
.distinct()
fares = set(cmodels.Fare.objects\
.values_list('code', flat=True))
def _calc_prices(order_id, items):
"""
        Remove the discount items and proportionally reduce the value
        of the remaining ones.
"""
prices = set()
discount = Decimal('0')
total = Decimal('0')
for item in items:
if item['price'] > 0:
prices.add(item['price'])
total += item['price']
else:
discount += item['price'] * -1
for ix, item in reversed(list(enumerate(items))):
if item['price'] > 0:
item['price'] = item['price'] * (total - discount) / total
else:
del items[ix]
grouped = defaultdict(list)
for ticket_type, ticket_type_description in cmodels.FARE_TICKET_TYPES:
qs = amodels.OrderItem.objects\
.filter(Q(ticket__isnull=True) |
Q(ticket__fare__ticket_type=ticket_type),
order__in=orders)\
.values_list('ticket__fare__code',
'ticket__fare__name',
'price',
'order')
for fcode, fname, price, oid in qs:
if fcode in fares or price < 0:
grouped[oid].append({
'code': fcode,
'name': fname,
'price': price,
})
for oid, items in grouped.items():
_calc_prices(oid, items)
    # after _calc_prices the recomputed prices no longer match the ordinary
    # fare prices, so regroup the resulting order items by fare code and
    # new price
tcp = {}
for rows in grouped.values():
for item in rows:
code = item['code']
if code not in tcp:
tcp[code] = {
'code': code,
'name': item['name'],
'prices': {}
}
price = item['price']
if price not in tcp[code]['prices']:
tcp[code]['prices'][price] = { 'price': price, 'count': 0 }
tcp[code]['prices'][price]['count'] += 1
return tcp.values()
prezzo_biglietti_ricalcolato.template = '''
<table>
<tr>
<th>Code</th>
<th>Qty</th>
<th style="width: 70px;">Price</th>
</tr>
{% for ticket in data %}
{% for p in ticket.prices.values %}
<tr>
{% if forloop.counter == 1 %}
<td title="{{ ticket.name }}" rowspan="{{ ticket.prices|length }}">{{ ticket.code }}</td>
{% endif %}
<td>{{ p.count }}</td>
<td>€ {{ p.price|floatformat:"2" }}</td>
</tr>
{% endfor %}
{% endfor %}
</table>
'''
# Monkey patch our version into assopy package:
if 0:
astats.prezzo_biglietti_ricalcolato = prezzo_biglietti_ricalcolato
| barrachri/epcon | p3/admin.py | Python | bsd-2-clause | 27,045 | 0.00281 |
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing a Python package independent of the Python version, as an easyblock.
Python installs libraries by default in site-packages/python-xxx/
But packages that do not depend on the Python version can be installed in a different prefix, e.g. lib,
as long as we add this folder to the pythonpath.
@author: Kenneth Hoste, Jens Timmerman (Ghent University)
"""
import os
import re
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.pythonpackage import EASY_INSTALL_CMD, PythonPackage
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
class VersionIndependentPythonPackage(PythonPackage):
"""Support for building/installing python packages without requiring a specific python package."""
def build_step(self):
"""No build procedure."""
pass
def prepare_step(self):
"""Set pylibdir"""
self.pylibdir = 'lib'
super(VersionIndependentPythonPackage, self).prepare_step()
def install_step(self):
"""Custom install procedure to skip selection of python package versions."""
full_pylibdir = os.path.join(self.installdir, self.pylibdir)
env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))
try:
os.mkdir(full_pylibdir)
except OSError, err:
# this will raise an error and not return
raise EasyBuildError("Failed to install: %s", err)
if self.install_cmd.startswith(EASY_INSTALL_CMD):
self.cfg.update('installopts', '--install-dir=%s' % full_pylibdir)
else:
extra_installopts = [
'--install-lib=%s' % full_pylibdir,
'--single-version-externally-managed',
'--record %s' % os.path.join(self.builddir, 'record'),
'--no-compile',
]
self.cfg.update('installopts', ' '.join(extra_installopts))
cmd = self.compose_install_command(self.installdir)
run_cmd(cmd, log_all=True, simple=True, log_output=True)
# setuptools stubbornly replaces the shebang line in scripts with
# the full path to the Python interpreter used to install;
# we change it (back) to '#!/usr/bin/env python' here
shebang_re = re.compile("^#!/.*python")
bindir = os.path.join(self.installdir, 'bin')
if os.path.exists(bindir):
for script in os.listdir(bindir):
script = os.path.join(bindir, script)
if os.path.isfile(script):
try:
txt = open(script, 'r').read()
if shebang_re.search(txt):
new_shebang = "#!/usr/bin/env python"
self.log.debug("Patching shebang header line in %s to '%s'" % (script, new_shebang))
txt = shebang_re.sub(new_shebang, txt)
open(script, 'w').write(txt)
except IOError, err:
raise EasyBuildError("Failed to patch shebang header line in %s: %s", script, err)
| valtandor/easybuild-easyblocks | easybuild/easyblocks/generic/versionindependentpythonpackage.py | Python | gpl-2.0 | 4,223 | 0.002368 |
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, fields, models
class ResPartner(models.Model):
_inherit = "res.partner"
_allowed_inactive_link_models = ["res.partner"]
_inactive_cascade = True
sta_mandate_ids = fields.One2many(
comodel_name="sta.mandate",
inverse_name="partner_id",
string="State Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
sta_mandate_inactive_ids = fields.One2many(
comodel_name="sta.mandate",
inverse_name="partner_id",
string="State Mandates (Inactive)",
domain=[("active", "=", False)],
)
int_mandate_ids = fields.One2many(
comodel_name="int.mandate",
inverse_name="partner_id",
string="Internal Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
int_mandate_inactive_ids = fields.One2many(
comodel_name="int.mandate",
inverse_name="partner_id",
string="Internal Mandates (Inactive)",
domain=[("active", "=", False)],
)
ext_mandate_ids = fields.One2many(
comodel_name="ext.mandate",
inverse_name="partner_id",
string="External Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
ext_mandate_inactive_ids = fields.One2many(
comodel_name="ext.mandate",
inverse_name="partner_id",
string="External Mandates (Inactive)",
domain=[("active", "=", False)],
)
ext_mandate_count = fields.Integer(
string="External Mandates Nbr", compute="_compute_mandate_assembly_count"
)
ext_assembly_count = fields.Integer(
string="External Assemblies", compute="_compute_mandate_assembly_count"
)
def get_mandate_action(self):
"""
        Return an action on ext.mandate whose domain is restricted to the
        mandates linked to this partner's assemblies
"""
self.ensure_one()
res_ids = self._get_assemblies()._get_mandates().ids
domain = [("id", "in", res_ids)]
# get model's action to update its domain
action = self.env["ir.actions.act_window"]._for_xml_id(
"mozaik_mandate.ext_mandate_action"
)
action["domain"] = domain
return action
def _get_assemblies(self):
"""
return the assemblies of the current partner
"""
self.ensure_one()
assembly_model = "ext.assembly"
if self.is_assembly:
field = "partner_id"
else:
field = "ref_partner_id"
domain = [(field, "=", self.id)]
assembly_obj = self.env[assembly_model]
assemblies = assembly_obj.search(domain)
return assemblies
def _compute_mandate_assembly_count(self):
"""
        Count the number of assemblies linked to the current partner and
        the number of mandates linked to the assemblies of the
        current partner
"""
for partner in self:
assemblies = partner._get_assemblies()
partner.ext_assembly_count = len(assemblies)
partner.ext_mandate_count = len(assemblies._get_mandates())
def add_mandate_action(self):
self.ensure_one()
return {
"type": "ir.actions.act_window",
"name": _("Add a new mandate"),
"res_model": self._context.get("mandate_model"),
"context": {"default_partner_id": self.id},
"view_mode": "form",
"target": "new",
}
| mozaik-association/mozaik | mozaik_mandate/models/res_partner.py | Python | agpl-3.0 | 3,660 | 0.000273 |
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
# (i.e. you cannot make commercial derivatives).
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from . import configurator, usb_camera, genicam
| ricotabor/opendrop | opendrop/app/common/image_acquisition/configurator/__init__.py | Python | gpl-2.0 | 1,447 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MetricType.unit'
db.add_column(u'schools_metrictype', 'unit',
self.gf('django.db.models.fields.CharField')(default='', max_length=10),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MetricType.unit'
db.delete_column(u'schools_metrictype', 'unit')
models = {
u'schools.metric': {
'Meta': {'object_name': 'Metric'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['schools.MetricType']"}),
'metric_value': ('django.db.models.fields.TextField', [], {}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metrics'", 'to': u"orm['schools.School']"}),
'year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['schools.Year']"})
},
u'schools.metrictype': {
'Meta': {'object_name': 'MetricType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mtype': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'schools.school': {
'Meta': {'object_name': 'School'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position': ('django.contrib.gis.db.models.fields.PointField', [], {})
},
u'schools.year': {
'Meta': {'object_name': 'Year'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['schools'] | formicablu/digischool | schools/migrations/0004_auto__add_field_metrictype_unit.py | Python | gpl-2.0 | 2,337 | 0.007274 |
import os
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-email-subscription',
url='https://github.com/MagicSolutions/django-email-subscription',
version='0.0.1',
    description='Django app for creating subscription accounts.',
long_description=README,
install_requires=[
'django-simple-captcha>=0.4.2',
],
packages=find_packages(),
package_data={'': ['LICENSE']},
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
],
)
| MagicSolutions/django-email-subscription | setup.py | Python | mit | 888 | 0 |
#!/usr/bin/env python
# -*- coding: ascii -*-
"""
raw audio converter
~~~~~~~~~~~~~~~~~~~
    Convert a text file containing one integer sample per line into a raw
    binary file of packed 32-bit integer samples, written to ``raw_audio.out``.
:copyright: year by my name, see AUTHORS for more details
:license: license_name, see LICENSE for more details
"""
import struct
import sys
outputfilename = 'raw_audio.out'
def do_convert(filename):
    """Convert a text file of integer samples (one per line) to raw binary."""
    # open the files before the try block so the finally clause never
    # references names that were never bound
    f_in = open(filename, 'r')
    f_out = open(outputfilename, 'wb')
    try:
        sample = 0
        for line in f_in:
            try:
                sample = int(line)
                data = struct.pack("i", sample) # pack integer in a binary string
                f_out.write(data)
            except ValueError:
                print "Cannot convert: " + line
    finally:
        f_in.close()
        f_out.close()
if __name__=='__main__':
print "Converting..."
do_convert(sys.argv[1])
print "done. Written to " + outputfilename
| EPiCS/soundgates | hardware/tools/to_rawdata.py | Python | mit | 1,000 | 0.019 |
# stdlib
from collections import defaultdict
import datetime
import logging
import os
import shutil
import tempfile
# 3p
import xml.etree.ElementTree as ET
# project
from tests.checks.common import AgentCheckTest
logger = logging.getLogger(__file__)
DATETIME_FORMAT = '%Y-%m-%d_%H-%M-%S'
LOG_DATA = 'Finished: SUCCESS'
SUCCESSFUL_BUILD = {'number': '99', 'result': 'SUCCESS', 'duration': '60'}
NO_RESULTS_YET = {'number': '99', 'duration': '60'}
UNSUCCESSFUL_BUILD = {'number': '99', 'result': 'ABORTED', 'duration': '60'}
CONFIG = """
init_config:
instances:
- name: default
jenkins_home: <JENKINS_HOME>
"""
def dict_to_xml(metadata_dict):
""" Convert a dict to xml for use in a build.xml file """
build = ET.Element('build')
for k, v in metadata_dict.iteritems():
node = ET.SubElement(build, k)
node.text = v
return ET.tostring(build)
def write_file(file_name, log_data):
with open(file_name, 'w') as log_file:
log_file.write(log_data)
class TestJenkins(AgentCheckTest):
CHECK_NAME = 'jenkins'
def setUp(self):
super(TestJenkins, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.config = {
'init_config': {},
'instances': [{
'name': 'default',
'jenkins_home': self.tmp_dir
}]
}
self.instance = self.config['instances'][0]
self.config_yaml = CONFIG.replace('<JENKINS_HOME>', self.tmp_dir)
self._create_old_build()
def tearDown(self):
super(TestJenkins, self).tearDown()
# Clean up the temp directory
shutil.rmtree(self.tmp_dir)
def _create_old_build(self):
# As coded, the jenkins dd agent needs more than one result
# in order to get the last valid build.
# Create one for yesterday.
metadata = dict_to_xml(SUCCESSFUL_BUILD)
yesterday = datetime.date.today() - datetime.timedelta(days=1)
self._populate_build_dir(metadata, yesterday)
def _create_check(self):
# Create the jenkins check
self.load_check(self.config)
def _populate_build_dir(self, metadata, time=None):
# The jenkins dd agent requires the build metadata file and a log file of results
time = time or datetime.datetime.now()
datestring = time.strftime(DATETIME_FORMAT)
build_dir = os.path.join(self.tmp_dir, 'jobs', 'foo', 'builds', datestring)
os.makedirs(build_dir)
log_file = os.path.join(build_dir, 'log')
log_data = LOG_DATA
write_file(log_file, log_data)
metadata_file = os.path.join(build_dir, 'build.xml')
build_metadata = metadata
write_file(metadata_file, build_metadata)
def testParseBuildLog(self):
"""
Test doing a jenkins check. This will parse the logs but since there was no
previous high watermark no event will be created.
"""
metadata = dict_to_xml(SUCCESSFUL_BUILD)
self._populate_build_dir(metadata)
self._create_check()
self.run_check(self.config)
# The check method does not return anything, so this testcase passes
# if the high_watermark was set and no exceptions were raised.
self.assertTrue(self.check.high_watermarks[self.instance['name']]['foo'] > 0)
def testCheckSuccessfulEvent(self):
"""
Test that a successful build will create the correct metrics.
"""
metadata = dict_to_xml(SUCCESSFUL_BUILD)
self._populate_build_dir(metadata)
self._create_check()
# Set the high_water mark so that the next check will create events
self.check.high_watermarks['default'] = defaultdict(lambda: 0)
self.run_check(self.config)
metrics_names = [m[0] for m in self.metrics]
assert len(metrics_names) == 2
assert 'jenkins.job.success' in metrics_names
assert 'jenkins.job.duration' in metrics_names
metrics_tags = [m[3] for m in self.metrics]
for tag in metrics_tags:
assert 'job_name:foo' in tag.get('tags')
assert 'result:SUCCESS' in tag.get('tags')
assert 'build_number:99' in tag.get('tags')
def testCheckUnsuccessfulEvent(self):
"""
Test that an unsuccessful build will create the correct metrics.
"""
metadata = dict_to_xml(UNSUCCESSFUL_BUILD)
self._populate_build_dir(metadata)
self._create_check()
# Set the high_water mark so that the next check will create events
self.check.high_watermarks['default'] = defaultdict(lambda: 0)
self.run_check(self.config)
metrics_names = [m[0] for m in self.metrics]
assert len(metrics_names) == 2
assert 'jenkins.job.failure' in metrics_names
assert 'jenkins.job.duration' in metrics_names
metrics_tags = [m[3] for m in self.metrics]
for tag in metrics_tags:
assert 'job_name:foo' in tag.get('tags')
assert 'result:ABORTED' in tag.get('tags')
assert 'build_number:99' in tag.get('tags')
def testCheckWithRunningBuild(self):
"""
Test under the conditions of a jenkins build still running.
The build.xml file will exist but it will not yet have a result.
"""
metadata = dict_to_xml(NO_RESULTS_YET)
self._populate_build_dir(metadata)
self._create_check()
# Set the high_water mark so that the next check will create events
self.check.high_watermarks['default'] = defaultdict(lambda: 0)
self.run_check(self.config)
# The check method does not return anything, so this testcase passes
# if the high_watermark was NOT updated and no exceptions were raised.
assert self.check.high_watermarks[self.instance['name']]['foo'] == 0
| varlib1/servermall | jenkins/test_jenkins.py | Python | bsd-3-clause | 5,889 | 0.000849 |
from collections import Counter
from os.path import splitext
import matplotlib.pyplot as plt
from arcapix.fs.gpfs import ListProcessingRule, ManagementPolicy
def type_sizes(file_list):
c = Counter()
for f in file_list:
        # key by file extension so that sizes aggregate per file type
        c.update({splitext(f.name)[1]: f.filesize})
return c
p = ManagementPolicy()
r = p.rules.new(ListProcessingRule, 'types', type_sizes)
result = p.run('mmfs1')['types']
plt.pie(list(result.values()), labels=list(result.keys()), autopct='%1.1f%%')
plt.axis('equal')
plt.show()
| arcapix/gpfsapi-examples | type_sizes_piechart.py | Python | mit | 519 | 0.001927 |
# Generated by Django 2.2rc1 on 2019-03-26 13:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0042_remove_null_true'),
]
operations = [
migrations.AlterModelOptions(
name='catalog',
options={'ordering': ('order',), 'verbose_name': 'Catalog', 'verbose_name_plural': 'Catalogs'},
),
migrations.AlterModelOptions(
name='question',
options={'ordering': ('questionset', 'order'), 'verbose_name': 'Question', 'verbose_name_plural': 'Questions'},
),
migrations.AlterModelOptions(
name='questionset',
options={'ordering': ('section', 'order'), 'verbose_name': 'Question set', 'verbose_name_plural': 'Question set'},
),
migrations.AlterModelOptions(
name='section',
options={'ordering': ('catalog__order', 'order'), 'verbose_name': 'Section', 'verbose_name_plural': 'Sections'},
),
]
| rdmorganiser/rdmo | rdmo/questions/migrations/0043_django2.py | Python | apache-2.0 | 1,023 | 0.00391 |